stable-diffusion-webui · Commits · 545ae8cb

Commit 545ae8cb (unverified), authored Jan 04, 2023 by AUTOMATIC1111, committed by GitHub on Jan 04, 2023
Merge pull request #6264 from vladmandic/add-state-info

add missing state info

Parents: a8ad8666, d8d206c1
Showing 5 changed files with 31 additions and 7 deletions:

modules/extras.py                                  +25  -6
modules/hypernetworks/hypernetwork.py               +1  -0
modules/interrogate.py                              +3  -1
modules/textual_inversion/preprocess.py             +1  -0
modules/textual_inversion/textual_inversion.py      +1  -0
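
Every change in this commit follows the same pattern: a long-running operation publishes what it is doing on the global shared.state object (job, textinfo, job_count, with begin()/end() marking where the whole operation starts and finishes), so progress is visible outside the function doing the work. The sketch below illustrates that pattern in isolation; the State class here is a simplified stand-in for the real one in modules/shared.py, and run_some_operation is a made-up example, not code from this repository.

# Simplified stand-in for the State object in modules/shared.py.
# Only the attributes touched by this commit are modeled; the real class
# tracks much more (sampling step, current image, interrupt flags, ...).
class State:
    def __init__(self):
        self.job = ""          # short identifier, e.g. 'extras', 'model-merge'
        self.textinfo = None   # human-readable progress line
        self.job_count = 0     # number of sub-jobs, when known up front
        self.job_no = 0

    def begin(self):
        # reset per-run fields before a new operation starts
        self.job = ""
        self.textinfo = None
        self.job_count = 0
        self.job_no = 0

    def end(self):
        # mark the operation as finished
        self.job = ""
        self.textinfo = None


state = State()


def run_some_operation(items):
    # The pattern this commit applies to extras post-processing, model
    # merging, interrogation, preprocessing and the training entry points.
    state.begin()
    state.job = 'some-operation'   # analogous to 'extras', 'interrogate', ...
    state.job_count = len(items)
    for i, item in enumerate(items):
        state.textinfo = f'Processing item {i + 1}/{len(items)}'
        # ... do the actual work on `item` here ...
        state.job_no += 1
    state.end()


run_some_operation(["a", "b", "c"])

Because the state is a shared singleton with plain attributes, each call site in the diff below needs only a one-line addition.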
modules/extras.py

@@ -58,6 +58,9 @@ cached_images: LruCache = LruCache(max_size=5)
 def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True):
     devices.torch_gc()
 
+    shared.state.begin()
+    shared.state.job = 'extras'
+
     imageArr = []
     # Also keep track of original file names
     imageNameArr = []
@@ -94,6 +97,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
     # Extra operation definitions
 
     def run_gfpgan(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
+        shared.state.job = 'extras-gfpgan'
         restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
         res = Image.fromarray(restored_img)
@@ -104,6 +108,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
         return (res, info)
 
     def run_codeformer(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
+        shared.state.job = 'extras-codeformer'
         restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
         res = Image.fromarray(restored_img)
@@ -114,6 +119,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
         return (res, info)
 
     def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop):
+        shared.state.job = 'extras-upscale'
         upscaler = shared.sd_upscalers[scaler_index]
         res = upscaler.scaler.upscale(image, resize, upscaler.data_path)
         if mode == 1 and crop:
@@ -180,6 +186,9 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
     for image, image_name in zip(imageArr, imageNameArr):
         if image is None:
             return outputs, "Please select an input image.", ''
+
+        shared.state.textinfo = f'Processing image {image_name}'
+
         existing_pnginfo = image.info or {}
         image = image.convert("RGB")
@@ -193,6 +202,10 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
         else:
             basename = ''
 
+        if opts.enable_pnginfo: # append info before save
+            image.info = existing_pnginfo
+            image.info["extras"] = info
+
         if save_output:
             # Add upscaler name as a suffix.
             suffix = f"-{shared.sd_upscalers[extras_upscaler_1].name}" if shared.opts.use_upscaler_name_as_suffix else ""
@@ -203,10 +216,6 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
             images.save_image(image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True,
                               no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None, suffix=suffix)
 
-        if opts.enable_pnginfo:
-            image.info = existing_pnginfo
-            image.info["extras"] = info
-
         if extras_mode != 2 or show_extras_results:
             outputs.append(image)
@@ -242,6 +251,9 @@ def run_pnginfo(image):
 def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format):
+    shared.state.begin()
+    shared.state.job = 'model-merge'
+
     def weighted_sum(theta0, theta1, alpha):
         return ((1 - alpha) * theta0) + (alpha * theta1)
@@ -263,8 +275,11 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
     theta_func1, theta_func2 = theta_funcs[interp_method]
 
     if theta_func1 and not tertiary_model_info:
+        shared.state.textinfo = "Failed: Interpolation method requires a tertiary model."
+        shared.state.end()
         return ["Failed: Interpolation method requires a tertiary model."] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
 
+    shared.state.textinfo = f"Loading {secondary_model_info.filename}..."
     print(f"Loading {secondary_model_info.filename}...")
     theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
@@ -281,6 +296,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
                 theta_1[key] = torch.zeros_like(theta_1[key])
         del theta_2
 
+    shared.state.textinfo = f"Loading {primary_model_info.filename}..."
     print(f"Loading {primary_model_info.filename}...")
     theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
@@ -291,6 +307,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
             a = theta_0[key]
             b = theta_1[key]
 
+            shared.state.textinfo = f'Merging layer {key}'
             # this enables merging an inpainting model (A) with another one (B);
             # where normal model would have 4 channels, for latenst space, inpainting model would
             # have another 4 channels for unmasked picture's latent space, plus one channel for mask, for a total of 9
@@ -303,8 +320,6 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
                 theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
                 result_is_inpainting_model = True
             else:
-                assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}'
-
                 theta_0[key] = theta_func2(a, b, multiplier)
 
             if save_as_half:
@@ -332,6 +347,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
     output_modelname = os.path.join(ckpt_dir, filename)
 
+    shared.state.textinfo = f"Saving to {output_modelname}..."
     print(f"Saving to {output_modelname}...")
 
     _, extension = os.path.splitext(output_modelname)
@@ -343,4 +359,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam
     sd_models.list_models()
 
     print("Checkpoint saved.")
+    shared.state.textinfo = "Checkpoint saved to " + output_modelname
+    shared.state.end()
 
     return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
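
The comment in the merge loop is doing simple channel arithmetic: a regular Stable Diffusion UNet's first convolution takes 4 latent channels, while an inpainting UNet takes 9 (4 for the noised latent, another 4 for the masked picture's latent, plus 1 for the mask). The sketch below replays that branch with random stand-in tensors; weighted_sum and the slice assignment mirror the code above, while the concrete tensor sizes are only illustrative.

import torch

def weighted_sum(theta0, theta1, alpha):
    # same interpolation formula as weighted_sum in the diff above
    return ((1 - alpha) * theta0) + (alpha * theta1)

# Random stand-ins for the first UNet convolution weight; the 320 and 3x3
# sizes are illustrative, the channel counts (9 vs 4) are the point.
a = torch.randn(320, 9, 3, 3)   # model A (inpainting): 4 latent + 4 masked-image + 1 mask channels
b = torch.randn(320, 4, 3, 3)   # model B (standard): 4 latent channels
multiplier = 0.5

merged = a.clone()
# Blend only the 4 input channels both models share, exactly like
# theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
merged[:, 0:4, :, :] = weighted_sum(a[:, 0:4, :, :], b, multiplier)

assert merged.shape == a.shape  # still a 9-channel, i.e. inpainting, layer

Only the channels both models have in common are interpolated; the remaining five input channels keep model A's weights, which is why the merge above flags the result with result_is_inpainting_model = True.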
modules/hypernetworks/hypernetwork.py

@@ -417,6 +417,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
     shared.loaded_hypernetwork = Hypernetwork()
     shared.loaded_hypernetwork.load(path)
 
+    shared.state.job = "train-hypernetwork"
     shared.state.textinfo = "Initializing hypernetwork training..."
     shared.state.job_count = steps
modules/interrogate.py

@@ -136,7 +136,8 @@ class InterrogateModels:
     def interrogate(self, pil_image):
         res = ""
-
+        shared.state.begin()
+        shared.state.job = 'interrogate'
         try:
             if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
@@ -177,5 +178,6 @@ class InterrogateModels:
                 res += "<error>"
 
         self.unload()
+        shared.state.end()
 
         return res
modules/textual_inversion/preprocess.py

@@ -124,6 +124,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
     files = listfiles(src)
 
+    shared.state.job = "preprocess"
     shared.state.textinfo = "Preprocessing..."
     shared.state.job_count = len(files)
modules/textual_inversion/textual_inversion.py

@@ -245,6 +245,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     create_image_every = create_image_every or 0
     validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
 
+    shared.state.job = "train-embedding"
     shared.state.textinfo = "Initializing textual inversion training..."
     shared.state.job_count = steps
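
With these one-line additions, every entry point touched here (extras post-processing, model merging, CLIP interrogation, dataset preprocessing, hypernetwork and embedding training) identifies itself through shared.state.job, and several also report per-step text. A hypothetical monitor could read those fields from another thread while one of these operations runs; the helper below is only an illustration (poll_state is not part of the webui, and it assumes it runs inside the webui process where modules.shared is importable).

import threading
import time

from modules import shared  # the object every change in this commit writes to


def poll_state(stop_event, interval=1.0):
    # Hypothetical monitor: print whatever the running job has published on
    # shared.state. job, textinfo and job_count are the fields this commit
    # starts filling in for extras, merging, interrogation and training.
    while not stop_event.is_set():
        job = shared.state.job or "<idle>"
        info = shared.state.textinfo or ""
        print(f"[{job}] {info} (job_count={shared.state.job_count})")
        time.sleep(interval)


stop = threading.Event()
threading.Thread(target=poll_state, args=(stop,), daemon=True).start()
# ... trigger run_extras / run_modelmerger / interrogate / a training run here ...
# stop.set() once the operation has finished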