Administrator / stable-diffusion-webui / Commits / 97ceaa23

Unverified commit 97ceaa23, authored Oct 15, 2022 by MalumaDev, committed by GitHub on Oct 15, 2022.
Merge branch 'master' into test_resolve_conflicts

Parents: 3d21684e, be1596ce
Showing 6 changed files with 30 additions and 26 deletions (+30 / -26):
launch.py                                        +3   -2
modules/hypernetworks/hypernetwork.py            +6   -4
modules/sd_hijack.py                             +2   -2
modules/textual_inversion/textual_inversion.py   +13  -4
modules/ui.py                                    +3   -6
style.css                                        +3   -8
launch.py

@@ -104,6 +104,7 @@ def prepare_enviroment():
     args = shlex.split(commandline_args)
     args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
+    args, reinstall_xformers = extract_arg(args, '--reinstall-xformers')
     xformers = '--xformers' in args
     deepdanbooru = '--deepdanbooru' in args
     ngrok = '--ngrok' in args

@@ -128,9 +129,9 @@ def prepare_enviroment():
     if not is_installed("clip"):
         run_pip(f"install {clip_package}", "clip")
-    if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
+    if (not is_installed("xformers") or reinstall_xformers) and xformers and platform.python_version().startswith("3.10"):
         if platform.system() == "Windows":
-            run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/c/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+            run_pip("install -U -I --no-deps https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
         elif platform.system() == "Linux":
             run_pip("install xformers", "xformers")
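Note: --reinstall-xformers is consumed through the same extract_arg helper as --skip-torch-cuda-test, so it is stripped from the argument list before the remaining flags are parsed. A minimal sketch of that pattern (illustrative only, not necessarily the repository's exact implementation):

def extract_arg(args, name):
    # Drop `name` from the argument list and report whether it was present.
    return [x for x in args if x != name], name in args

args = ['--xformers', '--reinstall-xformers', '--ngrok']
args, reinstall_xformers = extract_arg(args, '--reinstall-xformers')
print(args)                # ['--xformers', '--ngrok']
print(reinstall_xformers)  # True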
modules/hypernetworks/hypernetwork.py

@@ -272,15 +272,17 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
-        pbar.set_description(f"loss: {losses.mean():.7f}")
+        mean_loss = losses.mean()
+        if torch.isnan(mean_loss):
+            raise RuntimeError("Loss diverged.")
+        pbar.set_description(f"loss: {mean_loss:.7f}")
         if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
             last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt')
             hypernetwork.save(last_saved_file)
         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
-            "loss": f"{losses.mean():.7f}",
+            "loss": f"{mean_loss:.7f}",
             "learn_rate": scheduler.learn_rate
         })

@@ -328,7 +330,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         shared.state.textinfo = f"""
 <p>
-Loss: {losses.mean():.7f}<br/>
+Loss: {mean_loss:.7f}<br/>
 Step: {hypernetwork.step}<br/>
 Last prompt: {html.escape(entries[0].cond_text)}<br/>
 Last saved embedding: {html.escape(last_saved_file)}<br/>
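Note: the training loop now caches losses.mean() in mean_loss and aborts when it turns NaN instead of silently continuing. A self-contained sketch of that guard, assuming losses is a torch tensor of recent loss values (the values below are hypothetical):

import torch

losses = torch.tensor([0.12, 0.11, float('nan')])  # hypothetical recent losses

mean_loss = losses.mean()
if torch.isnan(mean_loss):
    # NaN propagates through the mean, so one diverged step is enough to trip this.
    raise RuntimeError("Loss diverged.")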
modules/sd_hijack.py

@@ -29,8 +29,8 @@ def apply_optimizations():
     ldm.modules.diffusionmodules.model.nonlinearity = silu
-    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (8, 6)):
+    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
         print("Applying xformers cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
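Note: torch.cuda.get_device_capability returns a (major, minor) tuple and Python compares tuples lexicographically, so widening the upper bound from (8, 6) to (9, 0) admits newer GPUs (for example compute capability 8.9) without touching the lower bound. A small illustration; the capability value here is hypothetical:

# Tuple comparison is lexicographic, so the bounds check reads naturally.
capability = (8, 9)  # e.g. a newer GPU that the old bound rejected

print((6, 0) <= capability <= (8, 6))  # False -- old upper bound
print((6, 0) <= capability <= (9, 0))  # True  -- new upper bound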
modules/textual_inversion/textual_inversion.py

@@ -88,9 +88,9 @@ class EmbeddingDatabase:
             data = []
-            if filename.upper().endswith('.PNG'):
+            if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
                 embed_image = Image.open(path)
-                if 'sd-ti-embedding' in embed_image.text:
+                if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
                     data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
                     name = data.get('name', name)
                 else:

@@ -242,6 +242,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     last_saved_file = "<none>"
     last_saved_image = "<none>"
+    embedding_yet_to_be_embedded = False
     ititial_step = embedding.step or 0
     if ititial_step > steps:

@@ -283,6 +284,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
         if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
             last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt')
             embedding.save(last_saved_file)
+            embedding_yet_to_be_embedded = True
         write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
             "loss": f"{losses.mean():.7f}",

@@ -320,7 +322,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
             shared.state.current_image = image
-            if save_image_with_stored_embedding and os.path.exists(last_saved_file):
+            if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
                 last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{embedding.step}.png')

@@ -329,15 +331,22 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 info.add_text("sd-ti-embedding", embedding_to_b64(data))
                 title = "<{}>".format(data.get('name', '???'))
+                try:
+                    vectorSize = list(data['string_to_param'].values())[0].shape[0]
+                except Exception as e:
+                    vectorSize = '?'
                 checkpoint = sd_models.select_checkpoint()
                 footer_left = checkpoint.model_name
                 footer_mid = '[{}]'.format(checkpoint.hash)
-                footer_right = '{}'.format(embedding.step)
+                footer_right = '{}v {}s'.format(vectorSize, embedding.step)
                 captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
                 captioned_image = insert_image_data_embed(captioned_image, data)
                 captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info)
+                embedding_yet_to_be_embedded = False
                 image.save(last_saved_image)
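Note: the broadened check matches the file extension via os.path.splitext on the upper-cased name, so it is case-insensitive and now covers several image formats that can carry an embedding. A quick sketch of how that test behaves (the file names below are made up):

import os

ALLOWED = ['.PNG', '.WEBP', '.JXL', '.AVIF']  # extensions accepted by the new check

for filename in ['token.png', 'token.WebP', 'token.pt']:
    ext = os.path.splitext(filename.upper())[-1]
    print(filename, ext in ALLOWED)
# token.png True
# token.WebP True
# token.pt False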
modules/ui.py

@@ -158,10 +158,7 @@ def save_files(js_data, images, do_make_zip, index):
         writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
         for image_index, filedata in enumerate(images, start_index):
-            if filedata.startswith("data:image/png;base64,"):
-                filedata = filedata[len("data:image/png;base64,"):]
-            image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
+            image = image_from_url_text(filedata)
             is_grid = image_index < p.index_of_first_image
             i = 0 if is_grid else (image_index - p.index_of_first_image)

@@ -638,7 +635,7 @@ def create_ui(wrap_gradio_gpu_call):
                     txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
                     txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
-                with gr.Group():
+                with gr.Column():
                     with gr.Row():
                         save = gr.Button('Save')
                         send_to_img2img = gr.Button('Send to img2img')

@@ -862,7 +859,7 @@ def create_ui(wrap_gradio_gpu_call):
                     img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
                     img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
-                with gr.Group():
+                with gr.Column():
                     with gr.Row():
                         save = gr.Button('Save')
                         img2img_send_to_img2img = gr.Button('Send to img2img')
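Note: the inline base64 handling in save_files is folded into a single image_from_url_text call. A hedged sketch of what such a helper has to do with a PNG data URL (not the module's actual implementation; the function name below is a stand-in):

import base64
import io

from PIL import Image


def image_from_url_text_sketch(filedata):
    # Strip the data-URL prefix if present, then decode the base64 payload into a PIL image.
    prefix = "data:image/png;base64,"
    if filedata.startswith(prefix):
        filedata = filedata[len(prefix):]
    return Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))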
style.css

@@ -237,13 +237,6 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s
     margin: 0;
 }
-.gr-panel div.flex-col div.justify-between div{
-    position: absolute;
-    top: -0.1em;
-    right: 1em;
-    padding: 0 0.5em;
-}
 #settings .gr-panel div.flex-col div.justify-between div{
     position: relative;
     z-index: 200;

@@ -316,6 +309,8 @@ input[type="range"]{
     height: 100%;
     overflow: auto;
     background-color: rgba(20, 20, 20, 0.95);
+    user-select: none;
+    -webkit-user-select: none;
 }
 .modalControls{

@@ -520,4 +515,4 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
     height: 480px !important;
     max-height: 480px !important;
     min-height: 480px !important;
-}
+}
\ No newline at end of file