stable-diffusion-webui / Commits

Commit 77f4237d
authored Oct 08, 2022 by AUTOMATIC
parent 4999eb2e

fix bugs related to variable prompt lengths
Showing 2 changed files with 37 additions and 12 deletions

modules/sd_hijack.py    +9  -5
modules/sd_samplers.py  +28 -7
modules/sd_hijack.py

@@ -89,7 +89,6 @@ class StableDiffusionModelHijack:
             layer.padding_mode = 'circular' if enable else 'zeros'
 
     def tokenize(self, text):
-        max_length = opts.max_prompt_tokens - 2
         _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
         return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
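The deleted line hard-coded the prompt length from `opts.max_prompt_tokens`; the returned length now comes from `get_target_prompt_token_count`, whose body is not part of this diff. A minimal sketch of what such a helper plausibly does, assuming it rounds the token count up to the next multiple of CLIP's 75 usable tokens (77 minus BOS and EOS); the body below is an assumption, not the repository's actual code:

import math

def get_target_prompt_token_count(token_count):
    # Hypothetical sketch: round the prompt length up to a multiple of 75,
    # CLIP's usable context size, so prompts of varying length all pad to
    # the same chunk boundary.
    return math.ceil(max(token_count, 1) / 75) * 75

# Example: a 30-token prompt targets 75; a 90-token prompt targets 150.
assert get_target_prompt_token_count(30) == 75
assert get_target_prompt_token_count(90) == 150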
@@ -174,7 +173,8 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
             if line in cache:
                 remade_tokens, fixes, multipliers = cache[line]
             else:
-                remade_tokens, fixes, multipliers, token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+                remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+                token_count = max(current_token_count, token_count)
 
                 cache[line] = (remade_tokens, fixes, multipliers)
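The bug fixed here: each line of the batch previously overwrote `token_count`, so a short final line could undersize the padding computed later for the whole batch. Keeping the running maximum sizes the padding for the longest prompt. A standalone illustration of the pattern, using a dummy whitespace tokenizer rather than the module's own:

def tokenize_line(line):
    # stand-in for self.tokenize_line: one token per word
    tokens = line.split()
    return tokens, len(tokens)

def longest_prompt_token_count(lines):
    token_count = 0
    for line in lines:
        _, current_token_count = tokenize_line(line)
        # keep the running maximum rather than overwriting it,
        # mirroring token_count = max(current_token_count, token_count)
        token_count = max(current_token_count, token_count)
    return token_count

print(longest_prompt_token_count(["a cat", "a very detailed oil painting"]))  # 5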
@@ -265,15 +265,19 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         if len(used_custom_terms) > 0:
             self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
 
-        position_ids_array = [min(x, 75) for x in range(len(remade_batch_tokens[0]) - 1)] + [76]
+        target_token_count = get_target_prompt_token_count(token_count) + 2
+
+        position_ids_array = [min(x, 75) for x in range(target_token_count - 1)] + [76]
         position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1))
 
-        tokens = torch.asarray(remade_batch_tokens).to(device)
+        remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
+        tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
+
         outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
         z = outputs.last_hidden_state
 
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(batch_multipliers).to(device)
+        batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
+        batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
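Two fixes above: every token list in the batch is padded with the EOS token (and every multiplier list with 1.0) out to the same `target_token_count`, so `torch.asarray` receives rectangular input, and the mean of the hidden states is restored after the per-token multipliers are applied. A self-contained sketch of the multiplier-and-mean step on dummy tensors; shapes and values are illustrative only:

import torch

batch, tokens, dim = 2, 4, 8
z = torch.randn(batch, tokens, dim)                 # stand-in for last_hidden_state
multipliers = torch.tensor([[1.0, 1.3, 1.3, 1.0],
                            [1.0, 0.7, 1.0, 1.0]])  # per-token attention weights

original_mean = z.mean()
# broadcast the (batch, tokens) weights over the embedding dimension
z = z * multipliers.reshape(batch, tokens, 1).expand(z.shape)
new_mean = z.mean()
# restore the original mean; as the diff's comment says, likely not
# strictly correct, but it suppresses artifacts in practice
z = z * (original_mean / new_mean)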
modules/sd_samplers.py
@@ -142,6 +142,16 @@ class VanillaStableDiffusionSampler:
         assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
         cond = tensor
 
+        # for DDIM, shapes must match, we can't just process cond and uncond independently;
+        # filling unconditional_conditioning with repeats of the last vector to match length is
+        # not 100% correct but should work well enough
+        if unconditional_conditioning.shape[1] < cond.shape[1]:
+            last_vector = unconditional_conditioning[:, -1:]
+            last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
+            unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
+        elif unconditional_conditioning.shape[1] > cond.shape[1]:
+            unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
+
         if self.mask is not None:
             img_orig = self.sampler.model.q_sample(self.init_latent, ts)
             x_dec = img_orig * self.mask + self.nmask * x_dec
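A runnable illustration of the pad-or-truncate step above on made-up conditioning tensors of shape (batch, tokens, dim); the variable names mirror the diff but the tensors are invented:

import torch

cond = torch.randn(2, 154, 768)    # conditioning for a long prompt
uncond = torch.randn(2, 77, 768)   # unconditional conditioning is shorter

if uncond.shape[1] < cond.shape[1]:
    # repeat the last token vector until the lengths match
    last_vector = uncond[:, -1:]
    repeated = last_vector.repeat([1, cond.shape[1] - uncond.shape[1], 1])
    uncond = torch.hstack([uncond, repeated])  # concatenates along dim 1
elif uncond.shape[1] > cond.shape[1]:
    uncond = uncond[:, :cond.shape[1]]         # truncate instead

assert uncond.shape == cond.shape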
@@ -221,18 +231,29 @@ class CFGDenoiser(torch.nn.Module):
         x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
         sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
-        cond_in = torch.cat([tensor, uncond])
 
-        if shared.batch_cond_uncond:
-            x_out = self.inner_model(x_in, sigma_in, cond=cond_in)
+        if tensor.shape[1] == uncond.shape[1]:
+            cond_in = torch.cat([tensor, uncond])
+
+            if shared.batch_cond_uncond:
+                x_out = self.inner_model(x_in, sigma_in, cond=cond_in)
+            else:
+                x_out = torch.zeros_like(x_in)
+                for batch_offset in range(0, x_out.shape[0], batch_size):
+                    a = batch_offset
+                    b = a + batch_size
+                    x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b])
         else:
             x_out = torch.zeros_like(x_in)
-            for batch_offset in range(0, x_out.shape[0], batch_size):
+            batch_size = batch_size * 2 if shared.batch_cond_uncond else batch_size
+            for batch_offset in range(0, tensor.shape[0], batch_size):
                 a = batch_offset
-                b = a + batch_size
-                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b])
+                b = min(a + batch_size, tensor.shape[0])
+                x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=tensor[a:b])
+
+            x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=uncond)
 
-        denoised_uncond = x_out[-batch_size:]
+        denoised_uncond = x_out[-uncond.shape[0]:]
         denoised = torch.clone(denoised_uncond)
 
         for i, conds in enumerate(conds_list):
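The restructured branch handles prompts and negative prompts that tokenize to different lengths: when `tensor` and `uncond` disagree on the token dimension they can no longer be concatenated into one `cond_in` batch, so the model is run on each separately, and `denoised_uncond` is sliced by `uncond.shape[0]` instead of `batch_size`. A toy sketch of that control flow with a stand-in denoiser; nothing here is the module's real API:

import torch

def inner_model(x, sigma, cond):
    # stand-in denoiser: indifferent to cond's token length, as a
    # cross-attention U-Net would be
    return x * 0.9

x_in = torch.randn(3, 4, 64, 64)     # 2 cond samples followed by 1 uncond sample
sigma_in = torch.ones(3)
tensor = torch.randn(2, 154, 768)    # cond embeddings (longer prompt)
uncond = torch.randn(1, 77, 768)     # uncond embeddings (shorter)

if tensor.shape[1] == uncond.shape[1]:
    # lengths match: evaluate everything in one batch
    x_out = inner_model(x_in, sigma_in, cond=torch.cat([tensor, uncond]))
else:
    # lengths differ: evaluate cond and uncond separately
    x_out = torch.zeros_like(x_in)
    x_out[:tensor.shape[0]] = inner_model(x_in[:tensor.shape[0]], sigma_in[:tensor.shape[0]], cond=tensor)
    x_out[-uncond.shape[0]:] = inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=uncond)

denoised_uncond = x_out[-uncond.shape[0]:]  # sliced by uncond batch size, not batch_size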