stable-diffusion-webui

Commit 79e39fae authored Jan 06, 2023 by AUTOMATIC

CLIP hijack rework

parent 3246a2d6

Showing 5 changed files with 256 additions and 182 deletions
modules/sd_hijack.py                               +3    -3
modules/sd_hijack_clip.py                          +171  -177
modules/sd_hijack_clip_old.py                      +81   -0
modules/textual_inversion/textual_inversion.py     +0    -1
modules/ui.py                                      +1    -1
modules/sd_hijack.py
@@ -150,10 +150,10 @@ class StableDiffusionModelHijack:
     def clear_comments(self):
         self.comments = []
 
-    def tokenize(self, text):
-        _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
-        return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count)
+    def get_prompt_lengths(self, text):
+        _, token_count = self.clip.process_texts([text])
+        return token_count, self.clip.get_target_prompt_token_count(token_count)
 
 
 class EmbeddingsWithFixes(torch.nn.Module):
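For orientation, a hedged usage sketch (not part of this commit) of the new helper: get_prompt_lengths now returns only the prompt's token count and the current target length, whereas the removed tokenize() additionally returned the remade token ids. The prompt and printed numbers below are illustrative assumptions.

# hypothetical usage, assuming a loaded webui where model_hijack is the active StableDiffusionModelHijack
token_count, max_length = model_hijack.get_prompt_lengths("a photo of an astronaut riding a horse")
print(f"{token_count}/{max_length}")  # e.g. "9/75"; actual numbers depend on the tokenizer

# the removed method also returned the remade token ids as its first element:
#   tokens, token_count, max_length = model_hijack.tokenize(text)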
modules/sd_hijack_clip.py (this diff is collapsed and is not shown here)
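The bulk of the rework lives in this collapsed file. As background for the helpers shown below, here is a minimal, self-contained sketch of how per-token emphasis multipliers are typically applied to the CLIP encoder output in this project; the function name and the mean-rescaling step are assumptions for illustration, not the collapsed file's verbatim contents.

import torch

def apply_multipliers(z: torch.Tensor, batch_multipliers: torch.Tensor) -> torch.Tensor:
    # z: encoder output with shape (batch, tokens, channels)
    # batch_multipliers: per-token weights with shape (batch, tokens)
    original_mean = z.mean()
    z = z * batch_multipliers.unsqueeze(-1).expand(z.shape)
    # rescale so the overall magnitude stays roughly unchanged after weighting
    z = z * (original_mean / z.mean())
    return z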
modules/sd_hijack_clip_old.py (new file, mode 100644)
from modules import sd_hijack_clip
from modules import shared


def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
    id_start = self.id_start
    id_end = self.id_end
    maxlen = self.wrapped.max_length  # you get to stay at 77
    used_custom_terms = []
    remade_batch_tokens = []
    hijack_comments = []
    hijack_fixes = []
    token_count = 0

    cache = {}
    batch_tokens = self.tokenize(texts)
    batch_multipliers = []
    for tokens in batch_tokens:
        tuple_tokens = tuple(tokens)

        if tuple_tokens in cache:
            remade_tokens, fixes, multipliers = cache[tuple_tokens]
        else:
            fixes = []
            remade_tokens = []
            multipliers = []
            mult = 1.0

            i = 0
            while i < len(tokens):
                token = tokens[i]

                embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)

                mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None
                if mult_change is not None:
                    mult *= mult_change
                    i += 1
                elif embedding is None:
                    remade_tokens.append(token)
                    multipliers.append(mult)
                    i += 1
                else:
                    emb_len = int(embedding.vec.shape[0])
                    fixes.append((len(remade_tokens), embedding))
                    remade_tokens += [0] * emb_len
                    multipliers += [mult] * emb_len
                    used_custom_terms.append((embedding.name, embedding.checksum()))
                    i += embedding_length_in_tokens

            if len(remade_tokens) > maxlen - 2:
                vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
                ovf = remade_tokens[maxlen - 2:]
                overflowing_words = [vocab.get(int(x), "") for x in ovf]
                overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
                hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")

            token_count = len(remade_tokens)
            remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
            remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
            cache[tuple_tokens] = (remade_tokens, fixes, multipliers)

        multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
        multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]

        remade_batch_tokens.append(remade_tokens)
        hijack_fixes.append(fixes)
        batch_multipliers.append(multipliers)

    return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count


def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts):
    batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts)

    self.hijack.comments += hijack_comments

    if len(used_custom_terms) > 0:
        self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))

    self.hijack.fixes = hijack_fixes
    return self.process_tokens(remade_batch_tokens, batch_multipliers)
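For readers unfamiliar with the emphasis syntax handled by the mult_change branch above, here is a small self-contained sketch of the same weight-accumulation idea on plain strings; the bracket multiplier values and the example are assumptions for illustration, not code from this repository.

def toy_multipliers(tokens):
    # assumed table: "(" strengthens following tokens, "[" weakens them, closers undo the change
    token_mults = {"(": 1.1, ")": 1 / 1.1, "[": 1 / 1.1, "]": 1.1}

    kept, multipliers, mult = [], [], 1.0
    for tok in tokens:
        change = token_mults.get(tok)
        if change is not None:
            mult *= change            # emphasis token: adjust the running weight, drop the token
        else:
            kept.append(tok)          # ordinary token: keep it with the current weight
            multipliers.append(mult)
    return kept, multipliers

# toy_multipliers(["a", "(", "red", ")", "car"])
# -> (["a", "red", "car"], [1.0, 1.1, 1.0])   (last value is ~1.0 up to float rounding)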
modules/textual_inversion/textual_inversion.py
@@ -79,7 +79,6 @@ class EmbeddingDatabase:
        self.word_embeddings[embedding.name] = embedding

        # TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working

        ids = model.cond_stage_model.tokenize([embedding.name])[0]

        first_id = ids[0]
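For context (not part of this commit): the ids registered above feed the embedding database lookup that process_text_old calls as self.hijack.embedding_db.find_embedding_at_position(tokens, i). A hedged sketch of how such a lookup plausibly works, written as a free function over an assumed ids_lookup mapping from first token id to (ids, embedding) pairs:

def find_embedding_at_position(ids_lookup, tokens, offset):
    # return (embedding, length_in_tokens) if some embedding's token ids start at offset, else (None, None)
    possible_matches = ids_lookup.get(tokens[offset])
    if possible_matches is None:
        return None, None

    for ids, embedding in possible_matches:
        if tokens[offset:offset + len(ids)] == ids:
            return embedding, len(ids)

    return None, None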
modules/ui.py
@@ -368,7 +368,7 @@ def update_token_counter(text, steps):
     flat_prompts = reduce(lambda list1, list2: list1 + list2, prompt_schedules)
     prompts = [prompt_text for step, prompt_text in flat_prompts]
-    tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
+    token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
     style_class = ' class="red"' if (token_count > max_length) else ""
     return f"<span {style_class}>{token_count}/{max_length}</span>"
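To make the changed call concrete, a small standalone example of the max(..., key=...) selection and the rendered counter; the (token_count, max_length) pairs are invented for illustration, as get_prompt_lengths might return them.

# hypothetical per-prompt lengths
lengths = [(12, 75), (80, 75)]
token_count, max_length = max(lengths, key=lambda args: args[0])
style_class = ' class="red"' if (token_count > max_length) else ""
print(f"<span {style_class}>{token_count}/{max_length}</span>")
# -> <span  class="red">80/75</span>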