stable-diffusion-webui

Commit 94853395 authored Oct 11, 2022 by AUTOMATIC
Parent: 5e2627a1

replace duplicate code with a function

Showing 2 changed files with 29 additions and 38 deletions:

    modules/hypernetwork.py                +14   -9
    modules/sd_hijack_optimizations.py     +15   -29
modules/hypernetwork.py

@@ -64,21 +64,26 @@ def load_hypernetwork(filename):
     shared.loaded_hypernetwork = None
 
 
+def apply_hypernetwork(hypernetwork, context):
+    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+
+    if hypernetwork_layers is None:
+        return context, context
+
+    context_k = hypernetwork_layers[0](context)
+    context_v = hypernetwork_layers[1](context)
+    return context_k, context_v
+
+
 def attention_CrossAttention_forward(self, x, context=None, mask=None):
     h = self.heads
 
     q = self.to_q(x)
     context = default(context, x)
 
-    hypernetwork = shared.loaded_hypernetwork
-    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
-
-    if hypernetwork_layers is not None:
-        k = self.to_k(hypernetwork_layers[0](context))
-        v = self.to_v(hypernetwork_layers[1](context))
-    else:
-        k = self.to_k(context)
-        v = self.to_v(context)
+    context_k, context_v = apply_hypernetwork(shared.loaded_hypernetwork, context)
+    k = self.to_k(context_k)
+    v = self.to_v(context_v)
 
     q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
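For reference, the extracted helper can be exercised on its own. The sketch below is a self-contained approximation, not code from the commit: the real caller passes `shared.loaded_hypernetwork`, whose layers come from a trained hypernetwork checkpoint, while here a hypothetical `DummyHypernetwork` with one `nn.Linear` pair stands in. It shows the two behaviours visible in the diff: with no hypernetwork loaded the context is returned unchanged for both k and v, and with one loaded the context is routed through the layer pair keyed by its embedding width before the `to_k`/`to_v` projections.

import torch
import torch.nn as nn


def apply_hypernetwork(hypernetwork, context):
    # Same logic as the new helper in modules/hypernetwork.py:
    # look up the (k, v) layer pair keyed by the context's embedding width.
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)

    if hypernetwork_layers is None:
        return context, context

    context_k = hypernetwork_layers[0](context)
    context_v = hypernetwork_layers[1](context)
    return context_k, context_v


class DummyHypernetwork:
    """Illustrative stand-in for a loaded hypernetwork: one layer pair per width."""
    def __init__(self, dim):
        self.layers = {dim: (nn.Linear(dim, dim), nn.Linear(dim, dim))}


context = torch.randn(2, 77, 768)            # (batch, tokens, embedding dim)

# No hypernetwork loaded: the context passes through untouched.
k_ctx, v_ctx = apply_hypernetwork(None, context)
assert k_ctx is context and v_ctx is context

# Hypernetwork loaded: k and v get separately transformed contexts.
k_ctx, v_ctx = apply_hypernetwork(DummyHypernetwork(768), context)
print(k_ctx.shape, v_ctx.shape)              # torch.Size([2, 77, 768]) twice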
modules/sd_hijack_optimizations.py

@@ -8,7 +8,8 @@ from torch import einsum
 from ldm.util import default
 from einops import rearrange
 
-from modules import shared
+from modules import shared, hypernetwork
 
 
 if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
     try:
@@ -26,16 +27,10 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
     q_in = self.to_q(x)
     context = default(context, x)
 
-    hypernetwork = shared.loaded_hypernetwork
-    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
-
-    if hypernetwork_layers is not None:
-        k_in = self.to_k(hypernetwork_layers[0](context))
-        v_in = self.to_v(hypernetwork_layers[1](context))
-    else:
-        k_in = self.to_k(context)
-        v_in = self.to_v(context)
-    del context, x
+    context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+    k_in = self.to_k(context_k)
+    v_in = self.to_v(context_v)
+    del context, context_k, context_v, x
 
     q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
     del q_in, k_in, v_in
@@ -59,22 +54,16 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
     return self.to_out(r2)
 
 
-# taken from https://github.com/Doggettx/stable-diffusion
+# taken from https://github.com/Doggettx/stable-diffusion and modified
 def split_cross_attention_forward(self, x, context=None, mask=None):
     h = self.heads
 
     q_in = self.to_q(x)
     context = default(context, x)
 
-    hypernetwork = shared.loaded_hypernetwork
-    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
-
-    if hypernetwork_layers is not None:
-        k_in = self.to_k(hypernetwork_layers[0](context))
-        v_in = self.to_v(hypernetwork_layers[1](context))
-    else:
-        k_in = self.to_k(context)
-        v_in = self.to_v(context)
+    context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+    k_in = self.to_k(context_k)
+    v_in = self.to_v(context_v)
 
     k_in *= self.scale
@@ -130,14 +119,11 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     h = self.heads
     q_in = self.to_q(x)
     context = default(context, x)
 
-    hypernetwork = shared.loaded_hypernetwork
-    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
-
-    if hypernetwork_layers is not None:
-        k_in = self.to_k(hypernetwork_layers[0](context))
-        v_in = self.to_v(hypernetwork_layers[1](context))
-    else:
-        k_in = self.to_k(context)
-        v_in = self.to_v(context)
+    context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context)
+    k_in = self.to_k(context_k)
+    v_in = self.to_v(context_v)
 
     q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
     del q_in, k_in, v_in
 
     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
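The context lines around the refactor also show the two einops layouts used by these attention paths: the split implementations fold the heads into the batch axis with 'b n (h d) -> (b h) n d', while the xformers path keeps heads as a separate axis with 'b n (h d) -> b n h d', the layout memory_efficient_attention expects. A minimal sketch of the two reshapes (tensor sizes are arbitrary and chosen only for illustration; this is not code from the commit):

import torch
from einops import rearrange

h = 8                                    # attention heads
x = torch.randn(2, 77, 8 * 40)           # (batch, tokens, heads * head_dim)

# Split-attention layout: heads folded into the batch axis.
split = rearrange(x, 'b n (h d) -> (b h) n d', h=h)
print(split.shape)                       # torch.Size([16, 77, 40])

# xformers layout: heads kept as their own axis.
xf = rearrange(x, 'b n (h d) -> b n h d', h=h)
print(xf.shape)                          # torch.Size([2, 77, 8, 40])

# Round-trip back to the original layout.
back = rearrange(split, '(b h) n d -> b n (h d)', h=h)
assert torch.equal(back, x)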