stable-diffusion-webui

Commit 59146621 authored Jan 23, 2023 by AUTOMATIC
better support for xformers flash attention on older versions of torch
parent 3fa48207
Showing 2 changed files with 30 additions and 24 deletions

modules/errors.py                    +12 -0
modules/sd_hijack_optimizations.py   +18 -24
modules/errors.py

@@ -24,6 +24,18 @@ See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable
 """
 )
 
+already_displayed = {}
+
+
+def display_once(e: Exception, task):
+    if task in already_displayed:
+        return
+
+    display(e, task)
+
+    already_displayed[task] = 1
+
+
 def run(code, task):
     try:
         code()
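For context, a short usage sketch of the new helper (an assumption on my part, not part of the commit; it presumes running inside the webui environment where modules.errors is importable, and the error message is illustrative): display_once reports an error for a given task only the first time it occurs.

from modules import errors

try:
    # Illustrative failure; any exception hit while probing flash attention would look similar.
    raise RuntimeError("flash attention op not available in this build")
except Exception as e:
    errors.display_once(e, "enabling flash attention")
    errors.display_once(e, "enabling flash attention")  # same task: already reported, silently skipped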
modules/sd_hijack_optimizations.py

@@ -9,7 +9,7 @@ from torch import einsum
 from ldm.util import default
 from einops import rearrange
 
-from modules import shared
+from modules import shared, errors
 from modules.hypernetworks import hypernetwork
 from .sub_quadratic_attention import efficient_dot_product_attention
 
@@ -279,6 +279,21 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
     )
 
 
+def get_xformers_flash_attention_op(q, k, v):
+    if not shared.cmd_opts.xformers_flash_attention:
+        return None
+
+    try:
+        flash_attention_op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp
+        fw, bw = flash_attention_op
+        if fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)):
+            return flash_attention_op
+    except Exception as e:
+        errors.display_once(e, "enabling flash attention")
+
+    return None
+
+
 def xformers_attention_forward(self, x, context=None, mask=None):
     h = self.heads
     q_in = self.to_q(x)
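The probe is wrapped in try/except because older torch and xformers builds may not expose MemoryEfficientAttentionFlashAttentionOp or fmha.Inputs at all; any failure is reported once via errors.display_once and the caller falls back to xformers' default backend. Below is a minimal standalone sketch of the same support check, not part of the commit: it assumes xformers and a CUDA-capable torch are installed, and the tensor shape and dtype are illustrative only.

import torch
import xformers.ops

# Illustrative half-precision attention inputs in the (batch, tokens, heads, head_dim)
# layout that the attention forwards rearrange into before calling xformers.
q = k = v = torch.randn(1, 4096, 8, 40, dtype=torch.float16, device="cuda")

fw, bw = xformers.ops.MemoryEfficientAttentionFlashAttentionOp
inputs = xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)
print(fw.supports(inputs))  # True if the flash attention forward op accepts these inputs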
@@ -291,18 +306,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
     del q_in, k_in, v_in
 
-    if shared.cmd_opts.xformers_flash_attention:
-        op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp
-        fw, bw = op
-        if not fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v, attn_bias=None)):
-            # print('xformers_attention_forward', q.shape, k.shape, v.shape)
-            # Flash Attention is not availabe for the input arguments.
-            # Fallback to default xFormers' backend.
-            op = None
-    else:
-        op = None
-
-    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=op)
+    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
 
     out = rearrange(out, 'b n h d -> b n (h d)', h=h)
     return self.to_out(out)
@@ -377,17 +381,7 @@ def xformers_attnblock_forward(self, x):
         q = q.contiguous()
         k = k.contiguous()
         v = v.contiguous()
-        if shared.cmd_opts.xformers_flash_attention:
-            op = xformers.ops.MemoryEfficientAttentionFlashAttentionOp
-            fw, bw = op
-            if not fw.supports(xformers.ops.fmha.Inputs(query=q, key=k, value=v)):
-                # print('xformers_attnblock_forward', q.shape, k.shape, v.shape)
-                # Flash Attention is not availabe for the input arguments.
-                # Fallback to default xFormers' backend.
-                op = None
-        else:
-            op = None
-        out = xformers.ops.memory_efficient_attention(q, k, v, op=op)
+        out = xformers.ops.memory_efficient_attention(q, k, v, op=get_xformers_flash_attention_op(q, k, v))
         out = rearrange(out, 'b (h w) c -> b c h w', h=h)
         out = self.proj_out(out)
         return x + out
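As before, the flash attention path stays opt-in: it only activates when shared.cmd_opts.xformers_flash_attention is set, which (assuming the webui's usual argument naming, where cmd_opts fields map to dashed flags) corresponds to launching with --xformers-flash-attention in addition to --xformers.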