Unverified Commit 5d54f35c authored by C43H66N12O12S2, committed by GitHub

add xformers attnblock and hypernetwork support

parent b70eaeb2
@@ -98,6 +98,12 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     h = self.heads
     q_in = self.to_q(x)
     context = default(context, x)
-    k_in = self.to_k(context)
-    v_in = self.to_v(context)
+    hypernetwork = shared.selected_hypernetwork()
+    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+    if hypernetwork_layers is not None:
+        k_in = self.to_k(hypernetwork_layers[0](context))
+        v_in = self.to_v(hypernetwork_layers[1](context))
+    else:
+        k_in = self.to_k(context)
+        v_in = self.to_v(context)
     q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
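For context, the hunk above gates the k/v projections on an optional hypernetwork (a dict of layer pairs keyed by the context embedding width) and reshapes the projections into the (batch, tokens, heads, head_dim) layout that xformers.ops.memory_efficient_attention accepts. Below is a minimal standalone sketch of that call pattern; the sizes, the plain nn.Linear projections, and the empty hypernetwork dict are illustrative stand-ins, not code from this commit.

```python
# Illustrative sketch only: dummy sizes and plain nn.Linear projections stand in
# for the real CrossAttention module. xformers kernels generally need CUDA tensors.
import torch
import torch.nn as nn
import xformers.ops
from einops import rearrange

heads, dim_head, ctx_dim = 8, 64, 768
inner_dim = heads * dim_head

to_q = nn.Linear(ctx_dim, inner_dim, bias=False).cuda().half()
to_k = nn.Linear(ctx_dim, inner_dim, bias=False).cuda().half()
to_v = nn.Linear(ctx_dim, inner_dim, bias=False).cuda().half()

x = torch.randn(2, 77, ctx_dim, device='cuda', dtype=torch.half)
context = x  # default(context, x): self-attention when no conditioning is passed

# Hypernetwork hook: a dict keyed by the context width (context.shape[2]),
# mapping to a pair of modules applied to the context before the k/v projections.
# An empty dict here models the "no hypernetwork selected" case.
hypernetwork_layers = {}.get(context.shape[2])

q_in = to_q(x)
if hypernetwork_layers is not None:
    k_in = to_k(hypernetwork_layers[0](context))
    v_in = to_v(hypernetwork_layers[1](context))
else:
    k_in = to_k(context)
    v_in = to_v(context)

# (b, n, heads*dim_head) -> (b, n, heads, dim_head): the layout xformers expects
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=heads), (q_in, k_in, v_in))

out = xformers.ops.memory_efficient_attention(q, k, v)  # (b, n, heads, dim_head)
out = rearrange(out, 'b n h d -> b n (h d)')             # merge heads back
```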
@@ -169,3 +175,13 @@ def cross_attention_attnblock_forward(self, x):
     h3 += x
 
     return h3
+
+def xformers_attnblock_forward(self, x):
+    h_ = x
+    h_ = self.norm(h_)
+    q1 = self.q(h_).contiguous()
+    k1 = self.k(h_).contiguous()
+    v = self.v(h_).contiguous()
+    out = xformers.ops.memory_efficient_attention(q1, k1, v)
+    out = self.proj_out(out)
+    return x+out
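The q1/k1/v tensors in the new xformers_attnblock_forward come from convolutions over the AttnBlock's feature map, so they are laid out as (batch, channels, height, width). As a point of reference only (not part of this commit), a common way to feed such feature maps to xformers.ops.memory_efficient_attention is to flatten the spatial dimensions into a token axis and restore them afterwards, e.g. with einops; the sizes below are illustrative.

```python
# Standalone sketch, not code from this commit: flatten NCHW feature maps into
# (batch, tokens, channels) sequences for memory-efficient attention, then
# restore the spatial layout. Requires a CUDA device for the xformers kernels.
import torch
import xformers.ops
from einops import rearrange

b, c, h, w = 1, 512, 64, 64
q = torch.randn(b, c, h, w, device='cuda', dtype=torch.half)
k = torch.randn_like(q)
v = torch.randn_like(q)

# (b, c, h, w) -> (b, h*w, c): each spatial position becomes one token
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c').contiguous(), (q, k, v))

out = xformers.ops.memory_efficient_attention(q, k, v)  # (b, h*w, c)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)        # back to feature-map layout
```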