
Commit

Remove unnecessary code.
comfyanonymous committed May 18, 2024
1 parent 1c4af59 commit 98f828f
Showing 2 changed files with 2 additions and 10 deletions.
11 changes: 2 additions & 9 deletions comfy/ldm/modules/attention.py
@@ -318,11 +318,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
         return attention_pytorch(q, k, v, heads, mask)

     q, k, v = map(
-        lambda t: t.unsqueeze(3)
-        .reshape(b, -1, heads, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b * heads, -1, dim_head)
-        .contiguous(),
+        lambda t: t.reshape(b, -1, heads, dim_head),
         (q, k, v),
     )

@@ -335,10 +331,7 @@ def attention_xformers(q, k, v, heads, mask=None, attn_precision=None):
     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)

     out = (
-        out.unsqueeze(0)
-        .reshape(b, heads, -1, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b, -1, heads * dim_head)
+        out.reshape(b, -1, heads * dim_head)
     )
     return out

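Why the removed code was unnecessary: xformers.ops.memory_efficient_attention accepts query/key/value tensors in [batch, seq_len, heads, dim_head] (BMHK) layout directly, so the extra permute/reshape into a flattened [batch * heads, seq_len, dim_head] layout, and the matching un-flattening of the output, can be dropped. Below is a minimal sketch of the simplified path; it is not taken from the repository, and it assumes xformers is installed and a CUDA device is available. The shapes are illustrative.

import torch
import xformers.ops

b, seq_len, heads, dim_head = 2, 64, 8, 40

# Fused [batch, seq_len, heads * dim_head] tensors, as attention_xformers receives them.
q = torch.randn(b, seq_len, heads * dim_head, device="cuda", dtype=torch.float16)
k = torch.randn_like(q)
v = torch.randn_like(q)

# New path from the diff: a single reshape into BMHK, no permute/contiguous.
q, k, v = map(lambda t: t.reshape(b, -1, heads, dim_head), (q, k, v))

out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)

# The output is also BMHK, so collapsing the heads back is again one reshape.
out = out.reshape(b, -1, heads * dim_head)
print(out.shape)  # torch.Size([2, 64, 320])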
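For anyone verifying the refactor offline, here is a torch-only check, also not from the commit, that the removed chain and the kept reshape carry the same data, just in [batch * heads, seq_len, dim_head] versus [batch, seq_len, heads, dim_head] layout:

import torch

b, seq_len, heads, dim_head = 2, 16, 4, 8
t = torch.randn(b, seq_len, heads * dim_head)

# Removed path: flatten the heads into the batch dimension.
old = (
    t.unsqueeze(3)
    .reshape(b, -1, heads, dim_head)
    .permute(0, 2, 1, 3)
    .reshape(b * heads, -1, dim_head)
    .contiguous()
)

# Kept path: one reshape into BMHK layout.
new = t.reshape(b, -1, heads, dim_head)

# Undo the head flattening and compare element-wise.
assert torch.equal(old.reshape(b, heads, seq_len, dim_head).permute(0, 2, 1, 3), new)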
1 change: 0 additions & 1 deletion comfy/ldm/modules/diffusionmodules/model.py
@@ -3,7 +3,6 @@
 import torch
 import torch.nn as nn
 import numpy as np
-from einops import rearrange
 from typing import Optional, Any
 import logging

