2 parents d342565 + 2ae814f, commit 96256aa
timm/layers/attention.py
@@ -154,8 +154,8 @@ def __init__(
         self.k_proj = nn.Linear(dim, attn_dim, bias=qkv_bias)
         self.v_proj = nn.Linear(dim, attn_dim, bias=qkv_bias)
 
-        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
-        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
+        self.q_norm = norm_layer(head_dim) if qk_norm else nn.Identity()
+        self.k_norm = norm_layer(head_dim) if qk_norm else nn.Identity()
         self.attn_drop = nn.Dropout(attn_drop)
         self.norm = norm_layer(attn_dim) if scale_norm else nn.Identity()
         self.proj = nn.Linear(attn_dim, dim)
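
For context, the hunk swaps the attribute `self.head_dim` for the local variable `head_dim` when sizing the qk-norm layers. Below is a minimal, self-contained sketch of that pattern; the constructor signature and the derivation of `attn_dim` are assumptions for illustration, not the actual timm source:

import torch.nn as nn

class Attention(nn.Module):
    # Illustrative sketch of the qk-norm pattern this commit touches.
    # The signature and the attn_dim computation are assumed, not copied
    # from timm; only the four projection/norm lines mirror the diff.
    def __init__(
            self,
            dim,
            num_heads=8,
            head_dim=None,  # assumed optional per-head size override
            qkv_bias=False,
            qk_norm=False,
            scale_norm=False,
            attn_drop=0.,
            norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        head_dim = head_dim or dim // num_heads
        attn_dim = head_dim * num_heads  # may differ from dim if head_dim is overridden
        self.q_proj = nn.Linear(dim, attn_dim, bias=qkv_bias)
        self.k_proj = nn.Linear(dim, attn_dim, bias=qkv_bias)
        self.v_proj = nn.Linear(dim, attn_dim, bias=qkv_bias)
        # The fixed lines: size the norms with the local head_dim computed
        # above rather than a self.head_dim attribute.
        self.q_norm = norm_layer(head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.norm = norm_layer(attn_dim) if scale_norm else nn.Identity()
        self.proj = nn.Linear(attn_dim, dim)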