From e35c22e3fe7e8973a2a4522ed1a360d9bc53d5ba Mon Sep 17 00:00:00 2001
From: lsb
Date: Sun, 21 Jan 2024 16:38:30 -0800
Subject: [PATCH] Ensure the `temb` kwarg is not passed unexpectedly to attn

---
 src/diffusers/models/attention_processor.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ac9563e186bb..8aa6797c9211 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -1610,6 +1610,7 @@ def __call__(
         hidden_states: torch.FloatTensor,
         encoder_hidden_states: Optional[torch.FloatTensor] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
+        temb: Optional[torch.FloatTensor] = None,  # other callers of attention (e.g. UNetMidBlock2D) pass a `temb` (time embedding) kwarg, which Python requires the signature to accept; this is not the right place to fix this problem
     ) -> torch.FloatTensor:
         residual = hidden_states
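
As background for the change above, here is a minimal standalone sketch (hypothetical classes, not the diffusers API) of the failure mode being worked around: Python raises a TypeError when a caller passes a keyword argument that the callee's signature does not declare, so a processor's __call__ must accept `temb` (or take **kwargs) once callers such as UNetMidBlock2D pass it unconditionally.

# Minimal sketch of the kwarg-acceptance issue; the classes below are
# illustrative stand-ins, not the actual diffusers attention processors.

class ProcessorWithoutTemb:
    def __call__(self, hidden_states, encoder_hidden_states=None, attention_mask=None):
        return hidden_states


class ProcessorWithTemb:
    def __call__(self, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
        # `temb` is accepted but unused here; callers that always pass it
        # (e.g. a mid-block forwarding its time embedding) no longer crash.
        return hidden_states


hidden_states = [[0.0, 1.0]]  # stand-in for a tensor
time_embedding = [0.5]        # stand-in for a time embedding

try:
    ProcessorWithoutTemb()(hidden_states, temb=time_embedding)
except TypeError as err:
    print("fails:", err)  # "got an unexpected keyword argument 'temb'"

print("ok:", ProcessorWithTemb()(hidden_states, temb=time_embedding))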