diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index ac9563e186bb..8aa6797c9211 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -1610,6 +1610,7 @@ def __call__(
         hidden_states: torch.FloatTensor,
         encoder_hidden_states: Optional[torch.FloatTensor] = None,
         attention_mask: Optional[torch.FloatTensor] = None,
+        temb: Optional[torch.FloatTensor] = None,  # other callers of attention (e.g. UNetMidBlock2D) pass the time embedding as a kwarg, so the signature must accept it; this is not the right place to fix that problem
     ) -> torch.FloatTensor:
         residual = hidden_states
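For context, a minimal sketch (assumed class and method names, not the actual diffusers implementation) of why the processor's `__call__` must declare `temb`: callers such as UNetMidBlock2D forward the time embedding as a keyword argument, and Python raises a TypeError if the callee's signature does not accept it.

```python
from typing import Optional

import torch


class SketchAttnProcessor:
    # Simplified stand-in for an attention processor's __call__.
    def __call__(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        temb: Optional[torch.FloatTensor] = None,  # accepted here even though this processor ignores it
    ) -> torch.FloatTensor:
        return hidden_states


class SketchMidBlock:
    # Stand-in for a block (like UNetMidBlock2D) that forwards temb as a kwarg.
    def __init__(self) -> None:
        self.processor = SketchAttnProcessor()

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:
        # If SketchAttnProcessor.__call__ did not declare `temb`,
        # this call would raise: TypeError: __call__() got an unexpected keyword argument 'temb'
        return self.processor(hidden_states, temb=temb)


if __name__ == "__main__":
    block = SketchMidBlock()
    out = block.forward(torch.randn(1, 4, 8, 8), temb=torch.randn(1, 16))
    print(out.shape)
```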