From 7a3f4e2933a7bb35a68f8714c8b154f1efef5895 Mon Sep 17 00:00:00 2001
From: yiyixuxu
Date: Thu, 8 Feb 2024 23:57:39 +0000
Subject: [PATCH] add

---
 src/diffusers/models/attention_processor.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py
index acf2a4882b58..d501213956bd 100644
--- a/src/diffusers/models/attention_processor.py
+++ b/src/diffusers/models/attention_processor.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import inspect
 from importlib import import_module
 from typing import Callable, Optional, Union
 
@@ -509,6 +510,15 @@ def forward(
         # The `Attention` class can call different attention processors / attention functions
         # here we simply pass along all tensors to the selected processor class
         # For standard processors that are defined here, `**cross_attention_kwargs` is empty
+
+        attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
+        unused_kwargs = [k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters]
+        if len(unused_kwargs) > 0:
+            logger.warning(
+                f"cross_attention_kwargs {unused_kwargs} are not expected by {self.processor.__class__.__name__} and will be ignored."
+            )
+        cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters}
+
         return self.processor(
             self,
             hidden_states,
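
Note (not part of the patch): the change inspects the signature of the active attention
processor's `__call__`, warns once about any entries in `cross_attention_kwargs` that the
processor does not declare, and forwards only the accepted keys. Below is a minimal
standalone sketch of that signature-inspection pattern, assuming an illustrative
`example_processor` function and the standard `logging` module (neither appears in the
patch itself):

    import inspect
    import logging

    logging.basicConfig()
    logger = logging.getLogger("attn_kwargs_demo")


    def example_processor(hidden_states, scale=1.0):
        # Stand-in for an attention processor's __call__: accepts `scale` only.
        return hidden_states * scale


    def call_with_filtered_kwargs(processor, hidden_states, **kwargs):
        # Parameter names the processor declares.
        accepted = set(inspect.signature(processor).parameters.keys())
        # Warn about caller-supplied kwargs the processor does not expect.
        unused = [k for k in kwargs if k not in accepted]
        if len(unused) > 0:
            logger.warning(f"kwargs {unused} are not expected by {processor.__name__} and will be ignored.")
        # Forward only the accepted keyword arguments.
        kwargs = {k: v for k, v in kwargs.items() if k in accepted}
        return processor(hidden_states, **kwargs)


    print(call_with_filtered_kwargs(example_processor, 2.0, scale=3.0, ip_adapter_masks=None))
    # Warns about ['ip_adapter_masks'] and prints 6.0.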