Describe the bug
The pipelines StableDiffusionAdapterPipeline and StableDiffusionXLAdapterPipeline produce an error when running with more than one T2IAdapter.
The adapters can be passed either as a plain list or via MultiAdapter.
This affects both the SD15 and SDXL variants and is not specific to a particular adapter model.
Reproduction
"""Reproduction script: StableDiffusionAdapterPipeline fails with multiple T2IAdapters.

Builds a base SD1.5 pipeline, confirms that plain txt2img and img2img work,
confirms that a single T2IAdapter works, then demonstrates that passing more
than one adapter — either as a plain list or wrapped in MultiAdapter — raises
"iteration over a 0-d tensor" inside MultiAdapter.forward.
"""
import torch
import diffusers
from PIL import Image
from rich import print

model_id = "runwayml/stable-diffusion-v1-5"
print(f'torch=={torch.__version__} diffusers=={diffusers.__version__}')
print(f'loading: {model_id}')
base = diffusers.StableDiffusionPipeline.from_pretrained(model_id, variant="fp16", cache_dir='/mnt/d/Models/Diffusers').to('cuda')
print('loaded')


def make_adapter_pipe(adapter):
    """Build a StableDiffusionAdapterPipeline reusing the base pipeline's components.

    `adapter` may be a single T2IAdapter, a list of them, or a MultiAdapter —
    only this argument differs between the three repro cases below.
    """
    return diffusers.StableDiffusionAdapterPipeline(
        vae=base.vae,
        text_encoder=base.text_encoder,
        tokenizer=base.tokenizer,
        unet=base.unet,
        scheduler=base.scheduler,
        requires_safety_checker=False,
        safety_checker=None,
        feature_extractor=None,
        adapter=adapter,
    ).to('cuda')


# Sanity checks: the base pipeline works for txt2img and img2img.
txt2img = diffusers.AutoPipelineForText2Image.from_pipe(base)
output = txt2img(prompt='test', negative_prompt='test', num_inference_steps=10)  # ok
print(f'txt2img: {output}')

img2img = diffusers.AutoPipelineForImage2Image.from_pipe(base)
image = Image.new('RGB', (512, 512), 0)  # input is irrelevant, so just creating blank image
output = img2img(prompt='test', negative_prompt='test', num_inference_steps=10, image=image)  # ok
print(f'img2img: {output}')

# Case 1: a single adapter works.
adapter1 = diffusers.T2IAdapter.from_pretrained('TencentARC/t2iadapter_depth_sd15v2', cache_dir='/mnt/d/Models/Diffusers')
pipe = make_adapter_pipe(adapter1)
output = pipe(prompt='test', negative_prompt='test', num_inference_steps=10, image=image)  # ok
print(f'adapter: {output}')

# Case 2: a plain list of adapters fails.
adapter2 = diffusers.T2IAdapter.from_pretrained('TencentARC/t2iadapter_zoedepth_sd15v1', cache_dir='/mnt/d/Models/Diffusers')
pipe = make_adapter_pipe([adapter1, adapter2])
output = pipe(prompt='test', negative_prompt='test', num_inference_steps=10, image=[image, image])  # fails
print(f'adapter-list: {output}')

# Case 3: wrapping in MultiAdapter fails the same way.
pipe = make_adapter_pipe(diffusers.MultiAdapter([adapter1, adapter2]))
output = pipe(prompt='test', negative_prompt='test', num_inference_steps=10, image=[image, image])  # also fails
print(f'multiadapter: {output}')
Logs
File "diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_adapter.py", line 884, in __call__
adapter_state = self.adapter(adapter_input, adapter_conditioning_scale)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "diffusers/models/adapter.py", line 92, in forward
for x, w, adapter in zip(xs, adapter_weights, self.adapters):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "torch/_tensor.py", line 990, in __iter__
raise TypeError("iteration over a 0-d tensor")
System Info
torch==2.1.2+cu121
diffusers==0.25.0.dev0
Who can help?
@sayakpaul @yiyixuxu @DN6 @patrickvonplaten
Describe the bug
pipelines
StableDiffusionAdapterPipeline and StableDiffusionXLAdapterPipeline produce an error when running with more than one T2IAdapter. The adapters can be passed either as a plain list or via
MultiAdapter. This affects both the SD15 and SDXL variants and is not specific to a particular adapter model.
Reproduction
Logs
System Info
torch==2.1.2+cu121
diffusers==0.25.0.dev0
Who can help?
@sayakpaul @yiyixuxu @DN6 @patrickvonplaten