@@ -314,9 +314,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -329,6 +329,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -378,14 +380,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
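The hunks above, and the identical hunks in the files below, apply the same refactor to each SDXL pipeline's encode_prompt: `prompt` and `prompt_2` are normalized from `str` to `list` up front so that `batch_size` is always `len(prompt)`, and `negative_prompt` / `negative_prompt_2` are expanded to `batch_size`-length lists before the existing type and length checks. A minimal, self-contained sketch of that normalization follows; the helper name `normalize_prompt_inputs` and the standalone framing are illustrative and not part of the diff.

from typing import List, Optional, Tuple, Union


def normalize_prompt_inputs(
    prompt: Optional[Union[str, List[str]]],
    negative_prompt: Union[str, List[str]] = "",
    embeds_batch_size: Optional[int] = None,
) -> Tuple[Optional[List[str]], List[str], int]:
    # str -> list, so batch_size can always be read as len(prompt)
    prompt = [prompt] if isinstance(prompt, str) else prompt
    batch_size = len(prompt) if prompt is not None else embeds_batch_size

    # expand a single negative prompt to one entry per prompt in the batch
    negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

    if prompt is not None and type(prompt) is not type(negative_prompt):
        raise TypeError("`negative_prompt` should have the same type as `prompt`.")
    if batch_size != len(negative_prompt):
        raise ValueError("`negative_prompt` and `prompt` must have the same batch size.")
    return prompt, negative_prompt, batch_size


# normalize_prompt_inputs("a cat") returns (["a cat"], [""], 1)
# normalize_prompt_inputs(["a cat", "a dog"], "blurry") returns (["a cat", "a dog"], ["blurry", "blurry"], 2)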
16 changes: 11 additions & 5 deletions src/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py
@@ -287,9 +287,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -302,6 +302,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -351,14 +353,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
@@ -325,9 +325,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -340,6 +340,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -389,14 +391,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
@@ -263,9 +263,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -278,6 +278,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -327,14 +329,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
@@ -270,9 +270,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -285,6 +285,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -334,14 +336,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
@@ -419,9 +419,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -434,6 +434,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -483,14 +485,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
@@ -287,9 +287,9 @@ def encode_prompt(
             adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
             adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+
+        if prompt is not None:
             batch_size = len(prompt)
         else:
             batch_size = prompt_embeds.shape[0]
@@ -302,6 +302,8 @@ def encode_prompt(

         if prompt_embeds is None:
             prompt_2 = prompt_2 or prompt
+            prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
             # textual inversion: procecss multi-vector tokens if necessary
             prompt_embeds_list = []
             prompts = [prompt, prompt_2]
@@ -351,14 +353,18 @@ def encode_prompt(
             negative_prompt = negative_prompt or ""
             negative_prompt_2 = negative_prompt_2 or negative_prompt

+            # normalize str to list
+            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+            negative_prompt_2 = (
+                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+            )
+
             uncond_tokens: List[str]
             if prompt is not None and type(prompt) is not type(negative_prompt):
                 raise TypeError(
                     f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                     f" {type(prompt)}."
                 )
-            elif isinstance(negative_prompt, str):
-                uncond_tokens = [negative_prompt, negative_prompt_2]
             elif batch_size != len(negative_prompt):
                 raise ValueError(
                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
36 changes: 36 additions & 0 deletions tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py
@@ -261,6 +261,42 @@ def test_stable_diffusion_xl_offloads(self):
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

+    def test_stable_diffusion_xl_img2img_prompt_embeds_only(self):
+        components = self.get_dummy_components()
+        sd_pipe = StableDiffusionXLPipeline(**components)
+        sd_pipe = sd_pipe.to(torch_device)
+        sd_pipe.set_progress_bar_config(disable=None)
+
+        # forward without prompt embeds
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        inputs["prompt"] = 3 * [inputs["prompt"]]
+
+        output = sd_pipe(**inputs)
+        image_slice_1 = output.images[0, -3:, -3:, -1]
+
+        # forward with prompt embeds
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        prompt = 3 * [inputs.pop("prompt")]
+
+        (
+            prompt_embeds,
+            _,
+            pooled_prompt_embeds,
+            _,
+        ) = sd_pipe.encode_prompt(prompt)
+
+        output = sd_pipe(
+            **inputs,
+            prompt_embeds=prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+        )
+        image_slice_2 = output.images[0, -3:, -3:, -1]
+
+        # make sure that it's equal
+        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
+
     def test_stable_diffusion_two_xl_mixture_of_denoiser(self):
         components = self.get_dummy_components()
         pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device)
@@ -559,6 +559,42 @@ def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
         # make sure that it's equal
         assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

+    def test_stable_diffusion_xl_img2img_prompt_embeds_only(self):
+        components = self.get_dummy_components()
+        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
+        sd_pipe = sd_pipe.to(torch_device)
+        sd_pipe.set_progress_bar_config(disable=None)
+
+        # forward without prompt embeds
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        inputs["prompt"] = 3 * [inputs["prompt"]]
+
+        output = sd_pipe(**inputs)
+        image_slice_1 = output.images[0, -3:, -3:, -1]
+
+        # forward with prompt embeds
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        prompt = 3 * [inputs.pop("prompt")]
+
+        (
+            prompt_embeds,
+            _,
+            pooled_prompt_embeds,
+            _,
+        ) = sd_pipe.encode_prompt(prompt)
+
+        output = sd_pipe(
+            **inputs,
+            prompt_embeds=prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+        )
+        image_slice_2 = output.images[0, -3:, -3:, -1]
+
+        # make sure that it's equal
+        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
+
     def test_attention_slicing_forward_pass(self):
         super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
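Both new tests exercise the same contract: encoding a list of prompts with encode_prompt and passing the resulting embeddings back into the pipeline should reproduce the plain-text-prompt result. Below is a rough usage sketch of that flow outside the test harness, assuming the public SDXL base checkpoint (whose config sets force_zeros_for_empty_prompt=True, so no negative embeddings need to be supplied); the prompt text, step count, and device are illustrative.

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

prompt = 3 * ["an astronaut riding a horse"]

# encode_prompt returns (prompt_embeds, negative_prompt_embeds,
# pooled_prompt_embeds, negative_pooled_prompt_embeds)
prompt_embeds, _, pooled_prompt_embeds, _ = pipe.encode_prompt(prompt)

# passing the precomputed embeddings should match passing `prompt` directly
images = pipe(
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    num_inference_steps=20,
).images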