From 1dc231d14a48bd5ac48e53a5fa283e59da48673a Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Tue, 7 Nov 2023 22:51:33 +0530 Subject: [PATCH 01/14] [PixArt-Alpha] Support non-square images (#5672) * debug * support non-square images * add: test * fix: test --------- Co-authored-by: Patrick von Platen --- src/diffusers/models/transformer_2d.py | 4 +++- tests/pipelines/pixart/test_pixart.py | 18 +++++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/diffusers/models/transformer_2d.py b/src/diffusers/models/transformer_2d.py index 7c0cd12d1c67..24abf54d6da7 100644 --- a/src/diffusers/models/transformer_2d.py +++ b/src/diffusers/models/transformer_2d.py @@ -339,6 +339,7 @@ def forward( elif self.is_input_vectorized: hidden_states = self.latent_image_embedding(hidden_states) elif self.is_input_patches: + height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size hidden_states = self.pos_embed(hidden_states) if self.adaln_single is not None: @@ -425,7 +426,8 @@ def forward( hidden_states = hidden_states.squeeze(1) # unpatchify - height = width = int(hidden_states.shape[1] ** 0.5) + if self.adaln_single is None: + height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) diff --git a/tests/pipelines/pixart/test_pixart.py b/tests/pipelines/pixart/test_pixart.py index 2e033896d900..70548092fedf 100644 --- a/tests/pipelines/pixart/test_pixart.py +++ b/tests/pipelines/pixart/test_pixart.py @@ -174,13 +174,29 @@ def test_inference(self): inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] - print(torch.from_numpy(image_slice.flatten())) self.assertEqual(image.shape, (1, 8, 8, 3)) expected_slice = np.array([0.5303, 0.2658, 0.7979, 0.1182, 0.3304, 0.4608, 0.5195, 0.4261, 0.4675]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) + def test_inference_non_square_images(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs, height=32, width=48).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 32, 48, 3)) + expected_slice = np.array([0.3859, 0.2987, 0.2333, 0.5243, 0.6721, 0.4436, 0.5292, 0.5373, 0.4416]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + def test_inference_with_embeddings_and_multiple_images(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From aab6de22c33cc01fb7bc81c0807d6109e2c998c9 Mon Sep 17 00:00:00 2001 From: dg845 <58458699+dg845@users.noreply.github.com> Date: Tue, 7 Nov 2023 09:48:18 -0800 Subject: [PATCH 02/14] Improve LCMScheduler (#5681) * Refactor LCMScheduler.step such that prev_sample == denoised at the last timestep in the schedule. * Make timestep scaling when calculating boundary conditions configurable. * Reparameterize timestep_scaling to be a multiplicative rather than division scaling. 
* make style * fix dtype conversion * make style --------- Co-authored-by: Patrick von Platen --- src/diffusers/schedulers/scheduling_lcm.py | 22 +++++++++++++++------- tests/schedulers/test_scheduler_lcm.py | 6 +++--- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/diffusers/schedulers/scheduling_lcm.py b/src/diffusers/schedulers/scheduling_lcm.py index 8e2627b6f477..adcc092a816f 100644 --- a/src/diffusers/schedulers/scheduling_lcm.py +++ b/src/diffusers/schedulers/scheduling_lcm.py @@ -182,6 +182,10 @@ class LCMScheduler(SchedulerMixin, ConfigMixin): timestep_spacing (`str`, defaults to `"leading"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + timestep_scaling (`float`, defaults to 10.0): + The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions + `c_skip` and `c_out`. Increasing this will decrease the approximation error (although the approximation + error at the default of `10.0` is already pretty small). rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to @@ -208,6 +212,7 @@ def __init__( dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, timestep_spacing: str = "leading", + timestep_scaling: float = 10.0, rescale_betas_zero_snr: bool = False, ): if trained_betas is not None: @@ -380,12 +385,12 @@ def set_timesteps( self._step_index = None - def get_scalings_for_boundary_condition_discrete(self, t): + def get_scalings_for_boundary_condition_discrete(self, timestep): self.sigma_data = 0.5 # Default: 0.5 + scaled_timestep = timestep * self.config.timestep_scaling - # By dividing 0.1: This is almost a delta function at t=0. - c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2) - c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5 + c_skip = self.sigma_data**2 / (scaled_timestep**2 + self.sigma_data**2) + c_out = scaled_timestep / (scaled_timestep**2 + self.sigma_data**2) ** 0.5 return c_skip, c_out def step( @@ -466,9 +471,12 @@ def step( denoised = c_out * predicted_original_sample + c_skip * sample # 7. Sample and inject noise z ~ N(0, I) for MultiStep Inference - # Noise is not used for one-step sampling. - if len(self.timesteps) > 1: - noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device) + # Noise is not used on the final timestep of the timestep schedule. + # This also means that noise is not used for one-step sampling. 
+ if self.step_index != self.num_inference_steps - 1: + noise = randn_tensor( + model_output.shape, generator=generator, device=model_output.device, dtype=denoised.dtype + ) prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise else: prev_sample = denoised diff --git a/tests/schedulers/test_scheduler_lcm.py b/tests/schedulers/test_scheduler_lcm.py index 48b68fa47ddc..f7d511ff0573 100644 --- a/tests/schedulers/test_scheduler_lcm.py +++ b/tests/schedulers/test_scheduler_lcm.py @@ -230,7 +230,7 @@ def test_full_loop_onestep(self): result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean - assert abs(result_sum.item() - 18.7097) < 1e-2 + assert abs(result_sum.item() - 18.7097) < 1e-3 assert abs(result_mean.item() - 0.0244) < 1e-3 def test_full_loop_multistep(self): @@ -240,5 +240,5 @@ def test_full_loop_multistep(self): result_mean = torch.mean(torch.abs(sample)) # TODO: get expected sum and mean - assert abs(result_sum.item() - 280.5618) < 1e-2 - assert abs(result_mean.item() - 0.3653) < 1e-3 + assert abs(result_sum.item() - 197.7616) < 1e-3 + assert abs(result_mean.item() - 0.2575) < 1e-3 From 7942bb8dc2516e131879a446653db2b219157a3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Tolga=20Cang=C3=B6z?= <46008593+standardAI@users.noreply.github.com> Date: Wed, 8 Nov 2023 02:42:20 +0300 Subject: [PATCH 03/14] [`Docs`] Fix typos, improve, update at Using Diffusers' Task page (#5611) * Fix typos, improve, update; kandinsky doesn't want fp16 due to deprecation; ogkalu and kohbanye don't have safetensor; add make_image_grid for better visualization * Update inpaint.md * Remove erronous Space * Update docs/source/en/using-diffusers/conditional_image_generation.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update img2img.md * load_image() already converts to RGB * Update depth2img.md * Update img2img.md * Update inpaint.md --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- .../conditional_image_generation.md | 28 +++- docs/source/en/using-diffusers/depth2img.md | 23 +-- docs/source/en/using-diffusers/img2img.md | 157 +++++++++--------- docs/source/en/using-diffusers/inpaint.md | 126 ++++++++------ .../unconditional_image_generation.md | 15 +- 5 files changed, 187 insertions(+), 162 deletions(-) diff --git a/docs/source/en/using-diffusers/conditional_image_generation.md b/docs/source/en/using-diffusers/conditional_image_generation.md index d07658e4f58e..9832f53cffe6 100644 --- a/docs/source/en/using-diffusers/conditional_image_generation.md +++ b/docs/source/en/using-diffusers/conditional_image_generation.md @@ -30,6 +30,7 @@ You can generate images from a prompt in ๐Ÿค— Diffusers in two steps: ```py from diffusers import AutoPipelineForText2Image +import torch pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16" @@ -42,6 +43,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( image = pipeline( "stained glass of darth vader, backlight, centered composition, masterpiece, photorealistic, 8k" ).images[0] +image ```
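If you want to eyeball several candidates for the same prompt, here is a minimal sketch (assuming the `pipeline` object above is still loaded on the GPU) that generates a small batch in one call and tiles it with `make_image_grid`:

```py
from diffusers.utils import make_image_grid

# generate four candidates for the same prompt in a single call
images = pipeline(
    "stained glass of darth vader, backlight, centered composition, masterpiece, photorealistic, 8k",
    num_images_per_prompt=4,
).images
make_image_grid(images, rows=2, cols=2)
```

Batching trades memory for convenience; drop `num_images_per_prompt` back to 1 if you run out of VRAM.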
@@ -65,6 +67,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] +image ``` ### Stable Diffusion XL @@ -80,6 +83,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] +image ``` ### Kandinsky 2.2 @@ -93,15 +97,16 @@ from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, variant="fp16" + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator).images[0] +image ``` ### ControlNet -ControlNet are auxiliary models or adapters that are finetuned on top of text-to-image models, such as [Stable Diffusion V1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5). Using ControlNet models in combination with text-to-image models offers diverse options for more explicit control over how to generate an image. With ControlNet's, you add an additional conditioning input image to the model. For example, if you provide an image of a human pose (usually represented as multiple keypoints that are connected into a skeleton) as a conditioning input, the model generates an image that follows the pose of the image. Check out the more in-depth [ControlNet](controlnet) guide to learn more about other conditioning inputs and how to use them. +ControlNet models are auxiliary models or adapters that are finetuned on top of text-to-image models, such as [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5). Using ControlNet models in combination with text-to-image models offers diverse options for more explicit control over how to generate an image. With ControlNet, you add an additional conditioning input image to the model. For example, if you provide an image of a human pose (usually represented as multiple keypoints that are connected into a skeleton) as a conditioning input, the model generates an image that follows the pose of the image. Check out the more in-depth [ControlNet](controlnet) guide to learn more about other conditioning inputs and how to use them. In this example, let's condition the ControlNet with a human pose estimation image. Load the ControlNet model pretrained on human pose estimations: @@ -124,6 +129,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( ).to("cuda") generator = torch.Generator("cuda").manual_seed(31) image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=pose_image, generator=generator).images[0] +image ```
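The pose image above is downloaded ready-made; if you want to extract one from your own photo, one option is the `OpenposeDetector` from the separate `controlnet_aux` package (a sketch — `controlnet_aux` and `reference_image` are assumptions, not part of this guide):

```py
from controlnet_aux import OpenposeDetector

# `reference_image` is a placeholder for any PIL image of a person you have already loaded
open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
pose_image = open_pose(reference_image)
```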
@@ -163,6 +169,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", height=768, width=512 ).images[0] +image ```
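`height` and `width` generally need to be divisible by 8 because the latent space is 1/8 of the pixel resolution; a tiny helper for snapping arbitrary sizes (the function name is just illustrative):

```py
def snap_to_multiple_of_8(value: int) -> int:
    # Stable Diffusion latents are 1/8 of the pixel resolution, so sizes should be divisible by 8
    return max(8, (value // 8) * 8)

height, width = snap_to_multiple_of_8(770), snap_to_multiple_of_8(515)  # -> 768, 512
```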
@@ -171,7 +178,7 @@ image = pipeline( -Other models may have different default image sizes depending on the image size's in the training dataset. For example, SDXL's default image size is 1024x1024 and using lower `height` and `width` values may result in lower quality images. Make sure you check the model's API reference first! +Other models may have different default image sizes depending on the image sizes in the training dataset. For example, SDXL's default image size is 1024x1024 and using lower `height` and `width` values may result in lower quality images. Make sure you check the model's API reference first! @@ -189,6 +196,7 @@ pipeline = AutoPipelineForText2Image.from_pretrained( image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", guidance_scale=3.5 ).images[0] +image ```
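To see the effect directly, a sketch (reusing the pipeline above) that sweeps a few `guidance_scale` values while holding the seed fixed so only the guidance changes:

```py
import torch
from diffusers.utils import make_image_grid

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
images = []
for scale in [2.5, 7.5, 10.5]:
    # reseed each run so the only difference between images is guidance_scale
    generator = torch.Generator("cuda").manual_seed(31)
    images.append(pipeline(prompt, guidance_scale=scale, generator=generator).images[0])
make_image_grid(images, rows=1, cols=3)
```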
@@ -221,16 +229,17 @@ image = pipeline( prompt="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", negative_prompt="ugly, deformed, disfigured, poor details, bad anatomy", ).images[0] +image ```
-negative prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
+negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
-negative prompt = "astronaut"
+negative_prompt = "astronaut"
@@ -252,6 +261,7 @@ image = pipeline( "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", generator=generator, ).images[0] +image ``` ## Control image generation @@ -278,14 +288,14 @@ pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ).to("cuda") image = pipeline( - prompt_emebds=prompt_embeds, # generated from Compel + prompt_embeds=prompt_embeds, # generated from Compel negative_prompt_embeds=negative_prompt_embeds, # generated from Compel ).images[0] ``` ### ControlNet -As you saw in the [ControlNet](#controlnet) section, these models offer a more flexible and accurate way to generate images by incorporating an additional conditioning image input. Each ControlNet model is pretrained on a particular type of conditioning image to generate new images that resemble it. For example, if you take a ControlNet pretrained on depth maps, you can give the model a depth map as a conditioning input and it'll generate an image that preserves the spatial information in it. This is quicker and easier than specifying the depth information in a prompt. You can even combine multiple conditioning inputs with a [MultiControlNet](controlnet#multicontrolnet)! +As you saw in the [ControlNet](#controlnet) section, these models offer a more flexible and accurate way to generate images by incorporating an additional conditioning image input. Each ControlNet model is pretrained on a particular type of conditioning image to generate new images that resemble it. For example, if you take a ControlNet model pretrained on depth maps, you can give the model a depth map as a conditioning input and it'll generate an image that preserves the spatial information in it. This is quicker and easier than specifying the depth information in a prompt. You can even combine multiple conditioning inputs with a [MultiControlNet](controlnet#multicontrolnet)! There are many types of conditioning inputs you can use, and ๐Ÿค— Diffusers supports ControlNet for Stable Diffusion and SDXL models. Take a look at the more comprehensive [ControlNet](controlnet) guide to learn how you can use these models. @@ -300,7 +310,7 @@ from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16").to("cuda") -pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overheard", fullgraph=True) +pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` -For more tips on how to optimize your code to save memory and speed up inference, read the [Memory and speed](../optimization/fp16) and [Torch 2.0](../optimization/torch2.0) guides. \ No newline at end of file +For more tips on how to optimize your code to save memory and speed up inference, read the [Memory and speed](../optimization/fp16) and [Torch 2.0](../optimization/torch2.0) guides. 
diff --git a/docs/source/en/using-diffusers/depth2img.md b/docs/source/en/using-diffusers/depth2img.md index 0a6df2258235..84c613b0dade 100644 --- a/docs/source/en/using-diffusers/depth2img.md +++ b/docs/source/en/using-diffusers/depth2img.md @@ -20,12 +20,10 @@ Start by creating an instance of the [`StableDiffusionDepth2ImgPipeline`]: ```python import torch -import requests -from PIL import Image - from diffusers import StableDiffusionDepth2ImgPipeline +from diffusers.utils import load_image, make_image_grid -pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( +pipeline = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16, use_safetensors=True, @@ -36,22 +34,13 @@ Now pass your prompt to the pipeline. You can also pass a `negative_prompt` to p ```python url = "http://images.cocodataset.org/val2017/000000039769.jpg" -init_image = Image.open(requests.get(url, stream=True).raw) +init_image = load_image(url) prompt = "two tigers" -n_prompt = "bad, deformed, ugly, bad anatomy" -image = pipe(prompt=prompt, image=init_image, negative_prompt=n_prompt, strength=0.7).images[0] -image +negative_prompt = "bad, deformed, ugly, bad anatomy" +image = pipeline(prompt=prompt, image=init_image, negative_prompt=negative_prompt, strength=0.7).images[0] +make_image_grid([init_image, image], rows=1, cols=2) ``` | Input | Output | |---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| | | | - -Play around with the Spaces below and see if you notice a difference between generated images with and without a depth map! - - diff --git a/docs/source/en/using-diffusers/img2img.md b/docs/source/en/using-diffusers/img2img.md index 53d7c46b79c8..5caba021f39e 100644 --- a/docs/source/en/using-diffusers/img2img.md +++ b/docs/source/en/using-diffusers/img2img.md @@ -21,13 +21,15 @@ With ๐Ÿค— Diffusers, this is as easy as 1-2-3: 1. Load a checkpoint into the [`AutoPipelineForImage2Image`] class; this pipeline automatically handles loading the correct pipeline class based on the checkpoint: ```py +import torch from diffusers import AutoPipelineForImage2Image -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForImage2Image.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, variant="fp16", use_safetensors=True + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` @@ -48,7 +50,7 @@ init_image = load_image("https://huggingface.co/datasets/huggingface/documentati ```py prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k" image = pipeline(prompt, image=init_image).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
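If you want the result to be reproducible, the same call accepts a seeded generator (a small sketch reusing `pipeline` and `init_image` from the steps above; the seed value is arbitrary):

```py
import torch

# fix the seed so repeated runs return the same image
generator = torch.Generator("cuda").manual_seed(0)
prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"
image = pipeline(prompt, image=init_image, generator=generator).images[0]
```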
@@ -72,27 +74,25 @@ Stable Diffusion v1.5 is a latent diffusion model initialized from an earlier ch ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
@@ -112,27 +112,25 @@ SDXL is a more powerful version of the Stable Diffusion model. It uses a larger ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, strength=0.5).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
@@ -154,27 +152,25 @@ The simplest way to use Kandinsky 2.2 is: ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, variant="fp16", use_safetensors=True + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
@@ -199,32 +195,29 @@ There are several important parameters you can configure in the pipeline that'll - ๐Ÿ“ˆ a higher `strength` value gives the model more "creativity" to generate an image that's different from the initial image; a `strength` value of 1.0 means the initial image is more or less ignored - ๐Ÿ“‰ a lower `strength` value means the generated image is more similar to the initial image -The `strength` and `num_inference_steps` parameter are related because `strength` determines the number of noise steps to add. For example, if the `num_inference_steps` is 50 and `strength` is 0.8, then this means adding 40 (50 * 0.8) steps of noise to the initial image and then denoising for 40 steps to get the newly generated image. +The `strength` and `num_inference_steps` parameters are related because `strength` determines the number of noise steps to add. For example, if the `num_inference_steps` is 50 and `strength` is 0.8, then this means adding 40 (50 * 0.8) steps of noise to the initial image and then denoising for 40 steps to get the newly generated image. ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -image = init_image # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, strength=0.8).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
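A quick way to pick a `strength` value is to sweep a few of them on the same seed and compare the results side by side (a sketch that reuses `pipeline`, `init_image`, and `prompt` from above):

```py
import torch
from diffusers.utils import make_image_grid

images = [init_image]
for strength in [0.4, 0.6, 0.8]:
    # keep the seed fixed so only strength changes between runs
    generator = torch.Generator("cuda").manual_seed(0)
    images.append(pipeline(prompt, image=init_image, strength=strength, generator=generator).images[0])
make_image_grid(images, rows=1, cols=4)
```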
@@ -250,27 +243,25 @@ You can combine `guidance_scale` with `strength` for even more precise control o ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, guidance_scale=8.0).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
@@ -294,38 +285,36 @@ A negative prompt conditions the model to *not* include things in an image, and ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" # pass prompt and image to pipeline image = pipeline(prompt, negative_prompt=negative_prompt, image=init_image).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ```
-negative prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
+negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"
-negative prompt = "jungle"
+negative_prompt = "jungle"
@@ -342,52 +331,54 @@ Start by generating an image with the text-to-image pipeline: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch +from diffusers.utils import make_image_grid pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0] +text2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0] +text2image ``` Now you can pass this generated image to the image-to-image pipeline: ```py pipeline = AutoPipelineForImage2Image.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, variant="fp16", use_safetensors=True + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=image).images[0] -image +image2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=text2image).images[0] +make_image_grid([text2image, image2image], rows=1, cols=2) ``` ### Image-to-image-to-image -You can also chain multiple image-to-image pipelines together to create more interesting images. This can be useful for iteratively performing style transfer on an image, generate short GIFs, restore color to an image, or restore missing areas of an image. +You can also chain multiple image-to-image pipelines together to create more interesting images. This can be useful for iteratively performing style transfer on an image, generating short GIFs, restoring color to an image, or restoring missing areas of an image. 
Start by generating an image: ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" @@ -404,10 +395,11 @@ It is important to specify `output_type="latent"` in the pipeline to keep all th Pass the latent output from this pipeline to the next pipeline to generate an image in a [comic book art style](https://huggingface.co/ogkalu/Comic-Diffusion): ```py -pipelne = AutoPipelineForImage2Image.from_pretrained( - "ogkalu/Comic-Diffusion", torch_dtype=torch.float16, variant="fp16", use_safetensors=True +pipeline = AutoPipelineForImage2Image.from_pretrained( + "ogkalu/Comic-Diffusion", torch_dtype=torch.float16 ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # need to include the token "charliebo artstyle" in the prompt to use this checkpoint @@ -418,14 +410,15 @@ Repeat one more time to generate the final image in a [pixel art style](https:// ```py pipeline = AutoPipelineForImage2Image.from_pretrained( - "kohbanye/pixel-art-style", torch_dtype=torch.float16, variant="fp16", use_safetensors=True + "kohbanye/pixel-art-style", torch_dtype=torch.float16 ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # need to include the token "pixelartstyle" in the prompt to use this checkpoint image = pipeline("Astronaut in a jungle, pixelartstyle", image=image).images[0] -image +make_image_grid([init_image, image], rows=1, cols=2) ``` ### Image-to-upscaler-to-super-resolution @@ -436,21 +429,19 @@ Start with an image-to-image pipeline: ```py import torch -import requests -from PIL import Image -from io import BytesIO from diffusers import AutoPipelineForImage2Image +from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" @@ -467,7 +458,9 @@ It is important to specify `output_type="latent"` in the pipeline to keep all th Chain 
it to an upscaler pipeline to increase the image resolution: ```py -upscaler = AutoPipelineForImage2Image.from_pretrained( +from diffusers import StableDiffusionLatentUpscalePipeline + +upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") upscaler.enable_model_cpu_offload() @@ -479,14 +472,16 @@ image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0] Finally, chain it to a super-resolution pipeline to further enhance the resolution: ```py -super_res = AutoPipelineForImage2Image.from_pretrained( +from diffusers import StableDiffusionUpscalePipeline + +super_res = StableDiffusionUpscalePipeline.from_pretrained( "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") super_res.enable_model_cpu_offload() super_res.enable_xformers_memory_efficient_attention() -image_3 = upscaler(prompt, image=image_2).images[0] -image_3 +image_3 = super_res(prompt, image=image_2).images[0] +make_image_grid([init_image, image_3.resize((512, 512))], rows=1, cols=2) ``` ## Control image generation @@ -504,13 +499,14 @@ from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( - "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True + "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline(prompt_emebds=prompt_embeds, # generated from Compel - negative_prompt_embeds, # generated from Compel +image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel + negative_prompt_embeds=negative_prompt_embeds, # generated from Compel image=init_image, ).images[0] ``` @@ -522,19 +518,20 @@ ControlNets provide a more flexible and accurate way to control image generation For example, let's condition an image with a depth map to keep the spatial information in the image. 
```py +from diffusers.utils import load_image, make_image_grid + # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" -response = requests.get(url) -init_image = Image.open(BytesIO(response.content)).convert("RGB") +init_image = load_image(url) init_image = init_image.resize((958, 960)) # resize to depth image dimensions depth_image = load_image("https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png") +make_image_grid([init_image, depth_image], rows=1, cols=2) ``` Load a ControlNet model conditioned on depth maps and the [`AutoPipelineForImage2Image`]: ```py from diffusers import ControlNetModel, AutoPipelineForImage2Image -from diffusers.utils import load_image import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True) @@ -542,6 +539,7 @@ pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` @@ -549,8 +547,8 @@ Now generate a new image conditioned on the depth map, initial image, and prompt ```py prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" -image = pipeline(prompt, image=init_image, control_image=depth_image).images[0] -image +image_control_net = pipeline(prompt, image=init_image, control_image=depth_image).images[0] +make_image_grid([init_image, depth_image, image_control_net], rows=1, cols=3) ```
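The snippet above downloads a precomputed depth map; if you'd rather derive one from the initial image itself, one option (a sketch, assuming 🤗 Transformers is installed) is its `depth-estimation` pipeline:

```py
from transformers import pipeline as transformers_pipeline

# estimate a depth map directly from the initial image
depth_estimator = transformers_pipeline("depth-estimation")
depth_image = depth_estimator(init_image)["depth"]
```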
@@ -575,13 +573,14 @@ pipeline = AutoPipelineForImage2Image.from_pretrained( "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "elden ring style astronaut in a jungle" # include the token "elden ring style" in the prompt negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" -image = pipeline(prompt, negative_prompt=negative_prompt, image=init_image, strength=0.45, guidance_scale=10.5).images[0] -image +image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net, strength=0.45, guidance_scale=10.5).images[0] +make_image_grid([init_image, depth_image, image_control_net, image_elden_ring], rows=2, cols=2) ```
@@ -597,10 +596,10 @@ Running diffusion models is computationally expensive and intensive, but with a + pipeline.enable_xformers_memory_efficient_attention() ``` -With [`torch.compile`](../optimization/torch2.0#torch.compile), you can boost your inference speed even more by wrapping your UNet with it: +With [`torch.compile`](../optimization/torch2.0#torchcompile), you can boost your inference speed even more by wrapping your UNet with it: ```py -pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) +pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` To learn more, take a look at the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides. diff --git a/docs/source/en/using-diffusers/inpaint.md b/docs/source/en/using-diffusers/inpaint.md index 42bfb8984d9e..3d03d4e0e4d0 100644 --- a/docs/source/en/using-diffusers/inpaint.md +++ b/docs/source/en/using-diffusers/inpaint.md @@ -23,12 +23,13 @@ With ๐Ÿค— Diffusers, here is how you can do inpainting: ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` @@ -41,8 +42,8 @@ You'll notice throughout the guide, we use [`~DiffusionPipeline.enable_model_cpu 2. Load the base and mask images: ```py -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") ``` 3. Create a prompt to inpaint the image with and pass it to the pipeline with the base and mask images: @@ -51,6 +52,7 @@ mask_image = load_image("https://huggingface.co/datasets/huggingface/documentati prompt = "a black cat with glowing eyes, cute, adorable, disney, pixar, highly detailed, 8k" negative_prompt = "bad anatomy, deformed, ugly, disfigured" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ```
base image
+mask image
@@ -79,7 +85,7 @@ Upload a base image to inpaint on and use the sketch tool to draw a mask. Once y ## Popular models -[Stable Diffusion Inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting), [Stable Diffusion XL (SDXL) Inpainting](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1), and [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder-inpaint) are among the most popular models for inpainting. SDXL typically produces higher resolution images than Stable Diffusion v1.5, and Kandinsky 2.2 is also capable of generating high-quality images. +[Stable Diffusion Inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting), [Stable Diffusion XL (SDXL) Inpainting](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1), and [Kandinsky 2.2 Inpainting](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder-inpaint) are among the most popular models for inpainting. SDXL typically produces higher resolution images than Stable Diffusion v1.5, and Kandinsky 2.2 is also capable of generating high-quality images. ### Stable Diffusion Inpainting @@ -88,21 +94,23 @@ Stable Diffusion Inpainting is a latent diffusion model finetuned on 512x512 ima ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### Stable Diffusion XL (SDXL) Inpainting @@ -112,21 +120,23 @@ SDXL is a larger and more powerful version of Stable Diffusion v1.5. 
This model ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### Kandinsky 2.2 Inpainting @@ -136,21 +146,23 @@ The Kandinsky model family is similar to SDXL because it uses two models as well ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") generator = torch.Generator("cuda").manual_seed(92) prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ```
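All of the examples above download a ready-made mask. If you need to build one programmatically, a minimal sketch with Pillow — the rectangle coordinates are placeholders for whatever region you want repainted (white pixels are inpainted, black pixels are kept):

```py
from PIL import Image, ImageDraw

# white = repaint, black = preserve
mask_image = Image.new("L", init_image.size, 0)
draw = ImageDraw.Draw(mask_image)
draw.rectangle((250, 200, 450, 400), fill=255)  # placeholder coordinates for the area to inpaint
```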
@@ -186,20 +198,22 @@ Image features - like quality and "creativity" - are dependent on pipeline param ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, strength=0.6).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ```
@@ -229,20 +243,22 @@ You can use `strength` and `guidance_scale` together for more control over how e ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=2.5).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ```
@@ -267,22 +283,23 @@ A negative prompt assumes the opposite role of a prompt; it guides the model awa ```py import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" negative_prompt = "bad architecture, unstable, poor details, blurry" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=init_image, mask_image=mask_image).images[0] -image +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ```
@@ -302,7 +319,7 @@ import numpy as np import torch from diffusers import AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid device = "cuda" pipeline = AutoPipelineForInpainting.from_pretrained( @@ -334,6 +351,7 @@ mask_image_arr[mask_image_arr >= 0.5] = 1 unmasked_unchanged_image_arr = (1 - mask_image_arr) * init_image + mask_image_arr * repainted_image unmasked_unchanged_image = PIL.Image.fromarray(unmasked_unchanged_image_arr.round().astype("uint8")) unmasked_unchanged_image.save("force_unmasked_unchanged.png") +make_image_grid([init_image, mask_image, repainted_image, unmasked_unchanged_image], rows=2, cols=2) ``` ## Chained inpainting pipelines @@ -349,35 +367,37 @@ Start with the text-to-image pipeline to create a castle: ```py import torch from diffusers import AutoPipelineForText2Image, AutoPipelineForInpainting -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline("concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k").images[0] +text2image = pipeline("concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k").images[0] ``` Load the mask image of the output from above: ```py -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_text-chain-mask.png").convert("RGB") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_text-chain-mask.png") ``` And let's inpaint the masked area with a waterfall: ```py pipeline = AutoPipelineForInpainting.from_pretrained( - "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16, variant="fp16" + "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "digital painting of a fantasy waterfall, cloudy" -image = pipeline(prompt=prompt, image=image, mask_image=mask_image).images[0] -image +image = pipeline(prompt=prompt, image=text2image, mask_image=mask_image).images[0] +make_image_grid([text2image, mask_image, image], rows=1, cols=3) ```
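If both stages of a chain happen to use the same underlying checkpoint, you don't have to load the weights twice; a sketch of the `from_pipe` pattern (used again later in this guide), assuming a Stable Diffusion v1.5 text-to-image pipeline is already loaded as `pipeline`:

```py
from diffusers import AutoPipelineForInpainting

# reuse the already-loaded components instead of loading a second copy of the checkpoint
inpaint_pipeline = AutoPipelineForInpainting.from_pipe(pipeline).to("cuda")
```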
@@ -391,7 +411,6 @@ image
- ### Inpaint-to-image-to-image You can also chain an inpainting pipeline before another pipeline like image-to-image or an upscaler to improve the quality. @@ -401,23 +420,24 @@ Begin by inpainting an image: ```py import torch from diffusers import AutoPipelineForInpainting, AutoPipelineForImage2Image -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" -image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] +image_inpainting = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0] # resize image to 1024x1024 for SDXL -image = image.resize((1024, 1024)) +image_inpainting = image_inpainting.resize((1024, 1024)) ``` Now let's pass the image to another inpainting pipeline with SDXL's refiner model to enhance the image details and quality: @@ -427,9 +447,10 @@ pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline(prompt=prompt, image=image, mask_image=mask_image, output_type="latent").images[0] +image = pipeline(prompt=prompt, image=image_inpainting, mask_image=mask_image, output_type="latent").images[0] ``` @@ -442,9 +463,11 @@ Finally, you can pass this image to an image-to-image pipeline to put the finish ```py pipeline = AutoPipelineForImage2Image.from_pipe(pipeline) +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt=prompt, image=image).images[0] +make_image_grid([init_image, mask_image, image_inpainting, image], rows=2, cols=2) ```
@@ -477,18 +500,21 @@ Once you've generated the embeddings, pass them to the `prompt_embeds` (and `neg ```py import torch from diffusers import AutoPipelineForInpainting +from diffusers.utils import make_image_grid pipeline = AutoPipelineForInpainting.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() -image = pipeline(prompt_emebds=prompt_embeds, # generated from Compel - negative_prompt_embeds, # generated from Compel +image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel + negative_prompt_embeds=negative_prompt_embeds, # generated from Compel image=init_image, mask_image=mask_image ).images[0] +make_image_grid([init_image, mask_image, image], rows=1, cols=3) ``` ### ControlNet @@ -501,7 +527,7 @@ For example, let's condition an image with a ControlNet pretrained on inpaint im import torch import numpy as np from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline -from diffusers.utils import load_image +from diffusers.utils import load_image, make_image_grid # load ControlNet controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, variant="fp16") @@ -511,11 +537,12 @@ pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16" ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # load base and mask image -init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png").convert("RGB") -mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png").convert("RGB") +init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint.png") +mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/inpaint_mask.png") # prepare control image def make_inpaint_condition(init_image, mask_image): @@ -536,7 +563,7 @@ Now generate an image from the base, mask and control images. 
You'll notice feat ```py prompt = "concept art digital painting of an elven castle, inspired by lord of the rings, highly detailed, 8k" image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image, control_image=control_image).images[0] -image +make_image_grid([init_image, mask_image, PIL.Image.fromarray(np.uint8(control_image[0][0])).convert('RGB'), image], rows=2, cols=2) ``` You can take this a step further and chain it with an image-to-image pipeline to apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion): @@ -548,13 +575,14 @@ pipeline = AutoPipelineForImage2Image.from_pretrained( "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, ).to("cuda") pipeline.enable_model_cpu_offload() +# remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "elden ring style castle" # include the token "elden ring style" in the prompt negative_prompt = "bad architecture, deformed, disfigured, poor details" -image = pipeline(prompt, negative_prompt=negative_prompt, image=image).images[0] -image +image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image).images[0] +make_image_grid([init_image, mask_image, image, image_elden_ring], rows=2, cols=2) ```
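The ControlNet hunks above call a `make_inpaint_condition` helper whose body falls outside the diff context. A typical implementation of that kind of control-image preparation looks roughly like the sketch below (not necessarily the exact function in the guide); the key idea is that masked pixels are set to `-1.0` so the inpaint ControlNet can tell them apart from valid content:

```py
import numpy as np
import torch

# Sketch of an inpaint control-image preparation; the exact body is elided in the hunk above.
def make_inpaint_condition(init_image, mask_image):
    image = np.array(init_image.convert("RGB")).astype(np.float32) / 255.0
    mask = np.array(mask_image.convert("L")).astype(np.float32) / 255.0
    assert image.shape[:2] == mask.shape[:2], "image and mask must have the same size"
    image[mask > 0.5] = -1.0  # mark the region to repaint
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
    return torch.from_numpy(image)

control_image = make_inpaint_condition(init_image, mask_image)
```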
@@ -576,17 +604,17 @@ image It can be difficult and slow to run diffusion models if you're resource constrained, but it doesn't have to be with a few optimization tricks. One of the biggest (and easiest) optimizations you can enable is switching to memory-efficient attention. If you're using PyTorch 2.0, [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention) is automatically enabled and you don't need to do anything else. For non-PyTorch 2.0 users, you can install and use [xFormers](../optimization/xformers)'s implementation of memory-efficient attention. Both options reduce memory usage and accelerate inference. -You can also offload the model to the GPU to save even more memory: +You can also offload the model to the CPU to save even more memory: ```diff + pipeline.enable_xformers_memory_efficient_attention() + pipeline.enable_model_cpu_offload() ``` -To speed-up your inference code even more, use [`torch_compile`](../optimization/torch2.0#torch.compile). You should wrap `torch.compile` around the most intensive component in the pipeline which is typically the UNet: +To speed-up your inference code even more, use [`torch_compile`](../optimization/torch2.0#torchcompile). You should wrap `torch.compile` around the most intensive component in the pipeline which is typically the UNet: ```py -pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) +pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) ``` -Learn more in the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides. \ No newline at end of file +Learn more in the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides. diff --git a/docs/source/en/using-diffusers/unconditional_image_generation.md b/docs/source/en/using-diffusers/unconditional_image_generation.md index 3893f7cce276..c055bc75c5a4 100644 --- a/docs/source/en/using-diffusers/unconditional_image_generation.md +++ b/docs/source/en/using-diffusers/unconditional_image_generation.md @@ -23,16 +23,16 @@ You can use any of the ๐Ÿงจ Diffusers [checkpoints](https://huggingface.co/model -๐Ÿ’ก Want to train your own unconditional image generation model? Take a look at the training [guide](training/unconditional_training) to learn how to generate your own images. +๐Ÿ’ก Want to train your own unconditional image generation model? Take a look at the training [guide](../training/unconditional_training) to learn how to generate your own images. In this guide, you'll use [`DiffusionPipeline`] for unconditional image generation with [DDPM](https://arxiv.org/abs/2006.11239): ```python ->>> from diffusers import DiffusionPipeline +from diffusers import DiffusionPipeline ->>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128", use_safetensors=True) +generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128", use_safetensors=True) ``` The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. 
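Since the guide notes that [`DiffusionPipeline`] downloads and caches all of these components, a quick way to inspect what was actually loaded (a small illustrative addition; the exact component names depend on the checkpoint):

```py
# Continuing from the `generator` pipeline created above; component names vary per checkpoint.
for name, component in generator.components.items():
    print(name, type(component).__name__)
# A DDPM checkpoint like this one typically exposes a UNet and a scheduler.
```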
@@ -40,13 +40,14 @@ Because the model consists of roughly 1.4 billion parameters, we strongly recomm You can move the generator object to a GPU, just like you would in PyTorch: ```python ->>> generator.to("cuda") +generator.to("cuda") ``` Now you can use the `generator` to generate an image: ```python ->>> image = generator().images[0] +image = generator().images[0] +image ``` The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. @@ -54,7 +55,7 @@ The output is by default wrapped into a [`PIL.Image`](https://pillow.readthedocs You can save the image by calling: ```python ->>> image.save("generated_image.png") +image.save("generated_image.png") ``` Try out the Spaces below, and feel free to play around with the inference steps parameter to see how it affects the image quality! @@ -65,5 +66,3 @@ Try out the Spaces below, and feel free to play around with the inference steps width="850" height="500" > - - From 9ae90593c0af749d722bfc4b846399a7f07ab32c Mon Sep 17 00:00:00 2001 From: Chi Date: Wed, 8 Nov 2023 07:58:21 +0530 Subject: [PATCH 04/14] Replacing the nn.Mish activation function with a get_activation function. (#5651) * I added a new doc string to the class. This is more flexible to understanding other developers what are doing and where it's using. * Update src/diffusers/models/unet_2d_blocks.py This changes suggest by maintener. Co-authored-by: Sayak Paul * Update src/diffusers/models/unet_2d_blocks.py Add suggested text Co-authored-by: Sayak Paul * Update unet_2d_blocks.py I changed the Parameter to Args text. * Update unet_2d_blocks.py proper indentation set in this file. * Update unet_2d_blocks.py a little bit of change in the act_fun argument line. * I run the black command to reformat style in the code * Update unet_2d_blocks.py similar doc-string add to have in the original diffusion repository. * I removed the dummy variable defined in both the encoder and decoder. * Now, I run black package to reformat my file * Remove the redundant line from the adapter.py file. * Black package using to reformated my file * Replacing the nn.Mish activation function with a get_activation function allows developers to more easily choose the right activation function for their task. Additionally, removing redundant variables can improve code readability and maintainability. * I try to fix this: Fast tests for PRs / Fast PyTorch Models & Schedulers CPU tests (pull_request) * Update src/diffusers/models/resnet.py Co-authored-by: YiYi Xu --------- Co-authored-by: Sayak Paul Co-authored-by: Dhruv Nair Co-authored-by: YiYi Xu --- src/diffusers/models/resnet.py | 20 ++++++++++++++++---- src/diffusers/models/vq_model.py | 4 ++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/diffusers/models/resnet.py b/src/diffusers/models/resnet.py index 8fe66aacf5db..868e2e5fae2c 100644 --- a/src/diffusers/models/resnet.py +++ b/src/diffusers/models/resnet.py @@ -778,16 +778,22 @@ class Conv1dBlock(nn.Module): out_channels (`int`): Number of output channels. kernel_size (`int` or `tuple`): Size of the convolving kernel. n_groups (`int`, default `8`): Number of groups to separate the channels into. + activation (`str`, defaults `mish`): Name of the activation function. 
""" def __init__( - self, inp_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], n_groups: int = 8 + self, + inp_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, int]], + n_groups: int = 8, + activation: str = "mish", ): super().__init__() self.conv1d = nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2) self.group_norm = nn.GroupNorm(n_groups, out_channels) - self.mish = nn.Mish() + self.mish = get_activation(activation) def forward(self, inputs: torch.Tensor) -> torch.Tensor: intermediate_repr = self.conv1d(inputs) @@ -808,16 +814,22 @@ class ResidualTemporalBlock1D(nn.Module): out_channels (`int`): Number of output channels. embed_dim (`int`): Embedding dimension. kernel_size (`int` or `tuple`): Size of the convolving kernel. + activation (`str`, defaults `mish`): It is possible to choose the right activation function. """ def __init__( - self, inp_channels: int, out_channels: int, embed_dim: int, kernel_size: Union[int, Tuple[int, int]] = 5 + self, + inp_channels: int, + out_channels: int, + embed_dim: int, + kernel_size: Union[int, Tuple[int, int]] = 5, + activation: str = "mish", ): super().__init__() self.conv_in = Conv1dBlock(inp_channels, out_channels, kernel_size) self.conv_out = Conv1dBlock(out_channels, out_channels, kernel_size) - self.time_emb_act = nn.Mish() + self.time_emb_act = get_activation(activation) self.time_emb = nn.Linear(embed_dim, out_channels) self.residual_conv = ( diff --git a/src/diffusers/models/vq_model.py b/src/diffusers/models/vq_model.py index 08ad122c3891..0c93b9142bea 100644 --- a/src/diffusers/models/vq_model.py +++ b/src/diffusers/models/vq_model.py @@ -162,8 +162,8 @@ def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[ If return_dict is True, a [`~models.vq_model.VQEncoderOutput`] is returned, otherwise a plain `tuple` is returned. 
""" - x = sample - h = self.encode(x).latents + + h = self.encode(sample).latents dec = self.decode(h).sample if not return_dict: From 69996938cf7b41d4a69aea7aa6ab08a9b76a115d Mon Sep 17 00:00:00 2001 From: YiYi Xu Date: Wed, 8 Nov 2023 00:43:20 -1000 Subject: [PATCH 05/14] speed up Shap-E fast test (#5686) skip rendering Co-authored-by: yiyixuxu --- tests/pipelines/shap_e/test_shap_e.py | 24 +++++-------------- tests/pipelines/shap_e/test_shap_e_img2img.py | 18 ++++---------- tests/pipelines/test_pipelines_common.py | 4 ++-- 3 files changed, 12 insertions(+), 34 deletions(-) diff --git a/tests/pipelines/shap_e/test_shap_e.py b/tests/pipelines/shap_e/test_shap_e.py index 7b95fdd9e669..c7792f097ed5 100644 --- a/tests/pipelines/shap_e/test_shap_e.py +++ b/tests/pipelines/shap_e/test_shap_e.py @@ -160,7 +160,7 @@ def get_dummy_inputs(self, device, seed=0): "generator": generator, "num_inference_steps": 1, "frame_size": 32, - "output_type": "np", + "output_type": "latent", } return inputs @@ -176,24 +176,12 @@ def test_shap_e(self): output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (20, 32, 32, 3) - - expected_slice = np.array( - [ - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - ] - ) + image = image.cpu().numpy() + image_slice = image[-3:, -3:] + + assert image.shape == (32, 16) + expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_consistent(self): diff --git a/tests/pipelines/shap_e/test_shap_e_img2img.py b/tests/pipelines/shap_e/test_shap_e_img2img.py index 055dbe7a97d4..ee8d9d07cd77 100644 --- a/tests/pipelines/shap_e/test_shap_e_img2img.py +++ b/tests/pipelines/shap_e/test_shap_e_img2img.py @@ -181,7 +181,7 @@ def get_dummy_inputs(self, device, seed=0): "generator": generator, "num_inference_steps": 1, "frame_size": 32, - "output_type": "np", + "output_type": "latent", } return inputs @@ -197,22 +197,12 @@ def test_shap_e(self): output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] - image_slice = image[0, -3:, -3:, -1] + image_slice = image[-3:, -3:].cpu().numpy() - assert image.shape == (20, 32, 32, 3) + assert image.shape == (32, 16) expected_slice = np.array( - [ - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - 0.00039216, - ] + [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 1795c83b58a1..b9fe4d190f23 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -493,7 +493,7 @@ def _test_inference_batch_single_identical( assert output_batch[0].shape[0] == batch_size - max_diff = np.abs(output_batch[0][0] - output[0][0]).max() + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): @@ -702,7 +702,7 @@ def _test_attention_slicing_forward_pass( self.assertLess(max_diff, expected_max_diff, "Attention slicing should not affect the inference results") if test_mean_pixel_difference: - 
assert_mean_pixel_difference(output_with_slicing[0], output_without_slicing[0]) + assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0])) @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), From 11c125667b28fc9d43ee8e9168a1ac68b3c3c1df Mon Sep 17 00:00:00 2001 From: Kirill Date: Wed, 8 Nov 2023 14:49:03 +0400 Subject: [PATCH 06/14] Fix the misaligned pipeline usage in dreamshaper docstrings (#5700) Fix the misaligned pipeline usage --- .../pipeline_latent_consistency_img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py b/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py index ccc84e22c252..a0211e15113d 100644 --- a/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +++ b/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py @@ -60,7 +60,7 @@ def retrieve_latents(encoder_output, generator): >>> import torch >>> import PIL - >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") + >>> pipe = AutoPipelineForImage2Image.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) From d384265df7c72ba66f2c47af6277a5a9bc7f012e Mon Sep 17 00:00:00 2001 From: Philipp Hasper Date: Wed, 8 Nov 2023 11:51:15 +0100 Subject: [PATCH 07/14] Fixed is_safetensors_compatible() handling of windows path separators (#5650) Closes #4665 --- src/diffusers/pipelines/pipeline_utils.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 9e019794692e..6437732d0315 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -158,9 +158,9 @@ def is_safetensors_compatible(filenames, variant=None, passed_components=None) - continue if extension == ".bin": - pt_filenames.append(filename) + pt_filenames.append(os.path.normpath(filename)) elif extension == ".safetensors": - sf_filenames.add(filename) + sf_filenames.add(os.path.normpath(filename)) for filename in pt_filenames: # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extention = '.bam' @@ -172,9 +172,8 @@ def is_safetensors_compatible(filenames, variant=None, passed_components=None) - else: filename = filename - expected_sf_filename = os.path.join(path, filename) + expected_sf_filename = os.path.normpath(os.path.join(path, filename)) expected_sf_filename = f"{expected_sf_filename}.safetensors" - if expected_sf_filename not in sf_filenames: logger.warning(f"{expected_sf_filename} not found") return False @@ -1774,7 +1773,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]: ) ): raise EnvironmentError( - f"Could not found the necessary `safetensors` weights in {model_filenames} (variant={variant})" + f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})" ) if from_flax: ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"] From c803a8f8c0d2dd40ccad40ae4dc30cd113680698 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 8 Nov 2023 11:51:46 +0100 Subject: [PATCH 08/14] [LCM] Fix img2img (#5698) * [LCM] Fix img2img * make 
fix-copies * make fix-copies * make fix-copies * up --- src/diffusers/models/attention_processor.py | 2 +- .../pipeline_latent_consistency_img2img.py | 2 +- .../test_latent_consistency_models_img2img.py | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py index e1ae2c1c064b..7fb524110ea1 100644 --- a/src/diffusers/models/attention_processor.py +++ b/src/diffusers/models/attention_processor.py @@ -378,7 +378,7 @@ def set_processor(self, processor: "AttnProcessor", _remove_lora: bool = False) _remove_lora (`bool`, *optional*, defaults to `False`): Set to `True` to remove LoRA layers from the model. """ - if hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None: + if not USE_PEFT_BACKEND and hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None: deprecate( "set_processor to offload LoRA", "0.26.0", diff --git a/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py b/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py index a0211e15113d..679415db7f3a 100644 --- a/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py +++ b/src/diffusers/pipelines/latent_consistency_models/pipeline_latent_consistency_img2img.py @@ -738,7 +738,7 @@ def __call__( if original_inference_steps is not None else self.scheduler.config.original_inference_steps ) - latent_timestep = torch.tensor(int(strength * original_inference_steps)) + latent_timestep = timesteps[:1] latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator ) diff --git a/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py b/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py index 82a2944aeda4..53702925534d 100644 --- a/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py +++ b/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py @@ -133,7 +133,7 @@ def test_lcm_onestep(self): assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.5865, 0.2854, 0.2828, 0.7473, 0.6006, 0.4580, 0.4397, 0.6415, 0.6069]) + expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 0.3664, 0.5815, 0.6080, 0.4977]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): @@ -150,7 +150,7 @@ def test_lcm_multistep(self): assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] - expected_slice = np.array([0.4903, 0.3304, 0.3503, 0.5241, 0.5153, 0.4585, 0.3222, 0.4764, 0.4891]) + expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): @@ -237,7 +237,7 @@ def test_lcm_onestep(self): assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() - expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730]) + expected_slice = np.array([0.1950, 0.1961, 0.2308, 0.1786, 0.1837, 0.2320, 0.1898, 0.1885, 0.2309]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): @@ -253,5 +253,5 @@ def test_lcm_multistep(self): assert image.shape == (1, 512, 512, 3) 
image_slice = image[0, -3:, -3:, -1].flatten() - expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0]) + expected_slice = np.array([0.3756, 0.3816, 0.3767, 0.3718, 0.3739, 0.3735, 0.3863, 0.3803, 0.3563]) assert np.abs(image_slice - expected_slice).max() < 1e-3 From 78be400761007b346f6d600c14343752d6c5ef2e Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 8 Nov 2023 17:42:46 +0530 Subject: [PATCH 09/14] [PixArt-Alpha] fix mask feature condition. (#5695) * fix mask feature condition. * debug * remove identical test * set correct * Empty-Commit --- src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py | 4 +++- tests/pipelines/pixart/test_pixart.py | 2 -- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index 6f2bed9ce2cf..147e2b76e6c6 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -156,6 +156,8 @@ def encode_prompt( mask_feature: (bool, defaults to `True`): If `True`, the function will mask the text embeddings. """ + embeds_initially_provided = prompt_embeds is not None and negative_prompt_embeds is not None + if device is None: device = self._execution_device @@ -253,7 +255,7 @@ def encode_prompt( negative_prompt_embeds = None # Perform additional masking. - if mask_feature and prompt_embeds is None and negative_prompt_embeds is None: + if mask_feature and not embeds_initially_provided: prompt_embeds = prompt_embeds.unsqueeze(1) masked_prompt_embeds, keep_indices = self.mask_text_embeddings(prompt_embeds, prompt_embeds_attention_mask) masked_prompt_embeds = masked_prompt_embeds.squeeze(1) diff --git a/tests/pipelines/pixart/test_pixart.py b/tests/pipelines/pixart/test_pixart.py index 70548092fedf..a04f4e1a8804 100644 --- a/tests/pipelines/pixart/test_pixart.py +++ b/tests/pipelines/pixart/test_pixart.py @@ -120,7 +120,6 @@ def test_save_load_optional_components(self): "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, - "mask_feature": False, } # set all optional components to None @@ -155,7 +154,6 @@ def test_save_load_optional_components(self): "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, - "mask_feature": False, } output_loaded = pipe_loaded(**inputs)[0] From 17528afcba583fa0e49504208af33b1a62ff1294 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Wed, 8 Nov 2023 13:19:06 +0100 Subject: [PATCH 10/14] Fix styling issues (#5699) * up * up * up * Empty-Commit * fix keyword argument call. 
--------- Co-authored-by: Sayak Paul --- .../pipeline_stable_unclip.py | 26 ++++++---------- .../pipelines/unclip/pipeline_unclip.py | 30 +++++++++---------- .../versatile_diffusion/modeling_text_unet.py | 2 -- 3 files changed, 24 insertions(+), 34 deletions(-) diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py index c81dd85f0e46..eb4542888c1f 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py @@ -206,17 +206,15 @@ def _encode_prior_prompt( prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) prompt_embeds = prior_text_encoder_output.text_embeds - prior_text_encoder_hidden_states = prior_text_encoder_output.last_hidden_state + text_enc_hid_states = prior_text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] - prompt_embeds, prior_text_encoder_hidden_states = text_model_output[0], text_model_output[1] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) - prior_text_encoder_hidden_states = prior_text_encoder_hidden_states.repeat_interleave( - num_images_per_prompt, dim=0 - ) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: @@ -235,9 +233,7 @@ def _encode_prior_prompt( ) negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds - uncond_prior_text_encoder_hidden_states = ( - negative_prompt_embeds_prior_text_encoder_output.last_hidden_state - ) + uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method @@ -245,11 +241,9 @@ def _encode_prior_prompt( negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) - seq_len = uncond_prior_text_encoder_hidden_states.shape[1] - uncond_prior_text_encoder_hidden_states = uncond_prior_text_encoder_hidden_states.repeat( - 1, num_images_per_prompt, 1 - ) - uncond_prior_text_encoder_hidden_states = uncond_prior_text_encoder_hidden_states.view( + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) @@ -260,13 +254,11 @@ def _encode_prior_prompt( # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - prior_text_encoder_hidden_states = torch.cat( - [uncond_prior_text_encoder_hidden_states, prior_text_encoder_hidden_states] - ) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) - return prompt_embeds, prior_text_encoder_hidden_states, text_mask + return prompt_embeds, text_enc_hid_states, text_mask # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip.py b/src/diffusers/pipelines/unclip/pipeline_unclip.py index c4a25c865d88..7bebed73c106 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip.py +++ b/src/diffusers/pipelines/unclip/pipeline_unclip.py @@ -156,15 +156,15 @@ def _encode_prompt( text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds - text_encoder_hidden_states = text_encoder_output.last_hidden_state + text_enc_hid_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] - prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1] + prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) - text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) + text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: @@ -181,7 +181,7 @@ def _encode_prompt( negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds - uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state + uncond_text_enc_hid_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method @@ -189,9 +189,9 @@ def _encode_prompt( negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) - seq_len = uncond_text_encoder_hidden_states.shape[1] - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( + seq_len = uncond_text_enc_hid_states.shape[1] + uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) + uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) @@ -202,11 +202,11 @@ def _encode_prompt( # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) + text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) - return prompt_embeds, text_encoder_hidden_states, text_mask + return prompt_embeds, text_enc_hid_states, text_mask @torch.no_grad() def __call__( @@ -293,7 +293,7 @@ def __call__( do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 - prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( + prompt_embeds, text_enc_hid_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, 
do_classifier_free_guidance, text_model_output, text_attention_mask ) @@ -321,7 +321,7 @@ def __call__( latent_model_input, timestep=t, proj_embedding=prompt_embeds, - encoder_hidden_states=text_encoder_hidden_states, + encoder_hidden_states=text_enc_hid_states, attention_mask=text_mask, ).predicted_image_embedding @@ -352,10 +352,10 @@ def __call__( # decoder - text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( + text_enc_hid_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, - text_encoder_hidden_states=text_encoder_hidden_states, + text_encoder_hidden_states=text_enc_hid_states, do_classifier_free_guidance=do_classifier_free_guidance, ) @@ -377,7 +377,7 @@ def __call__( decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), - text_encoder_hidden_states.dtype, + text_enc_hid_states.dtype, device, generator, decoder_latents, @@ -391,7 +391,7 @@ def __call__( noise_pred = self.decoder( sample=latent_model_input, timestep=t, - encoder_hidden_states=text_encoder_hidden_states, + encoder_hidden_states=text_enc_hid_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample diff --git a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py index 32147ffa455b..60ea3d814b3a 100644 --- a/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +++ b/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py @@ -1494,7 +1494,6 @@ def forward(self, input_tensor, temb): return output_tensor -# Copied from diffusers.models.unet_2d_blocks.DownBlock2D with DownBlock2D->DownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim class DownBlockFlat(nn.Module): def __init__( self, @@ -1583,7 +1582,6 @@ def custom_forward(*inputs): return hidden_states, output_states -# Copied from diffusers.models.unet_2d_blocks.CrossAttnDownBlock2D with CrossAttnDownBlock2D->CrossAttnDownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim class CrossAttnDownBlockFlat(nn.Module): def __init__( self, From 6e68c71503682c8693cb5b06a4da4911dfd655ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?apolin=C3=A1rio?= Date: Wed, 8 Nov 2023 18:26:53 +0100 Subject: [PATCH 11/14] Add adapter fusing + PEFT to the docs (#5662) * Add adapter fusing + PEFT to the docs * Update docs/source/en/tutorials/using_peft_for_inference.md Co-authored-by: Sayak Paul * Update docs/source/en/tutorials/using_peft_for_inference.md Co-authored-by: Sayak Paul * Update docs/source/en/tutorials/using_peft_for_inference.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/tutorials/using_peft_for_inference.md Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/tutorials/using_peft_for_inference.md * Update docs/source/en/tutorials/using_peft_for_inference.md --------- Co-authored-by: Sayak Paul Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- .../en/tutorials/using_peft_for_inference.md | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/docs/source/en/tutorials/using_peft_for_inference.md b/docs/source/en/tutorials/using_peft_for_inference.md index 2e3337519caa..da69b712a989 100644 --- a/docs/source/en/tutorials/using_peft_for_inference.md +++ b/docs/source/en/tutorials/using_peft_for_inference.md @@ -12,9 +12,9 @@ 
specific language governing permissions and limitations under the License. [[open-in-colab]] -# Inference with PEFT +# Load LoRAs for inference -There are many adapters trained in different styles to achieve different effects. You can even combine multiple adapters to create new and unique images. With the ๐Ÿค— [PEFT](https://huggingface.co/docs/peft/index) integration in ๐Ÿค— Diffusers, it is really easy to load and manage adapters for inference. In this guide, you'll learn how to use different adapters with [Stable Diffusion XL (SDXL)](../api/pipelines/stable_diffusion/stable_diffusion_xl) for inference. +There are many adapters (with LoRAs being the most common type) trained in different styles to achieve different effects. You can even combine multiple adapters to create new and unique images. With the ๐Ÿค— [PEFT](https://huggingface.co/docs/peft/index) integration in ๐Ÿค— Diffusers, it is really easy to load and manage adapters for inference. In this guide, you'll learn how to use different adapters with [Stable Diffusion XL (SDXL)](../api/pipelines/stable_diffusion/stable_diffusion_xl) for inference. Throughout this guide, you'll use LoRA as the main adapter technique, so we'll use the terms LoRA and adapter interchangeably. You should have some familiarity with LoRA, and if you don't, we welcome you to check out the [LoRA guide](https://huggingface.co/docs/peft/conceptual_guides/lora). @@ -22,9 +22,8 @@ Let's first install all the required libraries. ```bash !pip install -q transformers accelerate -# Will be updated once the stable releases are done. -!pip install -q git+https://github.com/huggingface/peft.git -!pip install -q git+https://github.com/huggingface/diffusers.git +!pip install peft +!pip install diffusers ``` Now, let's load a pipeline with a SDXL checkpoint: @@ -165,3 +164,22 @@ list_adapters_component_wise = pipe.get_list_adapters() list_adapters_component_wise {"text_encoder": ["toy", "pixel"], "unet": ["toy", "pixel"], "text_encoder_2": ["toy", "pixel"]} ``` + +## Fusing adapters into the model + +You can use PEFT to easily fuse/unfuse multiple adapters directly into the model weights (both UNet and text encoder) using the [`~diffusers.loaders.LoraLoaderMixin.fuse_lora`] method, which can lead to a speed-up in inference and lower VRAM usage. 
+ +```py +pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") +pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy") + +pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0]) +# Fuses the LoRAs into the Unet +pipe.fuse_lora() + +prompt = "toy_face of a hacker with a hoodie, pixel art" +image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0] + +# Gets the Unet back to the original state +pipe.unfuse_lora() +``` From 65ef7a0c5c594b4f84092e328fbdd73183613b30 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 9 Nov 2023 02:19:09 +0530 Subject: [PATCH 12/14] Fix prompt bug in AnimateDiff (#5702) * fix prompt bug * add test --- .../pipelines/animatediff/pipeline_animatediff.py | 2 +- tests/pipelines/animatediff/test_animatediff.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff.py index 49947f9dbf32..b63acb9a5f30 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff.py @@ -498,7 +498,7 @@ def prepare_latents( @torch.no_grad() def __call__( self, - prompt: Union[str, List[str]], + prompt: Union[str, List[str]] = None, num_frames: Optional[int] = 16, height: Optional[int] = None, width: Optional[int] = None, diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index baba8ba4d655..3c9390f2d1b6 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -220,6 +220,17 @@ def test_to_dtype(self): model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) + pipe(**inputs) + @slow @require_torch_gpu From 6110d7c95f630479cf01340cc8a8141c1e359f09 Mon Sep 17 00:00:00 2001 From: takuoko Date: Thu, 9 Nov 2023 06:55:36 +0900 Subject: [PATCH 13/14] [Bugfix] fix error of peft lora when xformers enabled (#5697) * bugfix peft lor * Apply suggestions from code review --------- Co-authored-by: Patrick von Platen --- src/diffusers/models/attention_processor.py | 40 +++++++++++---------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/src/diffusers/models/attention_processor.py b/src/diffusers/models/attention_processor.py index 7fb524110ea1..1234dbd2d5ce 100644 --- a/src/diffusers/models/attention_processor.py +++ b/src/diffusers/models/attention_processor.py @@ -879,6 +879,9 @@ def __call__( scale: float = 1.0, ) -> torch.Tensor: residual = hidden_states + + args = () if USE_PEFT_BACKEND else (scale,) + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape @@ -891,17 +894,17 @@ def __call__( hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) - query = attn.to_q(hidden_states, scale=scale) + query = attn.to_q(hidden_states, *args) query = 
attn.head_to_batch_dim(query) - encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states, scale=scale) - encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states, scale=scale) + encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states, *args) + encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states, *args) encoder_hidden_states_key_proj = attn.head_to_batch_dim(encoder_hidden_states_key_proj) encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj) if not attn.only_cross_attention: - key = attn.to_k(hidden_states, scale=scale) - value = attn.to_v(hidden_states, scale=scale) + key = attn.to_k(hidden_states, *args) + value = attn.to_v(hidden_states, *args) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) key = torch.cat([encoder_hidden_states_key_proj, key], dim=1) @@ -915,7 +918,7 @@ def __call__( hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj - hidden_states = attn.to_out[0](hidden_states, scale=scale) + hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) @@ -946,6 +949,9 @@ def __call__( scale: float = 1.0, ) -> torch.Tensor: residual = hidden_states + + args = () if USE_PEFT_BACKEND else (scale,) + hidden_states = hidden_states.view(hidden_states.shape[0], hidden_states.shape[1], -1).transpose(1, 2) batch_size, sequence_length, _ = hidden_states.shape @@ -958,7 +964,7 @@ def __call__( hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) - query = attn.to_q(hidden_states, scale=scale) + query = attn.to_q(hidden_states, *args) query = attn.head_to_batch_dim(query, out_dim=4) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) @@ -967,8 +973,8 @@ def __call__( encoder_hidden_states_value_proj = attn.head_to_batch_dim(encoder_hidden_states_value_proj, out_dim=4) if not attn.only_cross_attention: - key = attn.to_k(hidden_states, scale=scale) - value = attn.to_v(hidden_states, scale=scale) + key = attn.to_k(hidden_states, *args) + value = attn.to_v(hidden_states, *args) key = attn.head_to_batch_dim(key, out_dim=4) value = attn.head_to_batch_dim(value, out_dim=4) key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) @@ -985,7 +991,7 @@ def __call__( hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, residual.shape[1]) # linear proj - hidden_states = attn.to_out[0](hidden_states, scale=scale) + hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) @@ -1177,6 +1183,8 @@ def __call__( ) -> torch.FloatTensor: residual = hidden_states + args = () if USE_PEFT_BACKEND else (scale,) + if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) @@ -1207,12 +1215,8 @@ def __call__( elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) - key = ( - attn.to_k(encoder_hidden_states, scale=scale) if not USE_PEFT_BACKEND else attn.to_k(encoder_hidden_states) - ) - value = ( - attn.to_v(encoder_hidden_states, scale=scale) if not USE_PEFT_BACKEND else attn.to_v(encoder_hidden_states) - ) + key = attn.to_k(encoder_hidden_states, *args) + value = attn.to_v(encoder_hidden_states, *args) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads @@ -1232,9 +1236,7 @@ def __call__( hidden_states = hidden_states.to(query.dtype) # linear proj - hidden_states = ( - attn.to_out[0](hidden_states, scale=scale) if not 
USE_PEFT_BACKEND else attn.to_out[0](hidden_states) - ) + hidden_states = attn.to_out[0](hidden_states, *args) # dropout hidden_states = attn.to_out[1](hidden_states) From 9753088c4558955fa4b9bfc3f45124f4f5bbfba4 Mon Sep 17 00:00:00 2001 From: jschen Date: Wed, 8 Nov 2023 23:42:55 +0800 Subject: [PATCH 14/14] inference using height-widths in bin --- .../pixart_alpha/pipeline_pixart_alpha.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py index 147e2b76e6c6..386971b2ac3d 100644 --- a/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +++ b/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py @@ -20,6 +20,7 @@ import torch from transformers import T5EncoderModel, T5Tokenizer +from torchvision import transforms as T from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, Transformer2DModel @@ -61,6 +62,20 @@ """ +ASPECT_RATIO_1024_TEST = { + '0.25': [512., 2048.], '0.28': [512., 1856.], + '0.32': [576., 1792.], '0.33': [576., 1728.], '0.35': [576., 1664.], '0.4': [640., 1600.], + '0.42': [640., 1536.], '0.48': [704., 1472.], '0.5': [704., 1408.], '0.52': [704., 1344.], + '0.57': [768., 1344.], '0.6': [768., 1280.], '0.68': [832., 1216.], '0.72': [832., 1152.], + '0.78': [896., 1152.], '0.82': [896., 1088.], '0.88': [960., 1088.], '0.94': [960., 1024.], + '1.0': [1024., 1024.], '1.07': [1024., 960.], '1.13': [1088., 960.], '1.21': [1088., 896.], + '1.29': [1152., 896.], '1.38': [1152., 832.], '1.46': [1216., 832.], '1.67': [1280., 768.], + '1.75': [1344., 768.], '2.0': [1408., 704.], '2.09': [1472., 704.], '2.4': [1536., 640.], + '2.5': [1600., 640.], '3.0': [1728., 576.], + '4.0': [2048., 512.], +} + + class PixArtAlphaPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using PixArt-Alpha. @@ -116,6 +131,13 @@ def mask_text_embeddings(self, emb, mask): masked_feature = emb * mask[:, None, :, None] return masked_feature, emb.shape[2] + @staticmethod + def classify_height_width_bin(height: int, width: int, ratios: dict): + ar = float(height / width) + closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) + default_hw = ratios[closest_ratio] + return int(default_hw[0]), int(default_hw[1]) + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt def encode_prompt( self, @@ -495,6 +517,24 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype latents = latents * self.scheduler.init_noise_sigma return latents + @staticmethod + def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int): + orig_hw = torch.tensor([samples.shape[2], samples.shape[3]]) + custom_hw = torch.tensor([new_height, new_width]) + + if (orig_hw != custom_hw).all(): + ratio = max(custom_hw[0] / orig_hw[0], custom_hw[1] / orig_hw[1]) + resized_width = int(orig_hw[1] * ratio) + resized_height = int(orig_hw[0] * ratio) + + transform = T.Compose([ + T.Resize((resized_height, resized_width)), + T.CenterCrop(custom_hw.tolist()) + ]) + return transform(samples) + else: + return samples + @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( @@ -518,6 +558,7 @@ def __call__( callback_steps: int = 1, clean_caption: bool = True, mask_feature: bool = True, + use_bin_classifier: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: """ Function invoked when calling the pipeline for generation. 
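A worked example of the bin classification added above, using an excerpt of `ASPECT_RATIO_1024_TEST` so it can be run standalone:

```py
# Standalone repro of the static method added above, with an excerpt of the ratio table.
ratios_excerpt = {"0.6": [768.0, 1280.0], "0.68": [832.0, 1216.0], "0.72": [832.0, 1152.0]}

def classify_height_width_bin(height, width, ratios):
    ar = float(height / width)
    closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar))
    default_hw = ratios[closest_ratio]
    return int(default_hw[0]), int(default_hw[1])

# A request for height=800, width=1200 has aspect ratio ~0.67; the closest key is "0.68",
# so denoising runs at the supported 832x1216 bin.
print(classify_height_width_bin(800, 1200, ratios_excerpt))  # (832, 1216)
```

The decoded image is then resized and center-cropped back to the originally requested size by `resize_and_crop_tensor`, which the final hunk applies after VAE decoding.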
@@ -591,6 +632,9 @@ def __call__( # 1. Check inputs. Raise error if not correct height = height or self.transformer.config.sample_size * self.vae_scale_factor width = width or self.transformer.config.sample_size * self.vae_scale_factor + if use_bin_classifier: + orig_height, orig_width = height, width + height, width = self.classify_height_width_bin(height, width, ratios=ASPECT_RATIO_1024_TEST) self.check_inputs( prompt, height, width, negative_prompt, callback_steps, prompt_embeds, negative_prompt_embeds ) @@ -709,6 +753,8 @@ def __call__( if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + if use_bin_classifier: + image = self.resize_and_crop_tensor(image, orig_width, orig_height) else: image = latents
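Taken together, these changes let callers request arbitrary resolutions from the 1024 checkpoint. A hypothetical end-to-end invocation (the checkpoint id and prompt are assumptions for illustration, not part of the patch):

```py
# Hypothetical usage sketch; the checkpoint id and prompt are illustrative assumptions.
import torch
from diffusers import PixArtAlphaPipeline

pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16
).to("cuda")

# With use_bin_classifier=True (the new default), an 800x1200 request is snapped to the
# nearest supported bin (832x1216) for denoising, then resized and center-cropped back.
image = pipe(
    "a small cabin on top of a snowy mountain", height=800, width=1200, use_bin_classifier=True
).images[0]
image.save("cabin_800x1200.png")
```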