From 6dd9bcd78aa92ccb5027b83818019ea0ac4b4d4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Tolga=20Cang=C3=B6z?= <46008593+standardAI@users.noreply.github.com> Date: Wed, 15 Mar 2023 17:16:25 +0300 Subject: [PATCH] Update img2img.mdx Fix typos --- docs/source/en/using-diffusers/img2img.mdx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/en/using-diffusers/img2img.mdx b/docs/source/en/using-diffusers/img2img.mdx index 3ddba53655e5..6ebe1f0633f0 100644 --- a/docs/source/en/using-diffusers/img2img.mdx +++ b/docs/source/en/using-diffusers/img2img.mdx @@ -33,7 +33,7 @@ from io import BytesIO from diffusers import StableDiffusionImg2ImgPipeline ``` -Load the pipeline +Load the pipeline: ```python device = "cuda" @@ -42,7 +42,7 @@ pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion ) ``` -Download an initial image and preprocess it so we can pass it to the pipeline. +Download an initial image and preprocess it so we can pass it to the pipeline: ```python url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" @@ -55,7 +55,7 @@ init_image ![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_8_output_0.jpeg) -Define the prompt and run the pipeline. 
+Define the prompt and run the pipeline: ```python prompt = "A fantasy landscape, trending on artstation" @@ -67,7 +67,7 @@ prompt = "A fantasy landscape, trending on artstation" -Let's generate two images with same pipeline and seed, but with different values for `strength` +Let's generate two images with the same pipeline and seed, but with different values for `strength`: ```python generator = torch.Generator(device=device).manual_seed(1024) @@ -89,9 +89,9 @@ image ![img](https://huggingface.co/datasets/YiYiXu/test-doc-assets/resolve/main/image_2_image_using_diffusers_cell_14_output_1.jpeg) -As you can see, when using a lower value for `strength`, the generated image is more closer to the original `image` +As you can see, when using a lower value for `strength`, the generated image is closer to the original `image`. -Now let's use a different scheduler - [LMSDiscreteScheduler](https://huggingface.co/docs/diffusers/api/schedulers#diffusers.LMSDiscreteScheduler) +Now let's use a different scheduler - [LMSDiscreteScheduler](https://huggingface.co/docs/diffusers/api/schedulers#diffusers.LMSDiscreteScheduler): ```python from diffusers import LMSDiscreteScheduler