docs/source/en/api/pipelines/animatediff.md: 1 addition & 1 deletion

@@ -165,7 +165,7 @@ from PIL import Image
 adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
 # load SD 1.5 based finetuned model
 model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16).to("cuda")
+pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
 scheduler = DDIMScheduler.from_pretrained(
     model_id,
     subfolder="scheduler",
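The dropped .to("cuda") follows the same rule as the test changes below: once CPU offload is enabled on a pipeline, no manual device move is needed. A minimal sketch of the corrected docs usage, assuming the surrounding snippet goes on to enable model CPU offload (that part is outside this hunk) and with the scheduler arguments shortened:

import torch
from diffusers import AnimateDiffVideoToVideoPipeline, DDIMScheduler, MotionAdapter

adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = AnimateDiffVideoToVideoPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
# No pipe.to("cuda") here: enable_model_cpu_offload() moves each component to the
# accelerator only while it runs and returns it to the CPU afterwards.
pipe.enable_model_cpu_offload()
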
tests/pipelines/stable_diffusion/test_stable_diffusion.py: 0 additions & 1 deletion

@@ -1135,7 +1135,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         torch.cuda.reset_peak_memory_stats()

         pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
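The remaining test diffs all make the same change: the manual pipe.to(torch_device) before enable_sequential_cpu_offload() (or enable_model_cpu_offload()) is removed. A minimal sketch of the pattern these tests exercise, assuming a CUDA machine; the prompt and step count here are illustrative:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe.enable_attention_slicing(1)
# Sequential CPU offload keeps submodules on the CPU and streams each one to the
# GPU only for its forward pass, so an explicit pipe.to("cuda") beforehand is
# redundant; the offload hooks manage device placement themselves.
pipe.enable_sequential_cpu_offload()

image = pipe("Andromeda galaxy in a bottle", num_inference_steps=25).images[0]
# The surrounding tests reset and then read torch.cuda.max_memory_allocated()
# to verify that the offloaded run stays within a small memory budget.
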
@@ -513,7 +513,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -705,7 +705,6 @@ def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()

@@ -911,7 +910,6 @@ def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self):
             "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
         )
         pipe.vae = vae
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -394,7 +394,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
             "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -462,7 +462,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionPipeline.from_pretrained(
             "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -506,7 +506,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
             "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -261,7 +261,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
             scheduler=pndm,
             torch_dtype=torch.float16,
         )
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -472,7 +472,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
             model_id,
             torch_dtype=torch.float16,
         )
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -537,7 +537,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading_v_pred(self):
         prompt = "Andromeda galaxy in a bottle"

         pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16)
-        pipeline = pipeline.to(torch_device)
         pipeline.enable_attention_slicing(1)
         pipeline.enable_sequential_cpu_offload()

@@ -809,7 +809,6 @@ def test_stable_diffusion_adapter_zoedepth_sd_v15(self):
         adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16)

         pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None)
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_model_cpu_offload()
         generator = torch.Generator(device="cpu").manual_seed(0)

@@ -942,7 +941,6 @@ def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionAdapterPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -263,7 +263,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableDiffusionImageVariationPipeline.from_pretrained(
             "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
@@ -415,7 +415,6 @@ def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self)
         model_ckpt = "stabilityai/stable-diffusion-2-base"
         scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
         pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing(1)
         pipe.enable_sequential_cpu_offload()
tests/pipelines/stable_unclip/test_stable_unclip.py: 0 additions & 2 deletions

@@ -206,7 +206,6 @@ def test_stable_unclip(self):
         )

         pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         # stable unclip will oom when integration tests are run on a V100,
         # so turn on memory savings

@@ -228,7 +227,6 @@ def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
         torch.cuda.reset_peak_memory_stats()

         pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
         pipe.enable_sequential_cpu_offload()
tests/pipelines/stable_unclip/test_stable_unclip_img2img.py: 0 additions & 3 deletions

@@ -233,7 +233,6 @@ def test_stable_unclip_l_img2img(self):
         pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
             "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
         )
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         # stable unclip will oom when integration tests are run on a V100,
         # so turn on memory savings

@@ -261,7 +260,6 @@ def test_stable_unclip_h_img2img(self):
         pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
             "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
         )
-        pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         # stable unclip will oom when integration tests are run on a V100,
         # so turn on memory savings

@@ -289,7 +287,6 @@ def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
         pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
             "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
         )
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
         pipe.enable_sequential_cpu_offload()
tests/pipelines/unclip/test_unclip.py: 0 additions & 1 deletion

@@ -500,7 +500,6 @@ def test_unclip_pipeline_with_sequential_cpu_offloading(self):
         torch.cuda.reset_peak_memory_stats()

         pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
-        pipe = pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)
         pipe.enable_attention_slicing()
         pipe.enable_sequential_cpu_offload()