diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
index 62a5785beb2f..d8948862845d 100644
--- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
+++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py
@@ -662,6 +662,8 @@ def custom_forward(*inputs):
 
 
 class LDMBertModel(LDMBertPreTrainedModel):
+    _no_split_modules = []
+
     def __init__(self, config: LDMBertConfig):
         super().__init__(config)
         self.model = LDMBertEncoder(config)
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
index e80cc1360b33..2a33074a2473 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
@@ -208,7 +208,6 @@ def __call__(
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-
         if isinstance(prompt, str):
             batch_size = 1
         elif isinstance(prompt, list):
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
index 464d5ef33bfd..6b23aca33bc6 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py
@@ -740,7 +740,7 @@ def test_stable_diffusion_accelerate_auto_device(self):
 
         start_time = time.time()
         pipeline_normal_load = StableDiffusionPipeline.from_pretrained(
-            pipeline_id, revision="fp16", torch_dtype=torch.float16, device_map="auto"
+            pipeline_id, revision="fp16", torch_dtype=torch.float16
         )
         pipeline_normal_load.to(torch_device)
         normal_load_time = time.time() - start_time
@@ -761,9 +761,7 @@ def test_stable_diffusion_pipeline_with_unet_on_gpu_only(self):
         pipeline_id = "CompVis/stable-diffusion-v1-4"
         prompt = "Andromeda galaxy in a bottle"
 
-        pipeline = StableDiffusionPipeline.from_pretrained(
-            pipeline_id, revision="fp16", torch_dtype=torch.float16, device_map="auto"
-        )
+        pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, revision="fp16", torch_dtype=torch.float16)
         pipeline.enable_attention_slicing(1)
         pipeline.enable_sequential_cpu_offload()
 
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index 5ce07dee8d57..e355a19493ed 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -77,6 +77,7 @@ def test_load_custom_pipeline(self):
         pipeline = DiffusionPipeline.from_pretrained(
             "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
         )
+        pipeline = pipeline.to(torch_device)
         # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
         # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
         assert pipeline.__class__.__name__ == "CustomPipeline"
@@ -85,6 +86,7 @@ def test_run_custom_pipeline(self):
         pipeline = DiffusionPipeline.from_pretrained(
             "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
         )
+        pipeline = pipeline.to(torch_device)
         images, output_str = pipeline(num_inference_steps=2, output_type="np")
 
         assert images[0].shape == (1, 32, 32, 3)
@@ -96,6 +98,7 @@ def test_local_custom_pipeline(self):
         pipeline = DiffusionPipeline.from_pretrained(
             "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
         )
+        pipeline = pipeline.to(torch_device)
         images, output_str = pipeline(num_inference_steps=2, output_type="np")
 
         assert pipeline.__class__.__name__ == "CustomLocalPipeline"
@@ -109,7 +112,7 @@ def test_load_pipeline_from_git(self):
         clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
 
         feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id, device_map="auto")
-        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16, device_map="auto")
+        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)
 
         pipeline = DiffusionPipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4",
@@ -380,10 +383,11 @@ def test_from_pretrained_hub(self):
         scheduler = DDPMScheduler(num_train_timesteps=10)
 
         ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
-        ddpm.to(torch_device)
+        ddpm = ddpm.to(torch_device)
         ddpm.set_progress_bar_config(disable=None)
+
         ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
-        ddpm_from_hub.to(torch_device)
+        ddpm_from_hub = ddpm_from_hub.to(torch_device)
         ddpm_from_hub.set_progress_bar_config(disable=None)
 
         generator = torch.manual_seed(0)
@@ -404,11 +408,11 @@ def test_from_pretrained_hub_pass_model(self):
         ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(
             model_path, unet=unet, scheduler=scheduler, device_map="auto"
         )
-        ddpm_from_hub_custom_model.to(torch_device)
+        ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device)
         ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
 
         ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, device_map="auto")
-        ddpm_from_hub.to(torch_device)
+        ddpm_from_hub = ddpm_from_hub.to(torch_device)
         ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)
 
         generator = torch.manual_seed(0)
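
Not part of the diff above: a minimal sketch of the loading pattern the updated tests now follow, assuming a CUDA device is available. The checkpoint id and the API calls are taken from the tests in this diff; the "cuda" device string is an illustrative assumption.

    import torch
    from diffusers import StableDiffusionPipeline

    # Load without device_map="auto" and place the pipeline explicitly instead.
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
    )
    pipe = pipe.to("cuda")  # keep the assignment, as the updated tests do

    # For low-memory runs, the tests use slicing plus sequential CPU offload:
    # pipe.enable_attention_slicing(1)
    # pipe.enable_sequential_cpu_offload()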