From 8f8666f5c26ff4a4c64d2b4a18cd72f34084b633 Mon Sep 17 00:00:00 2001 From: Aryan Date: Fri, 20 Sep 2024 04:34:19 +0200 Subject: [PATCH 1/5] refactor scheduler class usage --- tests/lora/test_lora_layers_cogvideox.py | 4 +- tests/lora/test_lora_layers_flux.py | 2 +- tests/lora/test_lora_layers_sd3.py | 2 +- tests/lora/utils.py | 132 +++++------------------ 4 files changed, 31 insertions(+), 109 deletions(-) diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py index 17b1cc8e764a..290a562c4de1 100644 --- a/tests/lora/test_lora_layers_cogvideox.py +++ b/tests/lora/test_lora_layers_cogvideox.py @@ -48,6 +48,7 @@ class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = CogVideoXPipeline scheduler_cls = CogVideoXDPMScheduler scheduler_kwargs = {"timestep_spacing": "trailing"} + scheduler_classes = [CogVideoXDDIMScheduler, CogVideoXDPMScheduler] transformer_kwargs = { "num_attention_heads": 4, @@ -126,8 +127,7 @@ def get_dummy_inputs(self, with_generator=True): @skip_mps def test_lora_fuse_nan(self): - scheduler_classes = [CogVideoXDDIMScheduler, CogVideoXDPMScheduler] - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index e849396f7c67..1a8af3a9f713 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -47,7 +47,7 @@ class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = FluxPipeline scheduler_cls = FlowMatchEulerDiscreteScheduler() scheduler_kwargs = {} - uses_flow_matching = True + scheduler_classes = [FlowMatchEulerDiscreteScheduler] transformer_kwargs = { "patch_size": 1, "in_channels": 4, diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py index 063ff4c8b05d..6bf4b42d5608 100644 --- a/tests/lora/test_lora_layers_sd3.py +++ b/tests/lora/test_lora_layers_sd3.py @@ -34,7 +34,7 @@ class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = StableDiffusion3Pipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_kwargs = {} - uses_flow_matching = True + scheduler_classes = [FlowMatchEulerDiscreteScheduler] transformer_kwargs = { "sample_size": 32, "patch_size": 1, diff --git a/tests/lora/utils.py b/tests/lora/utils.py index adf7cb24470f..aecd4935d494 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -24,7 +24,6 @@ from diffusers import ( AutoencoderKL, DDIMScheduler, - FlowMatchEulerDiscreteScheduler, LCMScheduler, UNet2DConditionModel, ) @@ -69,9 +68,10 @@ def check_if_lora_correctly_set(model) -> bool: @require_peft_backend class PeftLoraLoaderMixinTests: pipeline_class = None + scheduler_cls = None scheduler_kwargs = None - uses_flow_matching = False + scheduler_classes = [DDIMScheduler, LCMScheduler] has_two_text_encoders = False has_three_text_encoders = False @@ -205,13 +205,7 @@ def test_simple_inference(self): """ Tests a simple inference and makes sure it works as expected """ - # TODO(aryan): Some of the assumptions made here in many different tests are incorrect for CogVideoX. - # For example, we need to test with CogVideoXDDIMScheduler and CogVideoDPMScheduler instead of DDIMScheduler - # and LCMScheduler, which are not supported by it. 
- scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -226,10 +220,7 @@ def test_simple_inference_with_text_lora(self): Tests a simple inference with lora attached on the text encoder and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -260,9 +251,6 @@ def test_simple_inference_with_text_lora_and_scale(self): Tests a simple inference with lora attached on the text encoder + scale argument and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) call_signature_keys = inspect.signature(self.pipeline_class.__call__).parameters.keys() for possible_attention_kwargs in ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"]: if possible_attention_kwargs in call_signature_keys: @@ -270,7 +258,7 @@ def test_simple_inference_with_text_lora_and_scale(self): break assert attention_kwargs_name is not None - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -317,10 +305,7 @@ def test_simple_inference_with_text_lora_fused(self): Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -360,10 +345,7 @@ def test_simple_inference_with_text_lora_unloaded(self): Tests a simple inference with lora attached to text encoder, then unloads the lora weights and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -410,10 +392,7 @@ def test_simple_inference_with_text_lora_save_load(self): """ Tests a simple usecase where users could use saving utilities for LoRA. 
""" - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -490,10 +469,7 @@ def test_simple_inference_with_partial_text_lora(self): with different ranks and some adapters removed and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, _, _ = self.get_dummy_components(scheduler_cls) # Verify `StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324). text_lora_config = LoraConfig( @@ -555,10 +531,7 @@ def test_simple_inference_save_pretrained(self): """ Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -609,10 +582,7 @@ def test_simple_inference_with_text_denoiser_lora_save_load(self): """ Tests a simple usecase where users could use saving utilities for LoRA for Unet + text encoder """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -708,9 +678,6 @@ def test_simple_inference_with_text_denoiser_lora_and_scale(self): Tests a simple inference with lora attached on the text encoder + Unet + scale argument and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) call_signature_keys = inspect.signature(self.pipeline_class.__call__).parameters.keys() for possible_attention_kwargs in ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"]: if possible_attention_kwargs in call_signature_keys: @@ -718,7 +685,7 @@ def test_simple_inference_with_text_denoiser_lora_and_scale(self): break assert attention_kwargs_name is not None - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -781,10 +748,7 @@ def test_simple_inference_with_text_lora_denoiser_fused(self): Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected - with unet """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = 
self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -842,10 +806,7 @@ def test_simple_inference_with_text_denoiser_lora_unloaded(self): Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -905,10 +866,7 @@ def test_simple_inference_with_text_denoiser_lora_unfused( Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -966,10 +924,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter(self): Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set them """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1046,10 +1001,7 @@ def test_simple_inference_with_text_denoiser_block_scale(self): if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "CogVideoXPipeline"]: return - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1112,10 +1064,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): if self.pipeline_class.__name__ == "StableDiffusion3Pipeline": return - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1288,10 +1237,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set/delete them """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = 
self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1397,10 +1343,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set them """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1481,10 +1424,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): @skip_mps def test_lora_fuse_nan(self): - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1530,10 +1470,7 @@ def test_get_adapters(self): Tests a simple usecase where we attach multiple adapters and check if the results are the expected results """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1566,10 +1503,7 @@ def test_get_list_adapters(self): Tests a simple usecase where we attach multiple adapters and check if the results are the expected results """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1653,10 +1587,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi( Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model and makes sure it works as expected - with unet and multi-adapter case """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1729,10 +1660,7 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi( @require_peft_version_greater(peft_version="0.9.0") def test_simple_inference_with_dora(self): - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components( scheduler_cls, use_dora=True ) @@ -1775,10 +1703,7 @@ def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self): Tests a simple inference with lora 
attached to text encoder and unet, then unloads the lora weights and makes sure it works as expected """ - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) @@ -1819,10 +1744,7 @@ def set_pad_mode(network, mode="circular"): if isinstance(module, torch.nn.Conv2d): module.padding_mode = mode - scheduler_classes = ( - [FlowMatchEulerDiscreteScheduler] if self.uses_flow_matching else [DDIMScheduler, LCMScheduler] - ) - for scheduler_cls in scheduler_classes: + for scheduler_cls in self.scheduler_classes: components, _, _ = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) From c22a38f46b2883430c39706a5da68e30a9977abc Mon Sep 17 00:00:00 2001 From: Aryan Date: Fri, 20 Sep 2024 04:41:54 +0200 Subject: [PATCH 2/5] reorder to make tests more readable --- tests/lora/utils.py | 40 +++++++++++----------------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/tests/lora/utils.py b/tests/lora/utils.py index aecd4935d494..668626eba001 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -252,6 +252,8 @@ def test_simple_inference_with_text_lora_and_scale(self): and makes sure it works as expected """ call_signature_keys = inspect.signature(self.pipeline_class.__call__).parameters.keys() + + # TODO(diffusers): Discuss a common naming convention across library for 1.0.0 release for possible_attention_kwargs in ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"]: if possible_attention_kwargs in call_signature_keys: attention_kwargs_name = possible_attention_kwargs @@ -996,7 +998,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter(self): def test_simple_inference_with_text_denoiser_block_scale(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches - one adapter and set differnt weights for different blocks (i.e. block lora) + one adapter and set different weights for different blocks (i.e. 
block lora) """ if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "CogVideoXPipeline"]: return @@ -1082,11 +1084,9 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer @@ -1255,12 +1255,9 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - - if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer @@ -1321,11 +1318,9 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") pipe.set_adapters(["adapter-1", "adapter-2"]) @@ -1361,11 +1356,9 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer @@ -1414,7 +1407,6 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): ) pipe.disable_lora() - output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] self.assertTrue( @@ -1460,7 +1452,6 @@ def test_lora_fuse_nan(self): # without we should not see an error, but every image will be black pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) - out = pipe("test", num_inference_steps=2, output_type="np")[0] self.assertTrue(np.isnan(out).all()) @@ -1517,12 +1508,9 @@ def test_get_list_adapters(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - - if self.unet_kwargs is not None: dicts_to_be_checked.update({"unet": ["adapter-1"]}) else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") dicts_to_be_checked.update({"transformer": ["adapter-1"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) @@ -1535,12 +1523,9 @@ def test_get_list_adapters(self): if self.unet_kwargs is not None: pipe.unet.add_adapter(denoiser_lora_config, 
"adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") - - if self.unet_kwargs is not None: dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]}) else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) @@ -1563,18 +1548,15 @@ def test_get_list_adapters(self): ) # 4. - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-3") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3") - dicts_to_be_checked = {} if "text_encoder" in self.pipeline_class._lora_loadable_modules: dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-3") dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2", "adapter-3"]}) else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3") dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2", "adapter-3"]}) self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) From b5fb4fe48fb2520ba412155dfa21e10c6916b68a Mon Sep 17 00:00:00 2001 From: Aryan Date: Fri, 20 Sep 2024 04:46:35 +0200 Subject: [PATCH 3/5] remove pipeline specific checks and skip tests directly --- tests/lora/test_lora_layers_cogvideox.py | 12 ++++++++++++ tests/lora/test_lora_layers_flux.py | 8 ++++++++ tests/lora/test_lora_layers_sd3.py | 16 ++++++++++++++++ tests/lora/utils.py | 11 ----------- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py index 290a562c4de1..0e4a998be457 100644 --- a/tests/lora/test_lora_layers_cogvideox.py +++ b/tests/lora/test_lora_layers_cogvideox.py @@ -161,6 +161,18 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self): def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=5e-3) + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_modify_padding_mode(self): + pass + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_partial_text_lora(self): pass diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index 1a8af3a9f713..0c336ebc3cbf 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -154,6 +154,14 @@ def test_with_alpha_in_state_dict(self): ) self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3)) + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_modify_padding_mode(self): + pass + @slow @require_torch_gpu diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py index 6bf4b42d5608..8f61c95c2fc8 100644 --- a/tests/lora/test_lora_layers_sd3.py +++ b/tests/lora/test_lora_layers_sd3.py @@ -92,3 +92,19 @@ def test_sd3_lora(self): lora_filename = "lora_peft_format.safetensors" pipe.load_lora_weights(lora_model_id, 
weight_name=lora_filename) + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_modify_padding_mode(self): + pass diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 668626eba001..c094cda588fd 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -1000,9 +1000,6 @@ def test_simple_inference_with_text_denoiser_block_scale(self): Tests a simple inference with lora attached to text encoder and unet, attaches one adapter and set different weights for different blocks (i.e. block lora) """ - if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "CogVideoXPipeline"]: - return - for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) @@ -1063,9 +1060,6 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): Tests a simple inference with lora attached to text encoder and unet, attaches multiple adapters and set differnt weights for different blocks (i.e. block lora) """ - if self.pipeline_class.__name__ == "StableDiffusion3Pipeline": - return - for scheduler_cls in self.scheduler_classes: components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) pipe = self.pipeline_class(**components) @@ -1142,8 +1136,6 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): """Tests that any valid combination of lora block scales can be used in pipe.set_adapter""" - if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "FluxPipeline", "CogVideoXPipeline"]: - return def updown_options(blocks_with_tf, layers_per_block, value): """ @@ -1718,9 +1710,6 @@ def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self): _ = pipe(**inputs, generator=torch.manual_seed(0))[0] def test_modify_padding_mode(self): - if self.pipeline_class.__name__ in ["StableDiffusion3Pipeline", "FluxPipeline", "CogVideoXPipeline"]: - return - def set_pad_mode(network, mode="circular"): for _, module in network.named_modules(): if isinstance(module, torch.nn.Conv2d): From e4208b392df7653e545254c4136e5833100979ec Mon Sep 17 00:00:00 2001 From: Aryan Date: Fri, 20 Sep 2024 05:08:37 +0200 Subject: [PATCH 4/5] rewrite denoiser conditions cleaner --- tests/lora/utils.py | 208 ++++++++++++++------------------------------ 1 file changed, 66 insertions(+), 142 deletions(-) diff --git a/tests/lora/utils.py b/tests/lora/utils.py index c094cda588fd..939b749c286a 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -600,13 +600,9 @@ def test_simple_inference_with_text_denoiser_lora_save_load(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in Unet") + denoiser 
= pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -624,10 +620,7 @@ def test_simple_inference_with_text_denoiser_lora_save_load(self): else None ) - if self.unet_kwargs is not None: - denoiser_state_dict = get_peft_model_state_dict(pipe.unet) - else: - denoiser_state_dict = get_peft_model_state_dict(pipe.transformer) + denoiser_state_dict = get_peft_model_state_dict(denoiser) saving_kwargs = { "save_directory": tmpdirname, @@ -661,8 +654,7 @@ def test_simple_inference_with_text_denoiser_lora_save_load(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -703,13 +695,9 @@ def test_simple_inference_with_text_denoiser_lora_and_scale(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -766,13 +754,9 @@ def test_simple_inference_with_text_lora_denoiser_fused(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -789,8 +773,7 @@ def test_simple_inference_with_text_lora_denoiser_fused(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser") if self.has_two_text_encoders or self.has_three_text_encoders: 
if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -824,12 +807,9 @@ def test_simple_inference_with_text_denoiser_lora_unloaded(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -843,10 +823,7 @@ def test_simple_inference_with_text_denoiser_lora_unloaded(self): self.assertFalse( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder" ) - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertFalse( - check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly unloaded in denoiser" - ) + self.assertFalse(check_if_lora_correctly_set(denoiser), "Lora not correctly unloaded in denoiser") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -881,13 +858,9 @@ def test_simple_inference_with_text_denoiser_lora_unfused( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -906,8 +879,7 @@ def test_simple_inference_with_text_denoiser_lora_unfused( if "text_encoder" in self.pipeline_class._lora_loadable_modules: self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers") - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Unfuse should still keep LoRA layers") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -942,17 +914,10 @@ def test_simple_inference_with_text_denoiser_multi_adapter(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - 
pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -1010,14 +975,11 @@ def test_simple_inference_with_text_denoiser_block_scale(self): output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -1076,15 +1038,10 @@ def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: @@ -1207,10 +1164,9 @@ def all_possible_dict_opts(unet, value): _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules @@ -1245,15 +1201,10 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): 
check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules @@ -1308,12 +1259,10 @@ def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") pipe.set_adapters(["adapter-1", "adapter-2"]) pipe.delete_adapters(["adapter-1", "adapter-2"]) @@ -1346,15 +1295,10 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules @@ -1421,13 +1365,9 @@ def test_lora_fuse_nan(self): check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + denoiser = pipe.transformer if self.unet_kwargs 
is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") # corrupt one LoRA weight with `inf` values with torch.no_grad(): @@ -1461,19 +1401,15 @@ def test_get_adapters(self): _, _, inputs = self.get_dummy_inputs(with_generator=False) pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") adapter_names = pipe.get_active_adapters() self.assertListEqual(adapter_names, ["adapter-1"]) pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") adapter_names = pipe.get_active_adapters() self.assertListEqual(adapter_names, ["adapter-2"]) @@ -1577,22 +1513,16 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi( check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" ) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") # Attach a second adapter if "text_encoder" in self.pipeline_class._lora_loadable_modules: pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") - else: - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules @@ -1647,14 +1577,11 @@ def test_simple_inference_with_dora(self): self.assertTrue(output_no_dora_lora.shape == self.output_shape) pipe.text_encoder.add_adapter(text_lora_config) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: lora_loadable_components = self.pipeline_class._lora_loadable_modules @@ -1685,14 +1612,11 @@ def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self): _, _, 
inputs = self.get_dummy_inputs(with_generator=False) pipe.text_encoder.add_adapter(text_lora_config) - if self.unet_kwargs is not None: - pipe.unet.add_adapter(denoiser_lora_config) - else: - pipe.transformer.add_adapter(denoiser_lora_config) - self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") - denoiser_to_checked = pipe.unet if self.unet_kwargs is not None else pipe.transformer - self.assertTrue(check_if_lora_correctly_set(denoiser_to_checked), "Lora not correctly set in denoiser") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") if self.has_two_text_encoders or self.has_three_text_encoders: pipe.text_encoder_2.add_adapter(text_lora_config) From 95c084de5f35133dfc0045ff81d3898364be6831 Mon Sep 17 00:00:00 2001 From: Aryan Date: Fri, 20 Sep 2024 05:18:33 +0200 Subject: [PATCH 5/5] bump tolerance for cog test --- tests/lora/test_lora_layers_cogvideox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py index 0e4a998be457..c141ebc96b3e 100644 --- a/tests/lora/test_lora_layers_cogvideox.py +++ b/tests/lora/test_lora_layers_cogvideox.py @@ -156,10 +156,10 @@ def test_lora_fuse_nan(self): self.assertTrue(np.isnan(out).all()) def test_simple_inference_with_text_lora_denoiser_fused_multi(self): - super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=5e-3) + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) def test_simple_inference_with_text_denoiser_lora_unfused(self): - super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=5e-3) + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) @unittest.skip("Not supported in CogVideoX.") def test_simple_inference_with_text_denoiser_block_scale(self):
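
The five patches above converge on one configuration pattern for the shared LoRA test mixin. What follows is a minimal, self-contained sketch of that pattern, not the real diffusers code: the scheduler, pipeline, and test classes are illustrative stand-ins, and the mixin is reduced to the three ideas the series introduces — a class-level `scheduler_classes` list in place of the `uses_flow_matching` flag (patch 1), `unittest.skip` in subclasses in place of pipeline-name checks inside the mixin (patch 3), and a single ternary in place of the repeated unet/transformer if/else blocks (patch 4).

    import unittest


    # Stand-in classes so the sketch runs without diffusers installed; every name
    # below is hypothetical and not a diffusers API.
    class DDIMScheduler: ...
    class LCMScheduler: ...
    class FlowMatchEulerDiscreteScheduler: ...


    class FakeDenoiser:
        def __init__(self):
            self.adapters = []

        def add_adapter(self, config, name="default"):
            self.adapters.append(name)


    class FakePipeline:
        def __init__(self, use_unet):
            # A pipeline carries either a unet or a transformer, never both.
            self.unet = FakeDenoiser() if use_unet else None
            self.transformer = None if use_unet else FakeDenoiser()


    class LoraMixinTests:
        # Patch 1 pattern: subclasses override this list instead of toggling a
        # boolean, so per-pipeline scheduler choices live with the pipeline.
        scheduler_classes = [DDIMScheduler, LCMScheduler]
        unet_kwargs = None  # non-None marks a UNet-based pipeline

        def test_adapter_attaches_to_denoiser(self):
            for scheduler_cls in self.scheduler_classes:
                pipe = FakePipeline(use_unet=self.unet_kwargs is not None)
                # Patch 4 pattern: one ternary picks the denoiser once per test body.
                denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
                denoiser.add_adapter(None, "adapter-1")
                assert "adapter-1" in denoiser.adapters, f"failed for {scheduler_cls.__name__}"


    class FlowMatchLoRATests(LoraMixinTests, unittest.TestCase):
        # Flow-matching pipelines now declare their schedulers explicitly.
        scheduler_classes = [FlowMatchEulerDiscreteScheduler]

        # Patch 3 pattern: unsupported tests are skipped in the subclass rather
        # than special-cased by pipeline name inside the shared mixin.
        @unittest.skip("Not supported in this pipeline.")
        def test_modify_padding_mode(self):
            pass


    if __name__ == "__main__":
        unittest.main()

Run directly, the subclass exercises the shared test once per entry in `scheduler_classes` and reports the padding-mode test as skipped — the same division of labor the patches set up between tests/lora/utils.py and the per-pipeline test files.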