From 2c250e740f2e0b6b5cac65390df8153daa5506c2 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 16:20:26 +0530 Subject: [PATCH 01/11] make set_adapters() robust on silent failures. --- src/diffusers/loaders/lora_base.py | 12 +++++++++- tests/lora/utils.py | 35 ++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/src/diffusers/loaders/lora_base.py b/src/diffusers/loaders/lora_base.py index e124b6eeacf3..a1b2ec5f235e 100644 --- a/src/diffusers/loaders/lora_base.py +++ b/src/diffusers/loaders/lora_base.py @@ -518,8 +518,18 @@ def set_adapters( adapter_names: Union[List[str], str], adapter_weights: Optional[Union[float, Dict, List[float], List[Dict]]] = None, ): - adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names + if isinstance(adapter_weights, dict): + components_passed = set(adapter_weights.keys()) + lora_components = set(self._lora_loadable_modules) + invalid_components = components_passed - lora_components + if invalid_components: + raise ValueError( + f"The following components in `adapter_weights` are not part of the pipeline: {invalid_components}. 
" + f"Available components that are LoRA-compatible: {self._lora_loadable_modules}" + ) + + adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names adapter_weights = copy.deepcopy(adapter_weights) # Expand weights into a list, one entry per adapter diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 9c982e8de37f..21b99a39bfa6 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -1109,6 +1109,41 @@ def test_wrong_adapter_name_raises_error(self): pipe.set_adapters("adapter-1") _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + def test_multiple_wrong_adapter_name_raises_error(self): + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + scale_with_wrong_components = {"foo": 0.0, "bar": 0.0, "tik": 0.0} + with self.assertRaises(ValueError) as err_context: + pipe.set_adapters("adapter-1", adapter_weights=scale_with_wrong_components) + + wrong_components = 
set(scale_with_wrong_components.keys()) + msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}" + self.assertTrue(msg in str(err_context.exception)) + + # test this works. + pipe.set_adapters("adapter-1") + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + def test_simple_inference_with_text_denoiser_block_scale(self): """ Tests a simple inference with lora attached to text encoder and unet, attaches From 45c15d328865322c64003aaadc0b87d661fbdd54 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 16:46:17 +0530 Subject: [PATCH 02/11] fixes to tests --- tests/lora/test_lora_layers_cogvideox.py | 4 ++++ tests/lora/test_lora_layers_flux.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tests/lora/test_lora_layers_cogvideox.py b/tests/lora/test_lora_layers_cogvideox.py index c141ebc96b3e..939749636e5c 100644 --- a/tests/lora/test_lora_layers_cogvideox.py +++ b/tests/lora/test_lora_layers_cogvideox.py @@ -192,3 +192,7 @@ def test_simple_inference_with_text_lora_fused(self): @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") def test_simple_inference_with_text_lora_save_load(self): pass + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index 4629c24c8cd8..d0c98a6081b5 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -162,6 +162,10 @@ def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(se def test_modify_padding_mode(self): pass + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + @slow @require_torch_gpu From ce4fcbf76fde5e0f2f27d7ade577a4594e7c152e Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 17:09:50 +0530 Subject: [PATCH 03/11] flaky decorator. 
--- tests/lora/test_lora_layers_sdxl.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/lora/test_lora_layers_sdxl.py b/tests/lora/test_lora_layers_sdxl.py index 8deecd770c31..3be71dcd0d0d 100644 --- a/tests/lora/test_lora_layers_sdxl.py +++ b/tests/lora/test_lora_layers_sdxl.py @@ -37,6 +37,7 @@ from diffusers.utils.import_utils import is_accelerate_available from diffusers.utils.testing_utils import ( CaptureLogger, + is_flaky, load_image, nightly, numpy_cosine_similarity_distance, @@ -111,6 +112,10 @@ def tearDown(self): gc.collect() torch.cuda.empty_cache() + @is_flaky + def test_multiple_wrong_adapter_name_raises_error(self): + super().test_multiple_wrong_adapter_name_raises_error() + @slow @require_torch_gpu From a6f7c906f96f6a0c57c6d5df929057a5945396b3 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 17:19:36 +0530 Subject: [PATCH 04/11] fix --- tests/lora/test_lora_layers_flux.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index d0c98a6081b5..20a2e2a5f00c 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -154,6 +154,10 @@ def test_with_alpha_in_state_dict(self): ) self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3)) + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + @unittest.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass From 473bbad20c55e8b46ce7b522bb787f086ab5bcab Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 17:30:27 +0530 Subject: [PATCH 05/11] flaky to sd3. 
--- tests/lora/test_lora_layers_sd3.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py index 8f61c95c2fc8..cc10fef43205 100644 --- a/tests/lora/test_lora_layers_sd3.py +++ b/tests/lora/test_lora_layers_sd3.py @@ -18,7 +18,13 @@ from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel from diffusers import FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Pipeline -from diffusers.utils.testing_utils import is_peft_available, require_peft_backend, require_torch_gpu, torch_device +from diffusers.utils.testing_utils import ( + is_flaky, + is_peft_available, + require_peft_backend, + require_torch_gpu, + torch_device, +) if is_peft_available(): @@ -108,3 +114,7 @@ def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(se @unittest.skip("Not supported in SD3.") def test_modify_padding_mode(self): pass + + @is_flaky + def test_multiple_wrong_adapter_name_raises_error(self): + super().test_multiple_wrong_adapter_name_raises_error() From 271404336f7f44d1960d6d1eaf9e3a0859f0e260 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 18:15:30 +0530 Subject: [PATCH 06/11] remove warning. --- src/diffusers/loaders/lora_base.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/diffusers/loaders/lora_base.py b/src/diffusers/loaders/lora_base.py index a1b2ec5f235e..d5477a7c566b 100644 --- a/src/diffusers/loaders/lora_base.py +++ b/src/diffusers/loaders/lora_base.py @@ -564,12 +564,6 @@ def set_adapters( for adapter_name, weights in zip(adapter_names, adapter_weights): if isinstance(weights, dict): component_adapter_weights = weights.pop(component, None) - - if component_adapter_weights is not None and not hasattr(self, component): - logger.warning( - f"Lora weight dict contains {component} weights but will be ignored because pipeline does not have {component}." 
- ) - if component_adapter_weights is not None and component not in invert_list_adapters[adapter_name]: logger.warning( ( From 47ef8023451b694b5fb8acf043f36d9dbcf244cc Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Wed, 9 Oct 2024 18:20:09 +0530 Subject: [PATCH 07/11] sort --- src/diffusers/loaders/lora_base.py | 2 +- tests/lora/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/loaders/lora_base.py b/src/diffusers/loaders/lora_base.py index d5477a7c566b..dcae1f822a73 100644 --- a/src/diffusers/loaders/lora_base.py +++ b/src/diffusers/loaders/lora_base.py @@ -522,7 +522,7 @@ def set_adapters( components_passed = set(adapter_weights.keys()) lora_components = set(self._lora_loadable_modules) - invalid_components = components_passed - lora_components + invalid_components = sorted(components_passed - lora_components) if invalid_components: raise ValueError( f"The following components in `adapter_weights` are not part of the pipeline: {invalid_components}. 
" diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 21b99a39bfa6..a84dcaab74d2 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -1136,7 +1136,7 @@ def test_multiple_wrong_adapter_name_raises_error(self): with self.assertRaises(ValueError) as err_context: pipe.set_adapters("adapter-1", adapter_weights=scale_with_wrong_components) - wrong_components = set(scale_with_wrong_components.keys()) + wrong_components = sorted(set(scale_with_wrong_components.keys())) msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}" self.assertTrue(msg in str(err_context.exception)) From 533827550df6b7367a58a96110e982aaeac1468e Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Fri, 18 Oct 2024 11:35:52 +0530 Subject: [PATCH 08/11] quality --- tests/lora/test_lora_layers_sd3.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/lora/test_lora_layers_sd3.py b/tests/lora/test_lora_layers_sd3.py index 5aa923995a4d..fa2e7679293f 100644 --- a/tests/lora/test_lora_layers_sd3.py +++ b/tests/lora/test_lora_layers_sd3.py @@ -20,7 +20,6 @@ import torch from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel -from diffusers import FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Pipeline from diffusers import ( FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, @@ -30,13 +29,12 @@ from diffusers.utils import load_image from diffusers.utils.import_utils import is_accelerate_available from diffusers.utils.testing_utils import ( + is_flaky, is_peft_available, numpy_cosine_similarity_distance, require_peft_backend, require_torch_gpu, torch_device, - is_flaky, - is_peft_available, ) @@ -136,6 +134,7 @@ def test_modify_padding_mode(self): def test_multiple_wrong_adapter_name_raises_error(self): super().test_multiple_wrong_adapter_name_raises_error() + @require_torch_gpu @require_peft_backend class LoraSD3IntegrationTests(unittest.TestCase): 
From 0c9d532b146b6c468a67f0e5735cb3e449e6a543 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Mon, 25 Nov 2024 20:59:11 +0530
Subject: [PATCH 09/11] skip
 test_simple_inference_with_text_denoiser_multi_adapter_block_lora
---
 tests/lora/test_lora_layers_mochi.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/lora/test_lora_layers_mochi.py b/tests/lora/test_lora_layers_mochi.py
index 910b126c147b..b11cdc0762c4 100644
--- a/tests/lora/test_lora_layers_mochi.py
+++ b/tests/lora/test_lora_layers_mochi.py
@@ -178,3 +178,7 @@ def test_simple_inference_with_text_lora_fused(self):
     @unittest.skip("Text encoder LoRA is not supported in Mochi.")
     def test_simple_inference_with_text_lora_save_load(self):
         pass
+
+    @unittest.skip("Not supported in Mochi.")
+    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
+        pass

From 47172fd57669adeaff5deca1854f3cb4a87b3cd8 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Mon, 16 Dec 2024 08:49:13 +0530
Subject: [PATCH 10/11] skip testing unsupported features.
--- tests/lora/test_lora_layers_flux.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index a29632b51def..97b86f8fb76a 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -163,7 +163,7 @@ def test_with_alpha_in_state_dict(self): ) self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3)) - @unittest.skip("Not supported in SD3.") + @unittest.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale(self): pass @@ -554,6 +554,10 @@ def test_lora_expanding_shape_with_normal_lora_raises_error(self): "adapter-2", ) + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + @unittest.skip("Not supported in Flux.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @@ -562,6 +566,10 @@ def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(se def test_modify_padding_mode(self): pass + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + @slow @nightly From 1c864a1a60ac905451923ca5b616f32045625749 Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Mon, 16 Dec 2024 16:03:54 +0530 Subject: [PATCH 11/11] raise warning instead of error. 
--- src/diffusers/loaders/lora_base.py | 6 ++++-- tests/lora/utils.py | 8 +++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/diffusers/loaders/lora_base.py b/src/diffusers/loaders/lora_base.py index 3fc0cf0e9766..4fb3a95302c4 100644 --- a/src/diffusers/loaders/lora_base.py +++ b/src/diffusers/loaders/lora_base.py @@ -532,10 +532,12 @@ def set_adapters( invalid_components = sorted(components_passed - lora_components) if invalid_components: - raise ValueError( + logger.warning( f"The following components in `adapter_weights` are not part of the pipeline: {invalid_components}. " - f"Available components that are LoRA-compatible: {self._lora_loadable_modules}" + f"Available components that are LoRA-compatible: {self._lora_loadable_modules}. So, weights belonging " + "to the invalid components will be removed and ignored." ) + adapter_weights = {k: v for k, v in adapter_weights.items() if k not in invalid_components} adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names adapter_weights = copy.deepcopy(adapter_weights) diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 1a7026d950b3..71d4dec40591 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -1148,12 +1148,14 @@ def test_multiple_wrong_adapter_name_raises_error(self): ) scale_with_wrong_components = {"foo": 0.0, "bar": 0.0, "tik": 0.0} - with self.assertRaises(ValueError) as err_context: + logger = logging.get_logger("diffusers.loaders.lora_base") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: pipe.set_adapters("adapter-1", adapter_weights=scale_with_wrong_components) wrong_components = sorted(set(scale_with_wrong_components.keys())) - msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}" - self.assertTrue(msg in str(err_context.exception)) + msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}. 
" + self.assertTrue(msg in str(cap_logger.out)) # test this works. pipe.set_adapters("adapter-1")