Skip to content

Commit 8049117 — "update"
Parent: 1d686ba

File tree: 1 file changed (+19 additions, −17 deletions)

tests/lora/test_lora_layers_peft.py

Lines changed: 19 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
     floats_tensor,
     load_image,
     nightly,
+    numpy_cosine_similarity_distance,
     require_peft_backend,
     require_torch_gpu,
     slow,
@@ -1753,7 +1754,8 @@ def test_sdxl_lcm_lora(self):
         image_np = pipe.image_processor.pil_to_numpy(image)
         expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
 
-        self.assertTrue(np.allclose(image_np, expected_image_np, atol=1e-2))
+        max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
+        assert max_diff < 1e-4
 
         pipe.unload_lora_weights()

@@ -1780,7 +1782,8 @@ def test_sdv1_5_lcm_lora(self):
         image_np = pipe.image_processor.pil_to_numpy(image)
         expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
 
-        self.assertTrue(np.allclose(image_np, expected_image_np, atol=1e-2))
+        max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
+        assert max_diff < 1e-4
 
         pipe.unload_lora_weights()

@@ -1816,7 +1819,8 @@ def test_sdv1_5_lcm_lora_img2img(self):
         image_np = pipe.image_processor.pil_to_numpy(image)
         expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
 
-        self.assertTrue(np.allclose(image_np, expected_image_np, atol=1e-2))
+        max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
+        assert max_diff < 1e-4
 
         pipe.unload_lora_weights()

@@ -1849,7 +1853,7 @@ def test_sdxl_1_0_lora_fusion(self):
         release_memory(pipe)
 
     def test_sdxl_1_0_lora_unfusion(self):
-        generator = torch.Generator().manual_seed(0)
+        generator = torch.Generator("cpu").manual_seed(0)
 
         pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
         lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
@@ -1860,16 +1864,18 @@ def test_sdxl_1_0_lora_unfusion(self):
         pipe.enable_model_cpu_offload()
 
         images = pipe(
-            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+            "masterpiece, best quality, mountain", output_type="pil", generator=generator, num_inference_steps=3
         ).images
-        images_with_fusion = images[0, -3:, -3:, -1].flatten()
+        images[0].save("fused.png")
+        images_with_fusion = images.flatten()
 
         pipe.unfuse_lora()
-        generator = torch.Generator().manual_seed(0)
+        generator = torch.Generator("cpu").manual_seed(0)
         images = pipe(
-            "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
+            "masterpiece, best quality, mountain", output_type="pil", generator=generator, num_inference_steps=3
         ).images
-        images_without_fusion = images[0, -3:, -3:, -1].flatten()
+        images[0].save("unfused.png")
+        images_without_fusion = images.flatten()
 
         self.assertTrue(np.allclose(images_with_fusion, images_without_fusion, atol=1e-3))
         release_memory(pipe)
@@ -1913,10 +1919,8 @@ def test_sdxl_1_0_lora_fusion_efficiency(self):
         lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
         lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
 
-        pipe = DiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16
-        )
-        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.bfloat16)
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
+        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
         pipe.enable_model_cpu_offload()
 
         start_time = time.time()
@@ -1929,10 +1933,8 @@ def test_sdxl_1_0_lora_fusion_efficiency(self):
 
         del pipe
 
-        pipe = DiffusionPipeline.from_pretrained(
-            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16
-        )
-        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.bfloat16)
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
+        pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
         pipe.fuse_lora()
         # We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being
         # silently deleted - otherwise this will CPU OOM

Comments: 0