From eae1371983efd7af6013e74add64e38d8385e35b Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 19 Sep 2023 03:37:22 +0000
Subject: [PATCH 1/2] wip

---
 tests/pipelines/test_pipelines_common.py | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 13866f997054..f7d6145a912f 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -242,7 +242,7 @@ class PipelineTesterMixin:
     test_xformers_attention = True
 
     def get_generator(self, seed):
-        device = torch_device if torch_device != "mps" else "cpu"
+        device = "cpu"
         generator = torch.Generator(device).manual_seed(seed)
         return generator
 
@@ -563,8 +563,20 @@ def test_float16_inference(self, expected_max_diff=1e-2):
         pipe_fp16.to(torch_device, torch.float16)
         pipe_fp16.set_progress_bar_config(disable=None)
 
-        output = pipe(**self.get_dummy_inputs(torch_device))[0]
-        output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        # Reset generator in case it is used inside dummy inputs
+        if "generator" in inputs:
+            inputs["generator"] = self.get_generator(0)
+
+        output = pipe(**inputs)[0]
+
+        fp16_inputs = self.get_dummy_inputs(generator_device)
+        # Reset generator in case it is used inside dummy inputs
+        if "generator" in fp16_inputs:
+            fp16_inputs["generator"] = self.get_generator(0)
+
+        output_fp16 = pipe_fp16(**fp16_inputs)[0]
 
         max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
         self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")

From f77b7a0f2700c9f2283af6ffd1e61f6ffb8d4599 Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Tue, 19 Sep 2023 04:32:19 +0000
Subject: [PATCH 2/2] fix tests

---
 .../pipelines/controlnet/test_controlnet_inpaint_sdxl.py |  3 +++
 tests/pipelines/kandinsky/test_kandinsky_combined.py     |  9 +++++++++
 tests/pipelines/kandinsky/test_kandinsky_inpaint.py      |  3 +++
 tests/pipelines/kandinsky_v22/test_kandinsky.py          |  3 +++
 tests/pipelines/kandinsky_v22/test_kandinsky_combined.py |  9 +++++++++
 .../pipelines/kandinsky_v22/test_kandinsky_controlnet.py |  3 +++
 .../kandinsky_v22/test_kandinsky_controlnet_img2img.py   |  3 +++
 tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py  |  3 +++
 tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py  |  3 +++
 .../stable_diffusion/test_stable_diffusion_img2img.py    |  3 +++
 .../test_stable_diffusion_latent_upscale.py              |  3 +++
 tests/pipelines/test_pipelines_common.py                 |  9 ++++-----
 tests/pipelines/unclip/test_unclip.py                    |  3 +++
 tests/pipelines/unclip/test_unclip_image_variation.py    |  3 +++
 14 files changed, 55 insertions(+), 5 deletions(-)

diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py b/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
index 81c789e71260..0ac8996fe0ef 100644
--- a/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
+++ b/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
@@ -299,3 +299,6 @@ def test_controlnet_sdxl_guess(self):
     # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
     def test_save_load_optional_components(self):
         pass
+
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
diff --git a/tests/pipelines/kandinsky/test_kandinsky_combined.py b/tests/pipelines/kandinsky/test_kandinsky_combined.py
index d2079d67b60e..4315c6ae9c87 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_combined.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_combined.py
@@ -133,6 +133,9 @@ def test_offloads(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1e-1)
+
 
 class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = KandinskyImg2ImgCombinedPipeline
@@ -233,6 +236,9 @@ def test_offloads(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
+
 
 class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = KandinskyInpaintCombinedPipeline
@@ -332,3 +338,6 @@ def test_offloads(self):
 
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
diff --git a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
index 73c4eadadd96..699d051d9e67 100644
--- a/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -290,6 +290,9 @@ def test_offloads(self):
         assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
         assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky.py b/tests/pipelines/kandinsky_v22/test_kandinsky.py
index 4f18990c2c0a..65dbf0a708eb 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky.py
@@ -215,6 +215,9 @@ def test_kandinsky(self):
             np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
index ba8888ee1fa6..3fdd79267c77 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
@@ -137,6 +137,9 @@ def test_offloads(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1e-1)
+
 
 class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = KandinskyV22Img2ImgCombinedPipeline
@@ -237,6 +240,9 @@ def test_offloads(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1e-1)
+
 
 class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = KandinskyV22InpaintCombinedPipeline
@@ -336,3 +342,6 @@ def test_offloads(self):
 
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1e-2)
+
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
index 575d0aaaa767..2d22b6f6ede5 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
@@ -218,6 +218,9 @@ def test_kandinsky_controlnet(self):
             np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
index 17394316ce7a..0c7b99580085 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_controlnet_img2img.py
@@ -228,6 +228,9 @@ def test_kandinsky_controlnet_img2img(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=1.75e-3)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=2e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
index 1454b061bc90..9a5b596def58 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
@@ -232,6 +232,9 @@ def test_kandinsky_img2img(self):
             np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
         ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=2e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
index d7fcf670278d..0df6320de8b3 100644
--- a/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
+++ b/tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
@@ -240,6 +240,9 @@ def test_kandinsky_inpaint(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
index cf22fccd8232..27d2fe8ec098 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -254,6 +254,9 @@ def test_attention_slicing_forward_pass(self):
     def test_inference_batch_single_identical(self):
         super().test_inference_batch_single_identical(expected_max_diff=3e-3)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
+
 
 @slow
 @require_torch_gpu
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
index 75199b55ee21..f41a066522b5 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
@@ -235,6 +235,9 @@ def test_karras_schedulers_shape(self):
 
         assert check_same_shape(outputs)
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=5e-1)
+
 
 @require_torch_gpu
 @slow
diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index f7d6145a912f..157ff2b69931 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -242,7 +242,7 @@ class PipelineTesterMixin:
     test_xformers_attention = True
 
     def get_generator(self, seed):
-        device = "cpu"
+        device = torch_device if torch_device != "mps" else "cpu"
         generator = torch.Generator(device).manual_seed(seed)
         return generator
 
@@ -544,7 +544,7 @@ def test_components_function(self):
         self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))
 
     @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
-    def test_float16_inference(self, expected_max_diff=1e-2):
+    def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         for component in pipe.components.values():
@@ -563,15 +563,14 @@ def test_float16_inference(self, expected_max_diff=5e-2):
         pipe_fp16.to(torch_device, torch.float16)
         pipe_fp16.set_progress_bar_config(disable=None)
 
-        generator_device = "cpu"
-        inputs = self.get_dummy_inputs(generator_device)
+        inputs = self.get_dummy_inputs(torch_device)
         # Reset generator in case it is used inside dummy inputs
         if "generator" in inputs:
             inputs["generator"] = self.get_generator(0)
 
         output = pipe(**inputs)[0]
 
-        fp16_inputs = self.get_dummy_inputs(generator_device)
+        fp16_inputs = self.get_dummy_inputs(torch_device)
         # Reset generator in case it is used inside dummy inputs
         if "generator" in fp16_inputs:
             fp16_inputs["generator"] = self.get_generator(0)
diff --git a/tests/pipelines/unclip/test_unclip.py b/tests/pipelines/unclip/test_unclip.py
index 111a8b918457..3c2af62dd0dc 100644
--- a/tests/pipelines/unclip/test_unclip.py
+++ b/tests/pipelines/unclip/test_unclip.py
@@ -419,6 +419,9 @@ def test_save_load_local(self):
     def test_save_load_optional_components(self):
         return super().test_save_load_optional_components()
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1.0)
+
 
 @nightly
 class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
diff --git a/tests/pipelines/unclip/test_unclip_image_variation.py b/tests/pipelines/unclip/test_unclip_image_variation.py
index 6b4e2b0fc0b4..c769e090a912 100644
--- a/tests/pipelines/unclip/test_unclip_image_variation.py
+++ b/tests/pipelines/unclip/test_unclip_image_variation.py
@@ -491,6 +491,9 @@ def test_save_load_local(self):
     def test_save_load_optional_components(self):
         return super().test_save_load_optional_components()
 
+    def test_float16_inference(self):
+        super().test_float16_inference(expected_max_diff=1.0)
+
 
 @slow
 @require_torch_gpu