3 changes: 3 additions & 0 deletions tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
@@ -299,3 +299,6 @@ def test_controlnet_sdxl_guess(self):
    # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests
    def test_save_load_optional_components(self):
        pass

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)
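Each override in this diff relaxes the fp16-vs-fp32 tolerance for a single pipeline. For intuition on why the two outputs drift at all, here is a minimal standalone sketch (not part of the diff itself): float16 carries a 10-bit mantissa, so a single fp32 -> fp16 round trip already perturbs values in [0, 1) by up to roughly 2.4e-4, and such rounding errors compound across every layer of a UNet forward pass.

import torch

# Minimal sketch: quantization error from one fp32 -> fp16 round trip,
# before any accumulation through a network.
x = torch.rand(10_000)                      # values in [0, 1)
err = (x - x.half().float()).abs().max()
print(f"max single-cast error: {err:.1e}")  # on the order of 1e-4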
9 changes: 9 additions & 0 deletions tests/pipelines/kandinsky/test_kandinsky_combined.py
@@ -133,6 +133,9 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

@@ -236,6 +239,9 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

@@ -339,5 +345,8 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)
3 changes: 3 additions & 0 deletions tests/pipelines/kandinsky/test_kandinsky_inpaint.py
@@ -290,6 +290,9 @@ def test_offloads(self):
        assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
        assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

@nightly
@require_torch_gpu
3 changes: 3 additions & 0 deletions tests/pipelines/kandinsky_v22/test_kandinsky.py
@@ -215,6 +215,9 @@ def test_kandinsky(self):
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)


@slow
@require_torch_gpu
9 changes: 9 additions & 0 deletions tests/pipelines/kandinsky_v22/test_kandinsky_combined.py
@@ -137,6 +137,9 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

@@ -243,6 +246,9 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

@@ -349,6 +355,9 @@ def test_offloads(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1e-2)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4)

3 changes: 3 additions & 0 deletions tests/pipelines/kandinsky_v22/test_kandinsky_controlnet.py
@@ -218,6 +218,9 @@ def test_kandinsky_controlnet(self):
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1e-1)


@nightly
@require_torch_gpu
@@ -228,6 +228,9 @@ def test_kandinsky_controlnet_img2img(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=1.75e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)


@slow
@require_torch_gpu
3 changes: 3 additions & 0 deletions tests/pipelines/kandinsky_v22/test_kandinsky_img2img.py
@@ -232,6 +232,9 @@ def test_kandinsky_img2img(self):
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=2e-1)


@slow
@require_torch_gpu
3 changes: 3 additions & 0 deletions tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py
@@ -240,6 +240,9 @@ def test_kandinsky_inpaint(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)

    def test_model_cpu_offload_forward_pass(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-4)

@@ -254,6 +254,9 @@ def test_attention_slicing_forward_pass(self):
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)


@slow
@require_torch_gpu
@@ -235,6 +235,9 @@ def test_karras_schedulers_shape(self):

        assert check_same_shape(outputs)

    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=5e-1)


@require_torch_gpu
@slow
17 changes: 14 additions & 3 deletions tests/pipelines/test_pipelines_common.py
@@ -544,7 +544,7 @@ def test_components_function(self):
        self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
-    def test_float16_inference(self, expected_max_diff=1e-2):
+    def test_float16_inference(self, expected_max_diff=5e-2):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
@@ -563,8 +563,19 @@ def test_float16_inference(self, expected_max_diff=1e-2):
        pipe_fp16.to(torch_device, torch.float16)
        pipe_fp16.set_progress_bar_config(disable=None)

-        output = pipe(**self.get_dummy_inputs(torch_device))[0]
-        output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]
+        inputs = self.get_dummy_inputs(torch_device)
+        # Reset generator in case it is used inside dummy inputs
+        if "generator" in inputs:
+            inputs["generator"] = self.get_generator(0)
+
+        output = pipe(**inputs)[0]
+
+        fp16_inputs = self.get_dummy_inputs(torch_device)
+        # Reset generator in case it is used inside dummy inputs
+        if "generator" in fp16_inputs:
+            fp16_inputs["generator"] = self.get_generator(0)
+
+        output_fp16 = pipe_fp16(**fp16_inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
        self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")
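The `self.get_generator(0)` helper is assumed to hand back a freshly seeded `torch.Generator` on the active test device, so the fp32 and fp16 runs consume identical noise even though `get_dummy_inputs` is called twice. A minimal sketch of such a helper, assuming a module-level `torch_device` as used above:

import torch

def get_generator(self, seed):
    # Assumed helper: build a deterministic generator on the test device;
    # backends without device-side generators (e.g. MPS) fall back to CPU.
    device = torch_device if torch_device != "mps" else "cpu"
    return torch.Generator(device=device).manual_seed(seed)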
4 changes: 4 additions & 0 deletions tests/pipelines/unclip/test_unclip.py
@@ -418,6 +418,10 @@ def test_save_load_local(self):
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @unittest.skip("UnCLIP produces very large differences in fp16 vs fp32. Test is not useful.")
    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1.0)
patrickvonplaten (Contributor) commented on Sep 19, 2023:

Suggested change:
-    super().test_float16_inference(expected_max_diff=1.0)
+    ...

I think values cannot have a higher diff than 1.0, so this test is a bit moot. Let's maybe just skip it.
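For context on the reviewer's point, under the assumption that these pipelines return images scaled to [0, 1]: the absolute difference between any two such arrays is bounded by 1.0, so a tolerance of 1.0 can never fail and the assertion is vacuous.

import numpy as np

# Sketch: with outputs confined to [0, 1], even the extreme case of an
# all-black vs. all-white image stays within a tolerance of 1.0.
a = np.zeros((8, 8, 3))  # all-black image
b = np.ones((8, 8, 3))   # all-white image
assert np.abs(a - b).max() <= 1.0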



@nightly
class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase):
4 changes: 4 additions & 0 deletions tests/pipelines/unclip/test_unclip_image_variation.py
@@ -491,6 +491,10 @@ def test_save_load_local(self):
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @unittest.skip("UnCLIP produces very large difference in fp16 vs fp32. Test is not useful.")
    def test_float16_inference(self):
        super().test_float16_inference(expected_max_diff=1.0)
A Contributor commented:

Suggested change:
-    super().test_float16_inference(expected_max_diff=1.0)
+    ...

I think values cannot have a higher diff than 1.0, so this test is a bit moot. Let's maybe just skip it.



@nightly
@require_torch_gpu