diff --git a/tests/models/tvp/test_image_processing_tvp.py b/tests/models/tvp/test_image_processing_tvp.py index d2f05f4473aa..0b5d4e3bcb99 100644 --- a/tests/models/tvp/test_image_processing_tvp.py +++ b/tests/models/tvp/test_image_processing_tvp.py @@ -19,7 +19,7 @@ import numpy as np from transformers.image_transforms import PaddingMode -from transformers.testing_utils import require_torch, require_vision +from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs @@ -349,6 +349,17 @@ def test_call_pytorch(self): @require_vision @require_torch + @is_flaky( + description="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?" + ) + def test_slow_fast_equivalence(self): + super().test_slow_fast_equivalence() + + @require_vision + @require_torch + @is_flaky( + description="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?" + ) def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test")