diff --git a/tests/models/tvp/test_image_processing_tvp.py b/tests/models/tvp/test_image_processing_tvp.py
index 0b5d4e3bcb99..28581290e9d1 100644
--- a/tests/models/tvp/test_image_processing_tvp.py
+++ b/tests/models/tvp/test_image_processing_tvp.py
@@ -19,7 +19,7 @@
 import numpy as np
 
 from transformers.image_transforms import PaddingMode
-from transformers.testing_utils import is_flaky, require_torch, require_vision
+from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
 
 from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs
@@ -349,16 +349,16 @@ def test_call_pytorch(self):
 
     @require_vision
     @require_torch
-    @is_flaky(
-        description="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?"
+    @unittest.skip(
+        reason="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?"
     )
     def test_slow_fast_equivalence(self):
         super().test_slow_fast_equivalence()
 
     @require_vision
     @require_torch
-    @is_flaky(
-        description="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?"
+    @unittest.skip(
+        reason="FIXME: @yoni probably because of an extra 'time' dimension and since image processors don't handle it well?"
     )
     def test_slow_fast_equivalence_batched(self):
         if not self.test_slow_image_processor or not self.test_fast_image_processor:
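
Note on the change (commentary, not part of the diff): the `is_flaky` decorator from `transformers.testing_utils` re-runs a failing test a few times before reporting a failure, whereas `unittest.skip` from the standard library never runs the test at all, so the FIXME'd failures stop surfacing in CI until the underlying issue is fixed. Below is a minimal, self-contained sketch of the skip behavior; the test class and body are hypothetical stand-ins, not code from the repository.

import unittest


class ExampleEquivalenceTest(unittest.TestCase):
    @unittest.skip(reason="FIXME: known failure, mirrors the decorator used in the diff above")
    def test_slow_fast_equivalence(self):
        # Never executed while the skip decorator is in place;
        # the runner reports the test as skipped rather than failed.
        self.assertTrue(False)


if __name__ == "__main__":
    unittest.main()  # reports something like "OK (skipped=1)"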