diff --git a/test/test_models.py b/test/test_models.py
index 29b57c60cca..e9eeed7c196 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -233,6 +233,8 @@ def _check_input_backprop(model, inputs):
     "keypointrcnn_resnet50_fpn",
 )
 
+autocast_custom_prec = {"fasterrcnn_resnet50_fpn": 0.012} if platform.system() == "Windows" else {}
+
 # The tests for the following quantized models are flaky possibly due to inconsistent
 # rounding errors in different platforms. For this reason the input/output consistency
 # tests under test_quantized_classification_model will be skipped for the following models.
@@ -738,7 +740,7 @@ def test_detection_model(model_fn, dev):
         out = model(model_input)
         assert model_input[0] is x
 
-    def check_out(out):
+    def check_out(out, prec=0.01):
         assert len(out) == 1
 
         def compact(tensor):
@@ -767,7 +769,6 @@ def compute_mean_std(tensor):
             return {"mean": mean, "std": std}
 
         output = map_nested_tensor_object(out, tensor_map_fn=compact)
-        prec = 0.01
         try:
             # We first try to assert the entire output if possible. This is not
             # only the best way to assert results but also handles the cases
@@ -800,7 +801,7 @@ def compute_mean_std(tensor):
             out = model(model_input)
             # See autocast_flaky_numerics comment at top of file.
             if model_name not in autocast_flaky_numerics:
-                full_validation &= check_out(out)
+                full_validation &= check_out(out, autocast_custom_prec.get(model_name, 0.01))
 
     if not full_validation:
         msg = (