From c8d2a55273757c90989fde7c6f05e957aba9a238 Mon Sep 17 00:00:00 2001
From: Deng Weishi
Date: Wed, 1 May 2024 17:45:11 +0000
Subject: [PATCH] Intel GPU: specify the tolerance for torchbench models
 (#125213)

We encountered some model accuracy failures because the tolerance setting is
critical. In general, we align with the CUDA practice. This PR adjusts the
tolerance for Torchbench models in training mode on Intel GPU devices to
align with CUDA.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125213
Approved by: https://github.com/desertfire
---
 benchmarks/dynamo/torchbench.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/dynamo/torchbench.py b/benchmarks/dynamo/torchbench.py
index a6b4edb3a4f0..274d04da1598 100755
--- a/benchmarks/dynamo/torchbench.py
+++ b/benchmarks/dynamo/torchbench.py
@@ -402,7 +402,7 @@ def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
         if name in self._tolerance["higher_bf16"]:
             return 1e-2, cosine
 
-        if is_training and current_device == "cuda":
+        if is_training and (current_device == "cuda" or current_device == "xpu"):
             tolerance = 1e-3
             if name in self._tolerance["cosine"]:
                 cosine = True
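
For context, below is a minimal standalone sketch of the tolerance selection
logic as it behaves after this patch. Only the lines visible in the hunk are
taken from the source; the `_tolerance` lookup tables, the 1e-4 default
tolerance, the default non-cosine comparison mode, and the free-function
signature are assumptions made so the sketch runs on its own (the real
benchmarks/dynamo/torchbench.py defines these elsewhere in the class).

# Sketch of the patched tolerance selection, based only on the hunk above.
# The tables, defaults, and function signature are assumptions for illustration.


def get_tolerance_and_cosine_flag(tolerance_tables, is_training, current_device, name):
    tolerance = 1e-4  # assumed default for models with no special-case entry
    cosine = False    # assumed default comparison mode

    # Models listed as needing a higher bf16 tolerance return early.
    if name in tolerance_tables.get("higher_bf16", set()):
        return 1e-2, cosine

    # The patched condition: training runs on CUDA *or* Intel GPU ("xpu")
    # get the looser 1e-3 tolerance and may switch to cosine similarity.
    if is_training and (current_device == "cuda" or current_device == "xpu"):
        tolerance = 1e-3
        if name in tolerance_tables.get("cosine", set()):
            cosine = True

    return tolerance, cosine


if __name__ == "__main__":
    # Hypothetical table contents, purely for demonstration.
    tables = {"higher_bf16": {"llama"}, "cosine": {"hf_Bert"}}
    # Before the patch, an "xpu" training run would have fallen through to 1e-4.
    print(get_tolerance_and_cosine_flag(tables, True, "xpu", "hf_Bert"))  # (0.001, True)
    print(get_tolerance_and_cosine_flag(tables, True, "cpu", "hf_Bert"))  # (0.0001, False)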