From 78092b60ac4c7174e3ddfd77c32da855dca8cd89 Mon Sep 17 00:00:00 2001
From: Hardik Sharma
Date: Wed, 26 Nov 2025 08:17:23 -0800
Subject: [PATCH] Fix implicit float-to-double promotion. (#15957)

Summary: As titled: add explicit casts so float values are no longer
implicitly promoted to double in comparisons and return expressions.

Reviewed By: lucylq

Differential Revision: D87749870
---
 kernels/portable/cpu/scalar_utils.h                 | 4 ++--
 kernels/portable/cpu/util/distance_util.h           | 2 +-
 kernels/portable/cpu/util/math_util.h               | 3 ++-
 runtime/core/exec_aten/testing_util/tensor_util.cpp | 5 +++--
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/kernels/portable/cpu/scalar_utils.h b/kernels/portable/cpu/scalar_utils.h
index 312a663c0e1..18cc1f467df 100644
--- a/kernels/portable/cpu/scalar_utils.h
+++ b/kernels/portable/cpu/scalar_utils.h
@@ -221,8 +221,8 @@ bool extract_scalar(Scalar scalar, FLOAT_T* out_val) {
   // be represented when FLOAT_T == float. float can, however, represent
   // infinite and NaN values.
   if (std::isfinite(val) &&
-      (val < std::numeric_limits<FLOAT_T>::lowest() ||
-       val > std::numeric_limits<FLOAT_T>::max())) {
+      (val < static_cast<double>(std::numeric_limits<FLOAT_T>::lowest()) ||
+       val > static_cast<double>(std::numeric_limits<FLOAT_T>::max()))) {
     // PyTorch's implementation of clamp() raises an exception if the min/max
     // values cannot be represented as the dtype, so we should fail too.
     return false;
diff --git a/kernels/portable/cpu/util/distance_util.h b/kernels/portable/cpu/util/distance_util.h
index 05406e35489..a54e02dab28 100644
--- a/kernels/portable/cpu/util/distance_util.h
+++ b/kernels/portable/cpu/util/distance_util.h
@@ -116,7 +116,7 @@ void pdist(const Tensor& in, Tensor& out, double p) {
     pdist<CTYPE, L1<CTYPE>>(in, out, p);
   } else if (p == 2.0) {
     pdist<CTYPE, L2<CTYPE>>(in, out, p);
-  } else if (p == INFINITY) {
+  } else if (p == static_cast<double>(INFINITY)) {
     pdist<CTYPE, Linf<CTYPE>>(in, out, p);
   } else {
     pdist<CTYPE, Lp<CTYPE>>(in, out, p);
diff --git a/kernels/portable/cpu/util/math_util.h b/kernels/portable/cpu/util/math_util.h
index a3a64997a5f..434c51dcbae 100644
--- a/kernels/portable/cpu/util/math_util.h
+++ b/kernels/portable/cpu/util/math_util.h
@@ -47,7 +47,8 @@ template <
     type = true>
 FLOAT_T floor_divide(FLOAT_T a, FLOAT_T b) {
   if (b == 0) {
-    return std::signbit(a) ? -INFINITY : INFINITY;
+    return std::signbit(a) ? static_cast<FLOAT_T>(-INFINITY)
+                           : static_cast<FLOAT_T>(INFINITY);
   }
   const auto mod = std::fmod(a, b);
   auto div = (a - mod) / b;
diff --git a/runtime/core/exec_aten/testing_util/tensor_util.cpp b/runtime/core/exec_aten/testing_util/tensor_util.cpp
index 218a64cf9dd..3728de49a5c 100644
--- a/runtime/core/exec_aten/testing_util/tensor_util.cpp
+++ b/runtime/core/exec_aten/testing_util/tensor_util.cpp
@@ -54,8 +54,9 @@ bool element_is_close(const T a, const T b, double rtol, double atol) {
       return false;
     }
   } else {
-    auto allowed_error = atol + std::abs(rtol * b);
-    auto actual_error = std::abs(a - b);
+    const double allowed_error =
+        atol + std::abs(rtol * static_cast<double>(b));
+    const double actual_error = static_cast<double>(std::abs(a - b));
     if (!std::isfinite(actual_error) || actual_error > allowed_error) {
       return false;
     }
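
For context on the class of warning this patch addresses: when a float-typed
expression (such as the INFINITY macro or std::numeric_limits<float>::max())
appears in a comparison or arithmetic expression with a double, it is
implicitly promoted to double, which compilers can flag under warnings like
-Wdouble-promotion. The sketch below is illustrative only and is not part of
the change; the file name, function names, and the exact warning flag used by
this build are assumptions.

// promotion_demo.cpp -- illustrative sketch only, not part of the patch.
// Compile with, e.g.: clang++ -std=c++17 -Wdouble-promotion -c promotion_demo.cpp
#include <cmath>
#include <limits>

// Mirrors the scalar_utils.h change: `val` is a double, the limits are float.
// The float limits are implicitly promoted to double in the comparison, which
// -Wdouble-promotion flags.
bool fits_in_float_before(double val) {
  return std::isfinite(val) && val >= std::numeric_limits<float>::lowest() &&
         val <= std::numeric_limits<float>::max();
}

// Same logic with the promotion made explicit, as in the patch.
bool fits_in_float_after(double val) {
  return std::isfinite(val) &&
         val >= static_cast<double>(std::numeric_limits<float>::lowest()) &&
         val <= static_cast<double>(std::numeric_limits<float>::max());
}

// Mirrors the distance_util.h change: the INFINITY macro has type float, so
// comparing it against a double also promotes implicitly unless cast.
bool is_inf_norm(double p) {
  return p == static_cast<double>(INFINITY);
}

The explicit static_cast keeps runtime behavior identical; it only documents
that the promotion is intentional, so the warning can be enabled without
tripping on these sites.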