diff --git a/kernels/portable/cpu/op_constant_pad_nd.cpp b/kernels/portable/cpu/op_constant_pad_nd.cpp
index 6e643e1b945..be3962e018c 100644
--- a/kernels/portable/cpu/op_constant_pad_nd.cpp
+++ b/kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -185,7 +185,10 @@ Tensor& constant_pad_nd_out(
   ScalarType in_type = in.scalar_type();
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "constant_pad_nd.out", CTYPE, [&]() {
-    const CTYPE value_casted = utils::scalar_to<CTYPE>(value);
+    auto opt_value_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(value);
+    ET_KERNEL_CHECK(ctx, opt_value_casted.has_value(), InvalidArgument, );
+    auto value_casted = opt_value_casted.value();
     constant_pad_nd_out_impl<CTYPE>(in, pad, value_casted, out);
   });
 
diff --git a/kernels/portable/cpu/op_full.cpp b/kernels/portable/cpu/op_full.cpp
index 83ffcad45a6..b83637f2b91 100644
--- a/kernels/portable/cpu/op_full.cpp
+++ b/kernels/portable/cpu/op_full.cpp
@@ -37,7 +37,10 @@ Tensor& full_out(
   constexpr auto name = "full.out";
 
   ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
-    CTYPE_OUT val_casted = utils::scalar_to<CTYPE_OUT>(fill_value);
+    auto opt_val_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE_OUT>(fill_value);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    auto val_casted = opt_val_casted.value();
     auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
     for (const auto i : c10::irange(out.numel())) {
       data_out[i] = val_casted;
diff --git a/kernels/portable/cpu/op_full_like.cpp b/kernels/portable/cpu/op_full_like.cpp
index 7671cd61ea9..0e263fb9c10 100644
--- a/kernels/portable/cpu/op_full_like.cpp
+++ b/kernels/portable/cpu/op_full_like.cpp
@@ -48,23 +48,19 @@ Tensor& full_like_out(
       out,
       "Failed to resize output tensor.");
 
-  ScalarType val_type = utils::get_scalar_dtype(fill_value);
   ScalarType out_type = out.scalar_type();
 
   constexpr auto name = "scalar_tensor.out";
 
-  ET_SWITCH_REALB_TYPES(val_type, ctx, name, CTYPE_VAL, [&] {
-    CTYPE_VAL val;
-    ET_KERNEL_CHECK(
-        ctx, utils::extract_scalar(fill_value, &val), InvalidArgument, );
-
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
-      CTYPE_OUT val_casted = static_cast<CTYPE_OUT>(val);
-      auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
-      for (const auto i : c10::irange(out.numel())) {
-        data_out[i] = val_casted;
-      }
-    });
+  ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+    auto opt_val_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE_OUT>(fill_value);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    auto val_casted = opt_val_casted.value();
+    auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
+    for (const auto i : c10::irange(out.numel())) {
+      data_out[i] = val_casted;
+    }
   });
 
   return out;
diff --git a/kernels/portable/cpu/op_hardtanh.cpp b/kernels/portable/cpu/op_hardtanh.cpp
index 8ec73b07856..65411d5f6b0 100644
--- a/kernels/portable/cpu/op_hardtanh.cpp
+++ b/kernels/portable/cpu/op_hardtanh.cpp
@@ -45,8 +45,15 @@ Tensor& hardtanh_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_REALHBF16_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
-    const CTYPE min_casted = utils::scalar_to<CTYPE>(min);
-    const CTYPE max_casted = utils::scalar_to<CTYPE>(max);
+    auto opt_min_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(min);
+    ET_KERNEL_CHECK(ctx, opt_min_casted.has_value(), InvalidArgument, );
+    auto min_casted = opt_min_casted.value();
+
+    auto opt_max_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(max);
+    ET_KERNEL_CHECK(ctx, opt_max_casted.has_value(), InvalidArgument, );
+    auto max_casted = opt_max_casted.value();
 
     apply_unary_map_fn(
         [min_casted, max_casted](const CTYPE val_in) {
diff --git a/kernels/portable/cpu/op_leaky_relu.cpp b/kernels/portable/cpu/op_leaky_relu.cpp
index 11860c8d129..fa62a75974e 100644
--- a/kernels/portable/cpu/op_leaky_relu.cpp
+++ b/kernels/portable/cpu/op_leaky_relu.cpp
@@ -44,7 +44,11 @@ Tensor& leaky_relu_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_FLOATHBF16_TYPES(in_type, ctx, "leaky_relu.out", CTYPE, [&]() {
-    const CTYPE negative_slope_casted = utils::scalar_to<CTYPE>(negative_slope);
+    auto opt_negative_slope_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(negative_slope);
+    ET_KERNEL_CHECK(
+        ctx, opt_negative_slope_casted.has_value(), InvalidArgument, );
+    auto negative_slope_casted = opt_negative_slope_casted.value();
 
     apply_unary_map_fn(
         [negative_slope_casted](const CTYPE val_in) {
diff --git a/kernels/portable/cpu/op_scalar_tensor.cpp b/kernels/portable/cpu/op_scalar_tensor.cpp
index e111a9ac869..bff4ecc318c 100644
--- a/kernels/portable/cpu/op_scalar_tensor.cpp
+++ b/kernels/portable/cpu/op_scalar_tensor.cpp
@@ -24,17 +24,11 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) {
 
   constexpr auto name = "scalar_tensor.out";
 
-  if (s.isFloatingPoint() &&
-      executorch::runtime::isIntegralType(out_type, false)) {
-    ET_SWITCH_INT_TYPES(out_type, ctx, name, CTYPE, [&]() {
-      out.mutable_data_ptr<CTYPE>()[0] =
-          static_cast<CTYPE>(utils::scalar_to<double>(s));
-    });
-  } else {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() {
-      out.mutable_data_ptr<CTYPE>()[0] = utils::scalar_to<CTYPE>(s);
-    });
-  }
+  ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() {
+    auto opt_val_casted = utils::internal::check_overflow_scalar_cast<CTYPE>(s);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    out.mutable_data_ptr<CTYPE>()[0] = opt_val_casted.value();
+  });
 
   return out;
 }
diff --git a/kernels/portable/cpu/op_scatter.cpp b/kernels/portable/cpu/op_scatter.cpp
index 7de0ec4d5f9..965afbb4b66 100644
--- a/kernels/portable/cpu/op_scatter.cpp
+++ b/kernels/portable/cpu/op_scatter.cpp
@@ -154,7 +154,9 @@ Tensor& scatter_value_out(
   constexpr auto name = "scatter.value_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
-    const CTYPE val = utils::scalar_to<CTYPE>(value);
+    auto opt_val = utils::internal::check_overflow_scalar_cast<CTYPE>(value);
+    ET_KERNEL_CHECK(ctx, opt_val.has_value(), InvalidArgument, );
+    auto val = opt_val.value();
     scatter_value_helper<CTYPE>(in, dim, index, val, out);
   });
 
diff --git a/kernels/test/op_constant_pad_nd_test.cpp b/kernels/test/op_constant_pad_nd_test.cpp
index 88bee1d0ad9..7f44068d9cb 100644
--- a/kernels/test/op_constant_pad_nd_test.cpp
+++ b/kernels/test/op_constant_pad_nd_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -347,6 +348,21 @@ class OpConstantPadNDOutTest : public OperatorTest {
     op_constant_pad_nd_out(self, padding_ref, 7, out);
     EXPECT_TENSOR_CLOSE(out, expected);
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    const std::vector<int32_t> sizes = {2, 2};
+    const std::vector<int32_t> sizes_out = {2, 4};
+    const std::vector<int64_t> padding = {1, 1};
+
+    IntArrayRef padding_ref = IntArrayRef(padding.data(), padding.size());
+    Tensor self = tf.ones(sizes);
+    Tensor out = tf.zeros(sizes_out);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_constant_pad_nd_out(self, padding_ref, bad_value, out));
+  }
 };
 
 TEST_F(OpConstantPadNDOutTest, TestPadDim2) {
@@ -465,3 +481,5 @@ TEST_F(OpConstantPadNDOutTest, IncorrectOutputShapeFail) {
   ET_EXPECT_KERNEL_FAILURE(
       context_, op_constant_pad_nd_out(self, padding_ref, 0, out));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpConstantPadNDOutTest)
diff --git a/kernels/test/op_full_like_test.cpp b/kernels/test/op_full_like_test.cpp
index c0b9dcc4107..6e7692f5347 100644
--- a/kernels/test/op_full_like_test.cpp
+++ b/kernels/test/op_full_like_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -65,6 +66,18 @@ class OpFullLikeTest : public OperatorTest {
     ET_EXPECT_KERNEL_FAILURE(
         context_, op_full_like_out(in, value, memory_format, out));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    const std::vector<int32_t> sizes = {2, 2};
+    Tensor in = tf.zeros(sizes);
+    Tensor out = tf.zeros(sizes);
+    optional<MemoryFormat> memory_format;
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_full_like_out(in, bad_value, memory_format, out));
+  }
 };
 
 template <>
@@ -209,3 +222,5 @@ TEST_F(OpFullLikeTest, HalfSupport) {
   op_full_like_out(in, INFINITY, memory_format, out);
   EXPECT_TENSOR_CLOSE(out, tf.full({2, 3}, INFINITY));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpFullLikeTest)
diff --git a/kernels/test/op_full_test.cpp b/kernels/test/op_full_test.cpp
index 93129679087..35115dc7ed6 100644
--- a/kernels/test/op_full_test.cpp
+++ b/kernels/test/op_full_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -59,6 +60,17 @@ class OpFullOutTest : public OperatorTest {
     op_full_out(aref, 1.0, out);
     EXPECT_TENSOR_EQ(out, tf.ones(size_int32_t));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    std::vector<int32_t> sizes = {2, 2};
+    std::vector<int64_t> sizes_int64_t(sizes.begin(), sizes.end());
+    auto aref = IntArrayRef(sizes_int64_t.data(), sizes_int64_t.size());
+    Tensor out = tf.zeros(sizes);
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_full_out(aref, bad_value, out));
+  }
 };
 
 #define GENERATE_TEST(_, DTYPE) \
@@ -72,20 +84,7 @@ class OpFullOutTest : public OperatorTest {
 
 ET_FORALL_REALHBF16_TYPES(GENERATE_TEST)
 
-TEST_F(OpFullOutTest, ValueOverflow) {
-  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
-    GTEST_SKIP() << "ATen kernel doesn't handle overflow";
-  }
-  TensorFactory tf;
-
-  std::vector<int64_t> sizes_int64_t_vec = {2, 3};
-  std::vector<int32_t> sizes_in32_t_vec = {2, 3};
-  auto sizes = IntArrayRef(sizes_int64_t_vec.data(), sizes_int64_t_vec.size());
-
-  Tensor out = tf.zeros(sizes_in32_t_vec);
-
-  op_full_out(sizes, 1000, out);
-}
+GENERATE_SCALAR_OVERFLOW_TESTS(OpFullOutTest)
 
 TEST_F(OpFullOutTest, HalfSupport) {
   TensorFactory<ScalarType::Half> tf;
diff --git a/kernels/test/op_hardtanh_test.cpp b/kernels/test/op_hardtanh_test.cpp
index 72d09063d3e..38b0eeea40f 100644
--- a/kernels/test/op_hardtanh_test.cpp
+++ b/kernels/test/op_hardtanh_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -51,6 +52,21 @@ class OpHardTanhTest : public OperatorTest {
     EXPECT_TENSOR_EQ(out, ret);
     EXPECT_TENSOR_EQ(out, tf.make({2, 2}, {lower_bound, 0, 1, 2}));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor in = tf.ones({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    // Test overflow for min parameter (using valid max)
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_hardtanh_out(in, bad_value, 1.0, out));
+
+    // Test overflow for max parameter (using valid min)
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_hardtanh_out(in, -1.0, bad_value, out));
+  }
 };
 
 TEST_F(OpHardTanhTest, SanityCheck) {
@@ -58,3 +74,5 @@ TEST_F(OpHardTanhTest, SanityCheck) {
   ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpHardTanhTest)
diff --git a/kernels/test/op_leaky_relu_test.cpp b/kernels/test/op_leaky_relu_test.cpp
index 847c00652be..6b2e3083e2e 100644
--- a/kernels/test/op_leaky_relu_test.cpp
+++ b/kernels/test/op_leaky_relu_test.cpp
@@ -40,6 +40,15 @@ class OpLeakyReluTest : public OperatorTest {
     EXPECT_TENSOR_EQ(out, ret);
     EXPECT_TENSOR_EQ(out, tf.ones({2, 2}));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor in = tf.ones({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_leaky_relu_out(in, bad_value, out));
+  }
 };
 
 TEST_F(OpLeakyReluTest, SanityCheck) {
@@ -47,3 +56,13 @@ TEST_F(OpLeakyReluTest, SanityCheck) {
   ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
+
+TEST_F(OpLeakyReluTest, FloatTensorTooSmallScalarDies) {
+  /* Cannot be represented by a float. */
+  expect_bad_scalar_value_dies<ScalarType::Float>(-3.41e+38);
+}
+
+TEST_F(OpLeakyReluTest, FloatTensorTooLargeScalarDies) {
+  /* Cannot be represented by a float. */
+  expect_bad_scalar_value_dies<ScalarType::Float>(3.41e+38);
+}
diff --git a/kernels/test/op_scalar_tensor_test.cpp b/kernels/test/op_scalar_tensor_test.cpp
index db4816e8847..0be6f395eb0 100644
--- a/kernels/test/op_scalar_tensor_test.cpp
+++ b/kernels/test/op_scalar_tensor_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -71,6 +72,14 @@ class OpScalarTensorOutTest : public OperatorTest {
 
     ET_EXPECT_KERNEL_FAILURE(context_, op_scalar_tensor_out(value, out));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor out = tf.zeros({});
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_scalar_tensor_out(bad_value, out));
+  }
 };
 
 #define GENERATE_TEST_0D(ctype, dtype) \
@@ -131,3 +140,5 @@ TEST_F(OpScalarTensorOutTest, HalfSupport) {
   op_scalar_tensor_out(INFINITY, out);
   EXPECT_TENSOR_CLOSE(out, tf.make({}, {INFINITY}));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpScalarTensorOutTest)
diff --git a/kernels/test/op_scatter_test.cpp b/kernels/test/op_scatter_test.cpp
index 0e55aadaeda..dac9017d188 100644
--- a/kernels/test/op_scatter_test.cpp
+++ b/kernels/test/op_scatter_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include // Declares the operator
+#include
 #include
 #include
 #include
@@ -364,6 +365,19 @@ class OpScatterValueOutTest : public OperatorTest {
     op_scatter_value_out(input, 2, index, value, out);
     EXPECT_TENSOR_EQ(out, expected);
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    TensorFactory<ScalarType::Long> tf_index;
+
+    Tensor self = tf.ones({2, 2});
+    Tensor index = tf_index.zeros({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_scatter_value_out(self, 0, index, bad_value, out));
+  }
 };
 
 TEST_F(OpScatterSrcOutTest, AllValidInputOutputSupport) {
@@ -652,3 +666,5 @@ TEST_F(OpScatterSrcOutTest, InvalidOneDimInputAndZeroDimIndex) {
   ET_EXPECT_KERNEL_FAILURE(
       context_, op_scatter_src_out(self, 0, index, src, out));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpScatterValueOutTest)
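
Reviewer note (outside the diff): every kernel change above applies the same pattern. The incoming Scalar is converted with utils::internal::check_overflow_scalar_cast<CTYPE>(...), the kernel fails via ET_KERNEL_CHECK(ctx, opt.has_value(), InvalidArgument, ) when the returned optional is empty, and only then is the value unwrapped and used. The helper's definition is not part of this diff; the sketch below is a simplified, hypothetical stand-in under stated assumptions (the name checked_scalar_cast, the double parameter, and the std::optional return are illustrative guesses inferred from the call sites, not the actual ExecuTorch API), intended only to show what "reject scalar values the output dtype cannot represent" means here.

#include <cmath>
#include <limits>
#include <optional>
#include <type_traits>

// Hypothetical, simplified stand-in for an overflow-checked scalar cast:
// returns std::nullopt when `v` cannot be represented by T, so a caller can
// fail with InvalidArgument instead of silently wrapping or saturating.
template <typename T>
std::optional<T> checked_scalar_cast(double v) {
  if (std::is_floating_point<T>::value) {
    // Inf/NaN are representable by floating-point targets; only reject finite
    // values whose magnitude exceeds T's finite range (e.g. 3.41e+38 does not
    // fit in a 32-bit float, matching the leaky_relu tests above).
    if (std::isfinite(v) &&
        (v < -static_cast<double>(std::numeric_limits<T>::max()) ||
         v > static_cast<double>(std::numeric_limits<T>::max()))) {
      return std::nullopt;
    }
  } else {
    // Integral targets: reject NaN and anything outside [lowest, max],
    // e.g. 1000 for uint8_t or any negative value for an unsigned type.
    if (std::isnan(v) ||
        v < static_cast<double>(std::numeric_limits<T>::lowest()) ||
        v > static_cast<double>(std::numeric_limits<T>::max())) {
      return std::nullopt;
    }
  }
  return static_cast<T>(v);
}

With a helper of this shape, each + block in the kernels reduces to the same three steps: obtain the optional, check has_value() with ET_KERNEL_CHECK, then unwrap with value(). The GENERATE_SCALAR_OVERFLOW_TESTS(...) instantiations exercise exactly the failure path by passing scalar values the output dtype cannot hold, which is why the old OpFullOutTest.ValueOverflow test (which merely tolerated overflow) is removed in favor of the shared macro.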