5 changes: 4 additions & 1 deletion kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -185,7 +185,10 @@ Tensor& constant_pad_nd_out(
   ScalarType in_type = in.scalar_type();
 
   ET_SWITCH_REALHBBF16_TYPES(in_type, ctx, "constant_pad_nd.out", CTYPE, [&]() {
-    const CTYPE value_casted = utils::scalar_to<CTYPE>(value);
+    auto opt_value_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(value);
+    ET_KERNEL_CHECK(ctx, opt_value_casted.has_value(), InvalidArgument, );
+    auto value_casted = opt_value_casted.value();
     constant_pad_nd_out_impl<CTYPE>(in, pad, value_casted, out);
   });
 
5 changes: 4 additions & 1 deletion kernels/portable/cpu/op_full.cpp
@@ -37,7 +37,10 @@ Tensor& full_out(
   constexpr auto name = "full.out";
 
   ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
-    CTYPE_OUT val_casted = utils::scalar_to<CTYPE_OUT>(fill_value);
+    auto opt_val_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE_OUT>(fill_value);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    auto val_casted = opt_val_casted.value();
     auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
     for (const auto i : c10::irange(out.numel())) {
       data_out[i] = val_casted;
22 changes: 9 additions & 13 deletions kernels/portable/cpu/op_full_like.cpp
@@ -48,23 +48,19 @@ Tensor& full_like_out(
       out,
       "Failed to resize output tensor.");
 
-  ScalarType val_type = utils::get_scalar_dtype(fill_value);
   ScalarType out_type = out.scalar_type();
 
   constexpr auto name = "scalar_tensor.out";
 
-  ET_SWITCH_REALB_TYPES(val_type, ctx, name, CTYPE_VAL, [&] {
-    CTYPE_VAL val;
-    ET_KERNEL_CHECK(
-        ctx, utils::extract_scalar(fill_value, &val), InvalidArgument, );
-
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
-      CTYPE_OUT val_casted = static_cast<CTYPE_OUT>(val);
-      auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
-      for (const auto i : c10::irange(out.numel())) {
-        data_out[i] = val_casted;
-      }
-    });
+  ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE_OUT, [&] {
+    auto opt_val_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE_OUT>(fill_value);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    auto val_casted = opt_val_casted.value();
+    auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
+    for (const auto i : c10::irange(out.numel())) {
+      data_out[i] = val_casted;
+    }
   });
 
   return out;
11 changes: 9 additions & 2 deletions kernels/portable/cpu/op_hardtanh.cpp
@@ -45,8 +45,15 @@ Tensor& hardtanh_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_REALHBF16_TYPES(in_type, ctx, "hardtanh.out", CTYPE, [&]() {
-    const CTYPE min_casted = utils::scalar_to<CTYPE>(min);
-    const CTYPE max_casted = utils::scalar_to<CTYPE>(max);
+    auto opt_min_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(min);
+    ET_KERNEL_CHECK(ctx, opt_min_casted.has_value(), InvalidArgument, );
+    auto min_casted = opt_min_casted.value();
+
+    auto opt_max_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(max);
+    ET_KERNEL_CHECK(ctx, opt_max_casted.has_value(), InvalidArgument, );
+    auto max_casted = opt_max_casted.value();
 
     apply_unary_map_fn(
         [min_casted, max_casted](const CTYPE val_in) {
6 changes: 5 additions & 1 deletion kernels/portable/cpu/op_leaky_relu.cpp
@@ -44,7 +44,11 @@ Tensor& leaky_relu_out(
   ET_KERNEL_CHECK(ctx, in_type == out_type, InvalidArgument, out);
 
   ET_SWITCH_FLOATHBF16_TYPES(in_type, ctx, "leaky_relu.out", CTYPE, [&]() {
-    const CTYPE negative_slope_casted = utils::scalar_to<CTYPE>(negative_slope);
+    auto opt_negative_slope_casted =
+        utils::internal::check_overflow_scalar_cast<CTYPE>(negative_slope);
+    ET_KERNEL_CHECK(
+        ctx, opt_negative_slope_casted.has_value(), InvalidArgument, );
+    auto negative_slope_casted = opt_negative_slope_casted.value();
 
     apply_unary_map_fn(
         [negative_slope_casted](const CTYPE val_in) {
16 changes: 5 additions & 11 deletions kernels/portable/cpu/op_scalar_tensor.cpp
@@ -24,17 +24,11 @@ scalar_tensor_out(KernelRuntimeContext& ctx, const Scalar& s, Tensor& out) {
 
   constexpr auto name = "scalar_tensor.out";
 
-  if (s.isFloatingPoint() &&
-      executorch::runtime::isIntegralType(out_type, false)) {
-    ET_SWITCH_INT_TYPES(out_type, ctx, name, CTYPE, [&]() {
-      out.mutable_data_ptr<CTYPE>()[0] =
-          static_cast<CTYPE>(utils::scalar_to<int64_t>(s));
-    });
-  } else {
-    ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() {
-      out.mutable_data_ptr<CTYPE>()[0] = utils::scalar_to<CTYPE>(s);
-    });
-  }
+  ET_SWITCH_REALHBBF16_TYPES(out_type, ctx, name, CTYPE, [&]() {
+    auto opt_val_casted = utils::internal::check_overflow_scalar_cast<CTYPE>(s);
+    ET_KERNEL_CHECK(ctx, opt_val_casted.has_value(), InvalidArgument, );
+    out.mutable_data_ptr<CTYPE>()[0] = opt_val_casted.value();
+  });
 
   return out;
 }
4 changes: 3 additions & 1 deletion kernels/portable/cpu/op_scatter.cpp
@@ -154,7 +154,9 @@ Tensor& scatter_value_out(
   constexpr auto name = "scatter.value_out";
 
   ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, name, CTYPE, [&]() {
-    const CTYPE val = utils::scalar_to<CTYPE>(value);
+    auto opt_val = utils::internal::check_overflow_scalar_cast<CTYPE>(value);
+    ET_KERNEL_CHECK(ctx, opt_val.has_value(), InvalidArgument, );
+    auto val = opt_val.value();
     scatter_value_helper<CTYPE>(in, dim, index, val, out);
   });
 
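Every kernel change above follows the same pattern: the unchecked `utils::scalar_to<CTYPE>(...)` cast, which silently wraps or truncates out-of-range scalars, is replaced by `utils::internal::check_overflow_scalar_cast<CTYPE>(...)`, and the kernel aborts through `ET_KERNEL_CHECK` when that call returns an empty optional. The helper's implementation is not part of this diff; the snippet below is only a minimal, self-contained sketch of the kind of representability check it presumably performs (the real helper operates on a `Scalar`, returns an ExecuTorch `optional`, and its exact boundary handling may differ).

#include <cmath>
#include <limits>
#include <optional>
#include <type_traits>

// Illustrative stand-in for an overflow-checked cast: returns the value cast
// to `To` when it is representable, or std::nullopt when the cast would
// overflow, i.e. the condition the kernels above reject via ET_KERNEL_CHECK.
template <typename To, typename From>
std::optional<To> checked_cast(From value) {
  const double v = static_cast<double>(value);
  if (std::isnan(v)) {
    // NaN is representable only by floating-point destinations.
    return std::is_floating_point<To>::value
        ? std::optional<To>(static_cast<To>(value))
        : std::nullopt;
  }
  // Simplified range check; exact behavior at the boundaries of 64-bit
  // integer types is glossed over here.
  const double lo = static_cast<double>(std::numeric_limits<To>::lowest());
  const double hi = static_cast<double>(std::numeric_limits<To>::max());
  if (v < lo || v > hi) {
    return std::nullopt;
  }
  return static_cast<To>(value);
}

// For example, checked_cast<uint8_t>(1000.0) and checked_cast<float>(3.41e38)
// both yield std::nullopt, while checked_cast<uint8_t>(7.0) yields 7.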
18 changes: 18 additions & 0 deletions kernels/test/op_constant_pad_nd_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/kernels/test/supported_features.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -347,6 +348,21 @@ class OpConstantPadNDOutTest : public OperatorTest {
     op_constant_pad_nd_out(self, padding_ref, 7, out);
     EXPECT_TENSOR_CLOSE(out, expected);
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    const std::vector<int32_t> sizes = {2, 2};
+    const std::vector<int32_t> sizes_out = {2, 4};
+    const std::vector<int64_t> padding = {1, 1};
+
+    IntArrayRef padding_ref = IntArrayRef(padding.data(), padding.size());
+    Tensor self = tf.ones(sizes);
+    Tensor out = tf.zeros(sizes_out);
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_constant_pad_nd_out(self, padding_ref, bad_value, out));
+  }
 };
 
 TEST_F(OpConstantPadNDOutTest, TestPadDim2) {
@@ -465,3 +481,5 @@ TEST_F(OpConstantPadNDOutTest, IncorrectOutputShapeFail) {
   ET_EXPECT_KERNEL_FAILURE(
       context_, op_constant_pad_nd_out(self, padding_ref, 0, out));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpConstantPadNDOutTest)
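The test files add a templated `expect_bad_scalar_value_dies` helper to each fixture and then instantiate a shared set of overflow cases through `GENERATE_SCALAR_OVERFLOW_TESTS` from the new `ScalarOverflowTestMacros.h` header. That header is not shown in this diff; a hypothetical sketch of what such a macro could expand to, assuming per-dtype out-of-range values similar to the ones used in the dedicated leaky_relu tests further down, is:

// Hypothetical expansion only: the real macro's dtype coverage and chosen
// values may differ. Each generated test feeds the fixture's
// expect_bad_scalar_value_dies<DTYPE>() a Scalar that DTYPE cannot represent.
#define GENERATE_SCALAR_OVERFLOW_TESTS(TestFixture)              \
  TEST_F(TestFixture, ByteTensorNegativeScalarDies) {            \
    /* Unsigned 8-bit cannot hold a negative value. */           \
    expect_bad_scalar_value_dies<ScalarType::Byte>(-1);          \
  }                                                              \
  TEST_F(TestFixture, CharTensorTooLargeScalarDies) {            \
    /* Signed 8-bit cannot hold 1000. */                         \
    expect_bad_scalar_value_dies<ScalarType::Char>(1000);        \
  }                                                              \
  TEST_F(TestFixture, FloatTensorTooLargeScalarDies) {           \
    /* Out of range for a 32-bit float. */                       \
    expect_bad_scalar_value_dies<ScalarType::Float>(3.41e+38);   \
  }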
15 changes: 15 additions & 0 deletions kernels/test/op_full_like_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/kernels/test/supported_features.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -65,6 +66,18 @@ class OpFullLikeTest : public OperatorTest {
     ET_EXPECT_KERNEL_FAILURE(
         context_, op_full_like_out(in, value, memory_format, out));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    const std::vector<int32_t> sizes = {2, 2};
+    Tensor in = tf.zeros(sizes);
+    Tensor out = tf.zeros(sizes);
+    optional<MemoryFormat> memory_format;
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_full_like_out(in, bad_value, memory_format, out));
+  }
 };
 
 template <>
@@ -209,3 +222,5 @@ TEST_F(OpFullLikeTest, HalfSupport) {
   op_full_like_out(in, INFINITY, memory_format, out);
   EXPECT_TENSOR_CLOSE(out, tf.full({2, 3}, INFINITY));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpFullLikeTest)
27 changes: 13 additions & 14 deletions kernels/test/op_full_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/kernels/test/supported_features.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -59,6 +60,17 @@ class OpFullOutTest : public OperatorTest {
     op_full_out(aref, 1.0, out);
     EXPECT_TENSOR_EQ(out, tf.ones(size_int32_t));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    std::vector<int32_t> sizes = {2, 2};
+    std::vector<int64_t> sizes_int64_t(sizes.begin(), sizes.end());
+    auto aref = IntArrayRef(sizes_int64_t.data(), sizes_int64_t.size());
+    Tensor out = tf.zeros(sizes);
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_full_out(aref, bad_value, out));
+  }
 };
 
 #define GENERATE_TEST(_, DTYPE) \
@@ -72,20 +84,7 @@ class OpFullOutTest : public OperatorTest {
 
 ET_FORALL_REALHBF16_TYPES(GENERATE_TEST)
 
-TEST_F(OpFullOutTest, ValueOverflow) {
-  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
-    GTEST_SKIP() << "ATen kernel doesn't handle overflow";
-  }
-  TensorFactory<ScalarType::Byte> tf;
-
-  std::vector<int64_t> sizes_int64_t_vec = {2, 3};
-  std::vector<int32_t> sizes_in32_t_vec = {2, 3};
-  auto sizes = IntArrayRef(sizes_int64_t_vec.data(), sizes_int64_t_vec.size());
-
-  Tensor out = tf.zeros(sizes_in32_t_vec);
-
-  op_full_out(sizes, 1000, out);
-}
+GENERATE_SCALAR_OVERFLOW_TESTS(OpFullOutTest)
 
 TEST_F(OpFullOutTest, HalfSupport) {
   TensorFactory<ScalarType::Half> tf;
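The hand-written `ValueOverflow` test, which filled a Byte tensor with 1000, is replaced by the generated overflow tests. With the new check in op_full.cpp, an out-of-range fill value is expected to fail the kernel check, so (assuming the generated suite covers Byte, as sketched earlier) the equivalent case would now look roughly like:

TEST_F(OpFullOutTest, ByteTensorTooLargeScalarDies) {
  // 1000 does not fit in an unsigned 8-bit element, so the overflow check
  // added to op_full.cpp above should reject it.
  expect_bad_scalar_value_dies<ScalarType::Byte>(1000);
}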
18 changes: 18 additions & 0 deletions kernels/test/op_hardtanh_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
@@ -51,10 +52,27 @@ class OpHardTanhTest : public OperatorTest {
     EXPECT_TENSOR_EQ(out, ret);
     EXPECT_TENSOR_EQ(out, tf.make({2, 2}, {lower_bound, 0, 1, 2}));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor in = tf.ones({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    // Test overflow for min parameter (using valid max)
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_hardtanh_out(in, bad_value, 1.0, out));
+
+    // Test overflow for max parameter (using valid min)
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_hardtanh_out(in, -1.0, bad_value, out));
+  }
 };
 
 TEST_F(OpHardTanhTest, SanityCheck) {
 #define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
   ET_FORALL_REALHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpHardTanhTest)
19 changes: 19 additions & 0 deletions kernels/test/op_leaky_relu_test.cpp
@@ -40,10 +40,29 @@ class OpLeakyReluTest : public OperatorTest {
     EXPECT_TENSOR_EQ(out, ret);
     EXPECT_TENSOR_EQ(out, tf.ones({2, 2}));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor in = tf.ones({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_leaky_relu_out(in, bad_value, out));
+  }
 };
 
 TEST_F(OpLeakyReluTest, SanityCheck) {
 #define TEST_ENTRY(ctype, dtype) test_leaky_relu_dtype<ScalarType::dtype>();
   ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
 #undef TEST_ENTRY
 }
+
+TEST_F(OpLeakyReluTest, FloatTensorTooSmallScalarDies) {
+  /* Cannot be represented by a float. */
+  expect_bad_scalar_value_dies<ScalarType::Float>(-3.41e+38);
+}
+
+TEST_F(OpLeakyReluTest, FloatTensorTooLargeScalarDies) {
+  /* Cannot be represented by a float. */
+  expect_bad_scalar_value_dies<ScalarType::Float>(3.41e+38);
+}
11 changes: 11 additions & 0 deletions kernels/test/op_scalar_tensor_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/kernels/test/supported_features.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -71,6 +72,14 @@ class OpScalarTensorOutTest : public OperatorTest {
 
     ET_EXPECT_KERNEL_FAILURE(context_, op_scalar_tensor_out(value, out));
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    Tensor out = tf.zeros({});
+
+    ET_EXPECT_KERNEL_FAILURE(context_, op_scalar_tensor_out(bad_value, out));
+  }
 };
 
 #define GENERATE_TEST_0D(ctype, dtype) \
@@ -131,3 +140,5 @@ TEST_F(OpScalarTensorOutTest, HalfSupport) {
   op_scalar_tensor_out(INFINITY, out);
   EXPECT_TENSOR_CLOSE(out, tf.make({}, {INFINITY}));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpScalarTensorOutTest)
16 changes: 16 additions & 0 deletions kernels/test/op_scatter_test.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
+#include <executorch/kernels/test/ScalarOverflowTestMacros.h>
 #include <executorch/kernels/test/TestUtil.h>
 #include <executorch/kernels/test/supported_features.h>
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -364,6 +365,19 @@ class OpScatterValueOutTest : public OperatorTest {
     op_scatter_value_out(input, 2, index, value, out);
     EXPECT_TENSOR_EQ(out, expected);
   }
+
+  template <ScalarType DTYPE>
+  void expect_bad_scalar_value_dies(const Scalar& bad_value) {
+    TensorFactory<DTYPE> tf;
+    TensorFactory<ScalarType::Long> tf_index;
+
+    Tensor self = tf.ones({2, 2});
+    Tensor index = tf_index.zeros({2, 2});
+    Tensor out = tf.zeros({2, 2});
+
+    ET_EXPECT_KERNEL_FAILURE(
+        context_, op_scatter_value_out(self, 0, index, bad_value, out));
+  }
 };
 
 TEST_F(OpScatterSrcOutTest, AllValidInputOutputSupport) {
@@ -652,3 +666,5 @@ TEST_F(OpScatterSrcOutTest, InvalidOneDimInputAndZeroDimIndex) {
   ET_EXPECT_KERNEL_FAILURE(
       context_, op_scatter_src_out(self, 0, index, src, out));
 }
+
+GENERATE_SCALAR_OVERFLOW_TESTS(OpScatterValueOutTest)