diff --git a/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp b/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp
index bb0be9a4c1b..bd0b6e68445 100644
--- a/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp
+++ b/kernels/portable/cpu/pattern/unary_ufunc_realhb_to_floath.cpp
@@ -23,6 +23,8 @@ Tensor& unary_ufunc_realhb_to_floath(
     Tensor& out) {
   (void)ctx;
 
+  ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out);
+
   // Resize for dynamic shape
   ET_KERNEL_CHECK_MSG(
       ctx,
diff --git a/kernels/portable/cpu/util/activation_ops_util.cpp b/kernels/portable/cpu/util/activation_ops_util.cpp
index b697c49e04f..273f5d59595 100644
--- a/kernels/portable/cpu/util/activation_ops_util.cpp
+++ b/kernels/portable/cpu/util/activation_ops_util.cpp
@@ -15,6 +15,7 @@ namespace executor {
 
 bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
+  ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool);
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
       approximate == "tanh" || approximate == "none",
       "Invalid approximation format: %.*s for gelu",
diff --git a/kernels/portable/cpu/util/broadcast_util.cpp b/kernels/portable/cpu/util/broadcast_util.cpp
index 4173c1b0856..64ef2086ffd 100644
--- a/kernels/portable/cpu/util/broadcast_util.cpp
+++ b/kernels/portable/cpu/util/broadcast_util.cpp
@@ -198,7 +198,10 @@ Tensor broadcast_tensor(
       repeats[i] = 1;
     }
   }
-  repeat_tensor(broadcast_from, makeArrayRef(repeats, ndim), out);
+
+  ET_CHECK(
+      repeat_tensor(broadcast_from, makeArrayRef(repeats, ndim), out) ==
+      Error::Ok);
 
   free(repeats);
diff --git a/kernels/portable/cpu/util/broadcast_util.h b/kernels/portable/cpu/util/broadcast_util.h
index 6ca1cf7ee97..77f42c266ad 100644
--- a/kernels/portable/cpu/util/broadcast_util.h
+++ b/kernels/portable/cpu/util/broadcast_util.h
@@ -97,7 +97,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
  * @param[out] out_dim The dimension of the broadcasted target
  * tensor
  */
-[[nodiscard]] Error get_broadcast_target_size(
+__ET_NODISCARD Error get_broadcast_target_size(
     const exec_aten::ArrayRef a_size,
     const exec_aten::ArrayRef b_size,
     Tensor::SizesType* out_sizes,
@@ -115,7 +115,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
  * @param[out] out_dim The dimension of the broadcasted target
  * tensor
  */
-[[nodiscard]] Error get_broadcast_target_size(
+__ET_NODISCARD Error get_broadcast_target_size(
     const Tensor& a,
     const Tensor& b,
     Tensor::SizesType* out_sizes,
@@ -130,7 +130,7 @@ __ET_DEPRECATED exec_aten::Tensor broadcast_tensor(
  * @param[in] b The second tensor going to be broadcasted.
  * @param[out] out The output tensor that will be resized.
  */
-[[nodiscard]] inline Error
+__ET_NODISCARD inline Error
 resize_to_broadcast_target_size(const Tensor& a, const Tensor& b, Tensor& out) {
   Tensor::SizesType expected_output_size[kTensorDimensionLimit];
   size_t expected_output_dim = 0;
@@ -156,7 +156,7 @@ resize_to_broadcast_target_size(const Tensor& a, const Tensor& b, Tensor& out) {
  * @param[in] c The third tensor going to be broadcasted.
  * @param[out] out The output tensor that will be resized.
 */
-[[nodiscard]] inline Error resize_to_broadcast_target_size(
+__ET_NODISCARD inline Error resize_to_broadcast_target_size(
     const Tensor& a,
     const Tensor& b,
     const Tensor& c,
diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp
index ae48dee0fb4..5b54cd6890d 100644
--- a/kernels/portable/cpu/util/copy_ops_util.cpp
+++ b/kernels/portable/cpu/util/copy_ops_util.cpp
@@ -114,6 +114,7 @@ bool check_cat_args(
   // Ensure dim is in range.
   ET_LOG_AND_RETURN_IF_FALSE(
       tensors[ref_i].numel() == 0 || tensors[ref_i].dim() > dim);
+  ET_LOG_AND_RETURN_IF_FALSE(dim >= 0);
 
   return true;
 }
@@ -378,6 +379,7 @@ bool check_slice_copy_args(
     int64_t dim,
     int64_t step,
     Tensor& out) {
+  ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0);
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
@@ -737,6 +739,8 @@ bool check_unsqueeze_copy_args(
     const Tensor input,
     int64_t dim,
     const Tensor out) {
+  ET_LOG_AND_RETURN_IF_FALSE(dim >= 0);
+
   // The input and out shall share same dtype
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(input, out));
diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp
index 384b1859b22..fdbc5a0e532 100644
--- a/kernels/portable/cpu/util/kernel_ops_util.cpp
+++ b/kernels/portable/cpu/util/kernel_ops_util.cpp
@@ -462,6 +462,8 @@ bool check_slice_scatter_args(
     int64_t num_values,
     int64_t step,
     Tensor output) {
+  ET_LOG_AND_RETURN_IF_FALSE(input.dim() > 0);
+
   // Check dim. The dim planed to be selected on shall exist in input
   ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, input.dim()));
diff --git a/kernels/portable/cpu/util/repeat_util.cpp b/kernels/portable/cpu/util/repeat_util.cpp
index bc721cd493c..9acb7ba088e 100644
--- a/kernels/portable/cpu/util/repeat_util.cpp
+++ b/kernels/portable/cpu/util/repeat_util.cpp
@@ -20,12 +20,12 @@ using Tensor = exec_aten::Tensor;
 
 namespace {
 
-void check_repeat_args(
+bool check_repeat_args(
     Tensor self,
     exec_aten::ArrayRef repeats,
     Tensor& out) {
   // Ensure the self tensors list is non-empty.
-  ET_CHECK_MSG(
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
       repeats.size() >= self.dim(),
       "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor");
@@ -34,11 +34,11 @@ void check_repeat_args(
   for (auto repeat : repeats) {
     all_non_negative = all_non_negative && (repeat >= 0);
   }
-  ET_CHECK_MSG(
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
       all_non_negative, "Trying to create tensor with negative dimension");
 
   /// Check if out.size() is legal.
-  ET_CHECK_MSG(
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
       out.dim() == repeats.size(),
       "The dimension of out shall equal size of repeats, but now is %zd and %zd",
       out.dim(),
@@ -47,12 +47,12 @@ void check_repeat_args(
   // Right now we only support the tensors whose dimension is no greater than
   // kTensorDimensionLimit. Only check out tensor because the number of
   // dimension of out tensor shall have more than or equal to self tensor
-  ET_CHECK_MSG(
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
       out.dim() <= kTensorDimensionLimit,
       "The dimension of input and output should not be larger than %zd",
       kTensorDimensionLimit);
 
-  ET_CHECK_SAME_DTYPE2(out, self);
+  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(out, self));
 
   // We pad one to the beginning of self.size() to make its length equal
   // repeats, and called it reformat_self_size. We then make point-to-point mul
@@ -66,13 +66,15 @@ void check_repeat_args(
     reformat_self_size[out.dim() - 1 - i] = self.size(self.dim() - 1 - i);
   }
   for (size_t i = 0; i < repeats.size(); i++) {
-    ET_CHECK_MSG(
+    ET_LOG_MSG_AND_RETURN_IF_FALSE(
        reformat_self_size[i] * repeats[i] == out.size(i),
        "Expect out size at dimension %zu is %" PRId64 ", but now is %zd",
        i,
        reformat_self_size[i] * repeats[i],
        out.size(i));
   }
+
+  return true;
 }
 
 // Given the indices to a point in an n-D tensor, and the stride (in bytes)
@@ -163,16 +165,19 @@ void repeat_internal(
 
 // TODO(gasoonjia): dynamic allocate array to support tensor dimension larger
 // than kTensorDimensionLimit.
-Tensor& repeat_tensor(
+Error repeat_tensor(
     const Tensor& self,
     exec_aten::ArrayRef repeats,
     Tensor& out) {
-  // Assert that the args are valid.
-  check_repeat_args(self, repeats, out);
+  // Verify that the args are valid.
+  ET_CHECK_OR_RETURN_ERROR(
+      check_repeat_args(self, repeats, out),
+      InvalidArgument,
+      "Repeat arguments are invalid.");
 
   // Returns out if out.numel == 0, nothing needs to be repeated.
   if (out.numel() == 0) {
-    return out;
+    return Error::Ok;
   }
 
   ssize_t element_size = out.element_size();
@@ -183,7 +188,7 @@ Tensor& repeat_tensor(
     const char* src = self.const_data_ptr();
     char* dest = out.mutable_data_ptr();
     memcpy(dest, src, element_size);
-    return out;
+    return Error::Ok;
   }
 
   // Treats zero-dim self as one-dim tensor with size {1}.
@@ -274,7 +279,7 @@ Tensor& repeat_tensor(
     accum_offset *= out.size(i);
   }
 
-  return out;
+  return Error::Ok;
 }
 
 } // namespace executor
diff --git a/kernels/portable/cpu/util/repeat_util.h b/kernels/portable/cpu/util/repeat_util.h
index 68e72c8aa83..28f5cfa5556 100644
--- a/kernels/portable/cpu/util/repeat_util.h
+++ b/kernels/portable/cpu/util/repeat_util.h
@@ -20,9 +20,9 @@ namespace executor {
  * @param[in] The number of times to repeat this tensor along each dimension
  * @param[in] Output tensor to write to.
  *
- * @returns Repeated tensor.
+ * @returns The status of the repeat operation.
  */
-exec_aten::Tensor& repeat_tensor(
+Error repeat_tensor(
     const exec_aten::Tensor& in,
     exec_aten::ArrayRef repeats,
     exec_aten::Tensor& out);
diff --git a/kernels/portable/cpu/util/targets.bzl b/kernels/portable/cpu/util/targets.bzl
index 135b8af5af8..f7ca5bce920 100644
--- a/kernels/portable/cpu/util/targets.bzl
+++ b/kernels/portable/cpu/util/targets.bzl
@@ -27,6 +27,7 @@ def define_common_targets():
         ],
         exported_headers = ["repeat_util.h"],
         deps = [
+            "//executorch/runtime/kernel:kernel_includes",
             "//executorch/runtime/core/exec_aten/util:scalar_type_util",
             "//executorch/runtime/core/exec_aten/util:tensor_util",
         ],
diff --git a/kernels/test/TestUtil.h b/kernels/test/TestUtil.h
index a8ebc21c0f9..ed72dbc4128 100644
--- a/kernels/test/TestUtil.h
+++ b/kernels/test/TestUtil.h
@@ -13,6 +13,9 @@
 
 #pragma once
 
+#include 
+#include 
+#include 
 #include 
 #include 
 
@@ -21,16 +24,62 @@
 * Ensure the kernel will fail when `_statement` is executed.
 * @param _statement Statement to execute.
 */
-#define ET_EXPECT_KERNEL_FAILURE(_statement) EXPECT_ANY_THROW(_statement)
+#define ET_EXPECT_KERNEL_FAILURE(_context, _statement) \
+  EXPECT_ANY_THROW(_statement)
 
-#define ET_EXPECT_KERNEL_FAILURE_WITH_MSG(_statement, _matcher) \
+#define ET_EXPECT_KERNEL_FAILURE_WITH_MSG(_context, _statement, _matcher) \
   EXPECT_ANY_THROW(_statement)
 
 #else
 
-#define ET_EXPECT_KERNEL_FAILURE(_statement) ET_EXPECT_DEATH(_statement, "")
+#define ET_EXPECT_KERNEL_FAILURE(_context, _statement)                \
+  do {                                                                \
+    _statement;                                                       \
+    expect_failure();                                                 \
+    if ((_context).failure_state() == torch::executor::Error::Ok) {   \
+      ET_LOG(Error, "Expected kernel failure but found success.");    \
+      ADD_FAILURE();                                                  \
+    }                                                                 \
+  } while (false)
 
-#define ET_EXPECT_KERNEL_FAILURE_WITH_MSG(_statement, _matcher) \
-  ET_EXPECT_DEATH(_statement, _matcher)
+#define ET_EXPECT_KERNEL_FAILURE_WITH_MSG(_context, _statement, _msg) \
+  do {                                                                \
+    _statement;                                                       \
+    expect_failure();                                                 \
+    if ((_context).failure_state() == torch::executor::Error::Ok) {   \
+      ET_LOG(Error, "Expected kernel failure but found success.");    \
+      ADD_FAILURE();                                                  \
+    }                                                                 \
+  } while (false)
 
 #endif // USE_ATEN_LIB
+
+/*
+ * Common test fixture for kernel / operator-level tests. Provides
+ * a runtime context object and verifies failure state post-execution.
+ */
+class OperatorTest : public ::testing::Test {
+ public:
+  OperatorTest() : expect_failure_(false) {}
+
+  void SetUp() override {
+    torch::executor::runtime_init();
+  }
+
+  void TearDown() override {
+    // Validate error state.
+    if (!expect_failure_) {
+      EXPECT_EQ(context_.failure_state(), torch::executor::Error::Ok);
+    } else {
+      EXPECT_NE(context_.failure_state(), torch::executor::Error::Ok);
+    }
+  }
+
+  void expect_failure() {
+    expect_failure_ = true;
+  }
+
+ protected:
+  exec_aten::RuntimeContext context_;
+  bool expect_failure_;
+};
diff --git a/kernels/test/targets.bzl b/kernels/test/targets.bzl
index 789179c4cad..f110ec007b8 100644
--- a/kernels/test/targets.bzl
+++ b/kernels/test/targets.bzl
@@ -50,10 +50,12 @@ def define_common_targets(is_fbcode = False):
         fbcode_exported_deps = [
             "//common/init:init",
             "//common/gtest:gtest",
+            "//executorch/runtime/kernel:kernel_includes",
         ],
         xplat_exported_deps = [
             "//xplat/folly:init_init",
             "//third-party/googletest:gtest_main",
+            "//executorch/runtime/kernel:kernel_includes",
         ],
     )
diff --git a/kernels/test/util.bzl b/kernels/test/util.bzl
index 0efeb497740..7a7da46d07a 100644
--- a/kernels/test/util.bzl
+++ b/kernels/test/util.bzl
@@ -51,6 +51,7 @@ def op_test(name, deps = [], aten_compatible = True, kernel_name = "portable", u
         deps = [
             "//executorch/runtime/core/exec_aten:lib" + aten_suffix,
             "//executorch/runtime/core/exec_aten/testing_util:tensor_util" + aten_suffix,
+            "//executorch/runtime/kernel:kernel_includes" + aten_suffix,
             "//executorch/kernels/test:test_util" + aten_suffix,
         ] + generated_lib_and_op_deps + deps,
     )
@@ -84,6 +85,7 @@ def generated_op_test(name, op_impl_target, generated_lib_headers_target, suppor
         deps = [
             "//executorch/runtime/core/exec_aten:lib",
             "//executorch/runtime/core/exec_aten/testing_util:tensor_util",
+            "//executorch/runtime/kernel:kernel_includes",
             "//executorch/kernels/test:test_util",
             op_impl_target,
             generated_lib_headers_target,
diff --git a/runtime/core/exec_aten/util/scalar_type_util.h b/runtime/core/exec_aten/util/scalar_type_util.h
index f831f826f54..c1917d1dd9e 100644
--- a/runtime/core/exec_aten/util/scalar_type_util.h
+++ b/runtime/core/exec_aten/util/scalar_type_util.h
@@ -360,6 +360,26 @@ inline bool isFloatingType(exec_aten::ScalarType t) {
       t == exec_aten::ScalarType::Half || t == exec_aten::ScalarType::BFloat16);
 }
 
+inline bool isRealType(exec_aten::ScalarType t) {
+  return (
+      t == exec_aten::ScalarType::Byte || t == exec_aten::ScalarType::Char ||
+      t == exec_aten::ScalarType::Short || t == exec_aten::ScalarType::Int ||
+      t == exec_aten::ScalarType::Long || t == exec_aten::ScalarType::Float ||
+      t == exec_aten::ScalarType::Double);
+}
+
+inline bool isRealHType(exec_aten::ScalarType t) {
+  return (
+      t == exec_aten::ScalarType::Byte || t == exec_aten::ScalarType::Char ||
+      t == exec_aten::ScalarType::Short || t == exec_aten::ScalarType::Int ||
+      t == exec_aten::ScalarType::Long || t == exec_aten::ScalarType::Float ||
+      t == exec_aten::ScalarType::Double || t == exec_aten::ScalarType::Half);
+}
+
+inline bool isRealHBType(exec_aten::ScalarType t) {
+  return (isRealHType(t) || t == exec_aten::ScalarType::Bool);
+}
+
 inline bool isComplexType(exec_aten::ScalarType t) {
   return (
       t == exec_aten::ScalarType::ComplexHalf ||
diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h
index f7a4a8d2a99..c5c663e28c5 100644
--- a/runtime/core/exec_aten/util/tensor_util.h
+++ b/runtime/core/exec_aten/util/tensor_util.h
@@ -357,9 +357,6 @@
 * If `cond` is false, log `cond` and return from the kernel with a failure
 * state set.
 *
- * TODO(ssjia): add context.fail(torch.executor::Error::error); before exit
- * TODO(ssjia): replace runtime_abort() with return retval
- *
 * @param[in] context the runtime context
 * @param[in] cond the condition to check
 * @param[in] error torch::executor::Error enum value (e.g `InvalidArgument`)
@@ -369,7 +366,8 @@
   do {                                                                    \
     if (!(cond)) {                                                        \
       ET_LOG(Error, "Check failed (%s): ", #cond);                        \
-      torch::executor::runtime_abort();                                   \
+      context.fail(torch::executor::Error::error);                        \
+      return retval;                                                      \
     }                                                                     \
   } while (false)
 
@@ -377,9 +375,6 @@
 * If `cond` is false, log `message` and return from the kernel with a failure
 * state set.
 *
- * TODO(ssjia): add context.fail(torch.executor::Error::error); before exit
- * TODO(ssjia): replace runtime_abort() with return retval
- *
 * @param[in] context the runtime context
 * @param[in] cond the condition to check
 * @param[in] error torch::executor::Error enum value (e.g `InvalidArgument`)
@@ -389,7 +384,8 @@
   do {                                                                       \
     if (!(cond)) {                                                           \
       ET_LOG(Error, "Check failed (%s): " message, #cond, ##__VA_ARGS__);    \
-      torch::executor::runtime_abort();                                      \
+      context.fail(torch::executor::Error::error);                           \
+      return retval;                                                         \
     }                                                                        \
   } while (false)
 
@@ -491,6 +487,33 @@ inline bool tensor_is_floating_type(exec_aten::Tensor t) {
   return true;
 }
 
+inline bool tensor_is_real_type(exec_aten::Tensor t) {
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      torch::executor::isRealType(t.scalar_type()),
+      "Expected to find a real type, but tensor has type %s",
+      torch::executor::toString(t.scalar_type()));
+
+  return true;
+}
+
+inline bool tensor_is_realh_type(exec_aten::Tensor t) {
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      torch::executor::isRealHType(t.scalar_type()),
+      "Expected to find a real type, but tensor has type %s",
+      torch::executor::toString(t.scalar_type()));
+
+  return true;
+}
+
+inline bool tensor_is_realhb_type(exec_aten::Tensor t) {
+  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      torch::executor::isRealHBType(t.scalar_type()),
+      "Expected to find a real type, but tensor has type %s",
+      torch::executor::toString(t.scalar_type()));
+
+  return true;
+}
+
 inline bool tensor_is_complex_type(exec_aten::Tensor t) {
   ET_LOG_MSG_AND_RETURN_IF_FALSE(
       torch::executor::isComplexType(t.scalar_type()),
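
To make the intent of the changes above concrete, here is a minimal sketch (not part of the patch) of how the pieces compose: a kernel reports bad arguments through the runtime context via `ET_KERNEL_CHECK` and returns early instead of aborting, and a test built on the new `OperatorTest` fixture asserts the recorded failure state with the two-argument `ET_EXPECT_KERNEL_FAILURE(context, statement)` macro. The operator `demo_relu_out`, the `TensorFactory` setup, and the include paths are illustrative assumptions; only `ET_KERNEL_CHECK`, `tensor_is_floating_type`, `OperatorTest`, `failure_state()`, and the macro signatures come from this diff.

```cpp
// Sketch only: the demo operator and include paths are assumptions, not part of this diff.
#include <executorch/kernels/test/TestUtil.h> // OperatorTest, ET_EXPECT_KERNEL_FAILURE
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h> // assumed path
#include <executorch/runtime/kernel/kernel_includes.h> // assumed path for ET_KERNEL_CHECK

#include <gtest/gtest.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;

// Hypothetical kernel using the non-fatal check pattern: a failed check records
// InvalidArgument on `ctx` and returns `out` immediately, instead of calling
// torch::executor::runtime_abort().
Tensor& demo_relu_out(exec_aten::RuntimeContext& ctx, const Tensor& in, Tensor& out) {
  (void)in; // elementwise computation elided in this sketch
  ET_KERNEL_CHECK(
      ctx, torch::executor::tensor_is_floating_type(out), InvalidArgument, out);
  return out;
}

// Hypothetical test on top of the OperatorTest fixture: TearDown() checks
// context_.failure_state(), and ET_EXPECT_KERNEL_FAILURE(context_, ...) calls
// expect_failure() so that a recorded error is treated as the expected outcome.
class DemoReluTest : public OperatorTest {};

TEST_F(DemoReluTest, NonFloatingOutputFails) {
  torch::executor::testing::TensorFactory<ScalarType::Float> tf_float;
  torch::executor::testing::TensorFactory<ScalarType::Bool> tf_bool;
  Tensor in = tf_float.ones({2, 2});
  Tensor out = tf_bool.zeros({2, 2});

  // The kernel should set a failure state on context_ rather than abort the process.
  ET_EXPECT_KERNEL_FAILURE(context_, demo_relu_out(context_, in, out));
}
```

The payoff of this pattern shows up in the test: invalid arguments now surface as an `Error` on the kernel context plus an early return, so operator tests can assert on `failure_state()` directly instead of relying on death tests (`ET_EXPECT_DEATH`).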