From ca66a332ff62c1fb33221b265962b1a7b39b9d3a Mon Sep 17 00:00:00 2001
From: Dave Bort
Date: Sun, 29 Sep 2024 22:17:34 -0700
Subject: [PATCH] Fix unqualified uses of executorch functions (#5709)

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/5709

I'm not sure how this worked before, but these sites called functions under
torch::executor without actually qualifying them. Qualify them explicitly,
because the "can call without qualification" magic stops working when we
move the etensor types in D63294217.

In a few places I used `namespace etrt = executorch::runtime;` instead of a
using statement for a particular function, like `etrt::isIntegralType`. If
I just say `using executorch::runtime::isIntegralType`, those files fail in
aten mode because the unqualified call to `isIntegralType()` is deemed
ambiguous in the presence of `c10::isIntegralType()` -- but afaict that
`c10` version isn't `using`'d into the global namespace, so I don't know
why it conflicts. It'd be good to figure that out at some point, but this
works for now.
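To make the failure mode concrete, here is a minimal, self-contained toy
sketch (hypothetical stand-in namespaces and bodies, not code from this
change) of how an unqualified call becomes ambiguous once both declarations
are visible, and how a namespace alias sidesteps the problem by keeping
every call qualified:

    #include <cstdint>

    namespace c10 {
    // Toy stand-in for c10::isIntegralType().
    inline bool isIntegralType(int8_t /*dtype*/, bool /*includeBool*/) {
      return true;
    }
    } // namespace c10

    namespace executorch::runtime {
    // Toy stand-in for executorch::runtime::isIntegralType().
    inline bool isIntegralType(int8_t /*dtype*/, bool /*includeBool*/) {
      return true;
    }
    } // namespace executorch::runtime

    // If both names are made visible at the call site, e.g. via
    //   using executorch::runtime::isIntegralType;
    //   using c10::isIntegralType;
    // then the unqualified call isIntegralType(0, false) is ambiguous and
    // fails to compile. A namespace alias never relies on unqualified
    // lookup, so it cannot be broken by other visible overloads:
    namespace etrt = executorch::runtime;

    bool is_integral_dtype(int8_t dtype) {
      return etrt::isIntegralType(dtype, false); // always unambiguous
    }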
I also updated custom_kernel_example to stop using the `torch::` namespace.
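For context on why the yaml file and the kernel source change together
below: the `kernel_name` entry in my_functions.yaml has to stay in sync
with the C++ namespace that defines the kernel, with the implementation
living in a nested `native` namespace that the yaml name omits (both the
old and new names in the diff follow this pattern). A sketch of the paired
declarations after the rename, abridged from the diff below (the `Tensor&`
return type is an assumption here; the hunk context elides it):

    # my_functions.yaml
    - op: relu.out
      kernels:
        - arg_meta: null
          kernel_name: my_custom_kernels::my_relu_out

    // op_relu.cpp (types as in the file below)
    namespace my_custom_kernels {
    namespace native {
    Tensor& my_relu_out(
        KernelRuntimeContext& context, const Tensor& input, Tensor& out);
    } // namespace native
    } // namespace my_custom_kernels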
Reviewed By: swolchok

Differential Revision: D63476419
---
 .../reference/operators/quantized_layer_norm.cpp |  3 ++-
 .../reference/operators/quantized_linear_out.cpp |  3 ++-
 .../reference/operators/quantized_matmul_out.cpp |  3 ++-
 .../portable/cpu/util/test/broadcast_test.cpp    |  6 ++++++
 kernels/portable/cpu/util/test/reduce_test.cpp   |  5 ++++-
 .../test/custom_kernel_example/my_functions.yaml |  2 +-
 kernels/test/custom_kernel_example/op_relu.cpp   | 16 ++++++++--------
 kernels/test/op_add_test.cpp                     | 12 +++++++-----
 kernels/test/op_mul_test.cpp                     | 12 +++++++-----
 kernels/test/op_sub_test.cpp                     | 12 +++++++-----
 10 files changed, 46 insertions(+), 28 deletions(-)

diff --git a/backends/cadence/reference/operators/quantized_layer_norm.cpp b/backends/cadence/reference/operators/quantized_layer_norm.cpp
index ab1af935eec..574bcef1b22 100644
--- a/backends/cadence/reference/operators/quantized_layer_norm.cpp
+++ b/backends/cadence/reference/operators/quantized_layer_norm.cpp
@@ -11,7 +11,8 @@
 
 #include
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 namespace impl {
diff --git a/backends/cadence/reference/operators/quantized_linear_out.cpp b/backends/cadence/reference/operators/quantized_linear_out.cpp
index 300158d8e5e..c85e3a59603 100644
--- a/backends/cadence/reference/operators/quantized_linear_out.cpp
+++ b/backends/cadence/reference/operators/quantized_linear_out.cpp
@@ -13,7 +13,8 @@ namespace impl {
 namespace reference {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 void quantized_linear_out(
diff --git a/backends/cadence/reference/operators/quantized_matmul_out.cpp b/backends/cadence/reference/operators/quantized_matmul_out.cpp
index b381a8ee394..b0a9393cd01 100644
--- a/backends/cadence/reference/operators/quantized_matmul_out.cpp
+++ b/backends/cadence/reference/operators/quantized_matmul_out.cpp
@@ -13,7 +13,8 @@ namespace impl {
 namespace reference {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
+using executorch::runtime::getLeadingDims;
 using executorch::runtime::KernelRuntimeContext;
 
 // The quantized matmul. The quantized matmul accumulates in a wider register,
diff --git a/kernels/portable/cpu/util/test/broadcast_test.cpp b/kernels/portable/cpu/util/test/broadcast_test.cpp
index 9ad6555d94e..d87e8ecec85 100644
--- a/kernels/portable/cpu/util/test/broadcast_test.cpp
+++ b/kernels/portable/cpu/util/test/broadcast_test.cpp
@@ -22,6 +22,12 @@ using exec_aten::ScalarType;
 using exec_aten::Tensor;
 using executorch::runtime::ArrayRef;
 using executorch::runtime::testing::TensorFactory;
+using torch::executor::broadcast_tensor;
+using torch::executor::delinearize_index;
+using torch::executor::get_broadcast_target_size;
+using torch::executor::linearize_access_indexes;
+using torch::executor::tensor_is_broadcastable_to;
+using torch::executor::tensors_are_broadcastable_between;
 
 TEST(BroadcastUtilTest, BroadcastTensor) {
   TensorFactory<ScalarType::Int> tf;
diff --git a/kernels/portable/cpu/util/test/reduce_test.cpp b/kernels/portable/cpu/util/test/reduce_test.cpp
index 9ee37aab657..e7bb03c30c8 100644
--- a/kernels/portable/cpu/util/test/reduce_test.cpp
+++ b/kernels/portable/cpu/util/test/reduce_test.cpp
@@ -19,7 +19,10 @@ using exec_aten::ArrayRef;
 using exec_aten::optional;
 using exec_aten::ScalarType;
 using exec_aten::Tensor;
-using torch::executor::testing::TensorFactory;
+using executorch::runtime::testing::TensorFactory;
+using torch::executor::apply_over_dim;
+using torch::executor::apply_over_dim_list;
+using torch::executor::get_out_numel;
 
 void _apply_over_dim(const Tensor& in, const optional<int64_t>& dim) {
   int64_t* in_data = in.mutable_data_ptr<int64_t>();
diff --git a/kernels/test/custom_kernel_example/my_functions.yaml b/kernels/test/custom_kernel_example/my_functions.yaml
index 72f1d2cf865..de5ce952ab4 100644
--- a/kernels/test/custom_kernel_example/my_functions.yaml
+++ b/kernels/test/custom_kernel_example/my_functions.yaml
@@ -5,4 +5,4 @@
 - op: relu.out
   kernels:
     - arg_meta: null
-      kernel_name: torch::my_custom_kernel::my_relu_out
+      kernel_name: my_custom_kernels::my_relu_out
diff --git a/kernels/test/custom_kernel_example/op_relu.cpp b/kernels/test/custom_kernel_example/op_relu.cpp
index e59fbf4bd72..39be620d86b 100644
--- a/kernels/test/custom_kernel_example/op_relu.cpp
+++ b/kernels/test/custom_kernel_example/op_relu.cpp
@@ -12,14 +12,15 @@
 #include
 #include
 
-namespace torch {
-namespace my_custom_kernel {
+namespace my_custom_kernels {
 namespace native {
 
-using Tensor = exec_aten::Tensor;
-using ScalarType = exec_aten::ScalarType;
-using executor::Error;
+using exec_aten::ScalarType;
+using exec_aten::Tensor;
+using executorch::runtime::Error;
 using executorch::runtime::KernelRuntimeContext;
+using executorch::runtime::resize_tensor;
+using executorch::runtime::tensors_have_same_shape_and_dtype;
 
 namespace {
 
@@ -67,7 +68,7 @@ my_relu_out(KernelRuntimeContext& context, const Tensor& input, Tensor& out) {
   resize(out, input.sizes());
   ET_KERNEL_CHECK(
       context,
-      executor::tensors_have_same_shape_and_dtype(input, out),
+      tensors_have_same_shape_and_dtype(input, out),
       InvalidArgument,
       out);
 
@@ -94,5 +95,4 @@ my_relu_out(KernelRuntimeContext& context, const Tensor& input, Tensor& out) {
 }
 
 } // namespace native
-} // namespace my_custom_kernel
-} // namespace torch
+} // namespace my_custom_kernels
diff --git a/kernels/test/op_add_test.cpp b/kernels/test/op_add_test.cpp
index e35a4100c9a..0e4e2fc6359 100644
--- a/kernels/test/op_add_test.cpp
+++ b/kernels/test/op_add_test.cpp
@@ -18,11 +18,12 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpAddOutKernelTest : public OperatorTest {
  protected:
@@ -63,7 +64,8 @@ class OpAddOutKernelTest : public OperatorTest {
     test_add<DTYPE_A, DTYPE_B, ScalarType::Float>();
     test_add<DTYPE_A, DTYPE_B, ScalarType::Double>();
     // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
       test_add<DTYPE_A, DTYPE_B, ScalarType::Int>();
       test_add<DTYPE_A, DTYPE_B, ScalarType::Long>();
     }
diff --git a/kernels/test/op_mul_test.cpp b/kernels/test/op_mul_test.cpp
index f8205ea601e..f3c9e54c862 100644
--- a/kernels/test/op_mul_test.cpp
+++ b/kernels/test/op_mul_test.cpp
@@ -17,11 +17,12 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpMulOutTest : public OperatorTest {
  protected:
@@ -61,7 +62,8 @@ class OpMulOutTest : public OperatorTest {
     test_mul<DTYPE_A, DTYPE_B, ScalarType::Float>();
     test_mul<DTYPE_A, DTYPE_B, ScalarType::Double>();
     // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
       test_mul<DTYPE_A, DTYPE_B, ScalarType::Int>();
       test_mul<DTYPE_A, DTYPE_B, ScalarType::Long>();
     }
diff --git a/kernels/test/op_sub_test.cpp b/kernels/test/op_sub_test.cpp
index 9f795516723..886adaf2e9d 100644
--- a/kernels/test/op_sub_test.cpp
+++ b/kernels/test/op_sub_test.cpp
@@ -16,11 +16,12 @@
 #include <gtest/gtest.h>
 
 using namespace ::testing;
-using exec_aten::Scalar;
-using exec_aten::ScalarType;
-using exec_aten::Tensor;
+using executorch::aten::Scalar;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::runtime::testing::TensorFactory;
 using torch::executor::testing::SupportedFeatures;
-using torch::executor::testing::TensorFactory;
+namespace etrt = executorch::runtime;
 
 class OpSubOutTest : public OperatorTest {
  protected:
@@ -60,7 +61,8 @@ class OpSubOutTest : public OperatorTest {
     test_sub<DTYPE_A, DTYPE_B, ScalarType::Float>();
     test_sub<DTYPE_A, DTYPE_B, ScalarType::Double>();
     // Integral out type is only allowed if both inputs are integral types
-    if (isIntegralType(DTYPE_A, false) && isIntegralType(DTYPE_B, false)) {
+    if (etrt::isIntegralType(DTYPE_A, false) &&
+        etrt::isIntegralType(DTYPE_B, false)) {
       test_sub<DTYPE_A, DTYPE_B, ScalarType::Int>();
       test_sub<DTYPE_A, DTYPE_B, ScalarType::Long>();
     }