From 77c90e9b821419d0767f7b855a7564a020108126 Mon Sep 17 00:00:00 2001
From: Dave Bort
Date: Wed, 9 Oct 2024 10:56:28 -0700
Subject: [PATCH] Migrate backends/cadence away from deprecated namespaces (#5905)

Summary:
Pull Request resolved: https://github.com/pytorch/executorch/pull/5905

Stop using the `torch::` namespace where possible. For now, ops still live
under `torch::executor::`.

Reviewed By: Gasoonjia, zonglinpeng

Differential Revision: D63924099

fbshipit-source-id: e1132f889bfdeccb56e55a5bad6be937cce366e3
(cherry picked from commit 7bebf8e426ae1857593a157c11c53008c10e4913)
---
 backends/cadence/executor_runner.cpp          | 29 +++++++++----------
 .../hifi/operators/dequantize_per_tensor.cpp  |  4 +--
 .../hifi/operators/quantize_per_tensor.cpp    |  4 +--
 .../hifi/operators/quantized_layer_norm.cpp   |  8 ++---
 .../hifi/operators/quantized_linear_out.cpp   |  4 +--
 .../operators/dequantize_per_tensor.cpp       |  4 +--
 .../reference/operators/op_embedding.cpp      |  2 +-
 .../cadence/reference/operators/op_full.cpp   |  4 +--
 .../reference/operators/op_view_copy.cpp      |  2 +-
 .../operators/quantize_per_tensor.cpp         |  4 +--
 .../operators/quantized_conv_out.cpp          |  8 ++---
 .../operators/quantized_layer_norm.cpp        |  6 ++--
 .../operators/quantized_linear_out.cpp        |  2 +-
 .../operators/quantized_matmul_out.cpp        |  8 ++---
 .../operators/quantized_relu_out.cpp          |  6 ++--
 15 files changed, 47 insertions(+), 48 deletions(-)

diff --git a/backends/cadence/executor_runner.cpp b/backends/cadence/executor_runner.cpp
index dd24105179b..4e85a3b4564 100644
--- a/backends/cadence/executor_runner.cpp
+++ b/backends/cadence/executor_runner.cpp
@@ -37,7 +37,6 @@
 static uint8_t method_allocator_pool[18 * 1024U]; // 4 MB

-using namespace torch::executor;

 #include
 #define APP_MU MUB
@@ -48,8 +47,8 @@
 /* How many message is used to test message sending */
 #define MSG_LENGTH 32U

-using torch::executor::Error;
-using torch::executor::Result;
+using executorch::runtime::Error;
+using executorch::runtime::Result;

 void LED_INIT();
 void LED_TOGGLE();
@@ -106,13 +105,13 @@ int main(int argc, char** argv) {
   BOARD_InitDebugConsole();
   ET_LOG(Info, "Booted up in DSP.");

-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();

   auto loader =
-      torch::executor::util::BufferDataLoader(model_pte, sizeof(model_pte));
+      executorch::extension::BufferDataLoader(model_pte, sizeof(model_pte));

-  Result<torch::executor::Program> program =
-      torch::executor::Program::load(&loader);
+  Result<executorch::runtime::Program> program =
+      executorch::runtime::Program::load(&loader);
   if (!program.ok()) {
     ET_LOG(
         Error,
@@ -132,7 +131,7 @@
   }
   ET_LOG(Info, "ET: Running method %s", method_name);

-  Result<torch::executor::MethodMeta> method_meta =
+  Result<executorch::runtime::MethodMeta> method_meta =
       program->method_meta(method_name);
   if (!method_meta.ok()) {
     ET_LOG(
@@ -142,12 +141,12 @@
         (unsigned int)method_meta.error());
   }

-  torch::executor::MemoryAllocator method_allocator{
-      torch::executor::MemoryAllocator(
+  executorch::runtime::MemoryAllocator method_allocator{
+      executorch::runtime::MemoryAllocator(
           sizeof(method_allocator_pool), method_allocator_pool)};

   std::vector<std::unique_ptr<uint8_t[]>> planned_buffers; // Owns the memory
-  std::vector<torch::executor::Span<uint8_t>>
+  std::vector<executorch::runtime::Span<uint8_t>>
       planned_spans; // Passed to the allocator
   size_t num_memory_planned_buffers =
       method_meta->num_memory_planned_buffers();
@@ -161,13 +160,13 @@
     planned_spans.push_back({planned_buffers.back().get(), buffer_size});
   }

-  torch::executor::HierarchicalAllocator planned_memory(
+  executorch::runtime::HierarchicalAllocator planned_memory(
       {planned_spans.data(), planned_spans.size()});

-  torch::executor::MemoryManager memory_manager(
+  executorch::runtime::MemoryManager memory_manager(
       &method_allocator, &planned_memory);

-  Result<torch::executor::Method> method =
+  Result<executorch::runtime::Method> method =
       program->load_method(method_name, &memory_manager);
   if (!method.ok()) {
     ET_LOG(
@@ -178,7 +177,7 @@
   }
   ET_LOG(Info, "Method loaded.");

-  torch::executor::util::prepare_input_tensors(*method);
+  executorch::extension::prepare_input_tensors(*method);

   ET_LOG(Info, "Starting the model execution...");
   Error status = method->execute();
diff --git a/backends/cadence/hifi/operators/dequantize_per_tensor.cpp b/backends/cadence/hifi/operators/dequantize_per_tensor.cpp
index 37eaecbe19d..ca480680a8a 100644
--- a/backends/cadence/hifi/operators/dequantize_per_tensor.cpp
+++ b/backends/cadence/hifi/operators/dequantize_per_tensor.cpp
@@ -13,9 +13,9 @@ namespace impl {
 namespace HiFi {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;
-using ScalarType = exec_aten::ScalarType;

 void dequantize_per_tensor_out(
     KernelRuntimeContext& context,
diff --git a/backends/cadence/hifi/operators/quantize_per_tensor.cpp b/backends/cadence/hifi/operators/quantize_per_tensor.cpp
index 6e74fb4f3ce..043957d7e92 100644
--- a/backends/cadence/hifi/operators/quantize_per_tensor.cpp
+++ b/backends/cadence/hifi/operators/quantize_per_tensor.cpp
@@ -13,9 +13,9 @@ namespace impl {
 namespace HiFi {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;
-using ScalarType = exec_aten::ScalarType;

 // Quantize the input tensor (PT2 version). Note that quant_<min, max> are not
 // used in any computation.
diff --git a/backends/cadence/hifi/operators/quantized_layer_norm.cpp b/backends/cadence/hifi/operators/quantized_layer_norm.cpp
index 930ce12dea9..189d117bd39 100644
--- a/backends/cadence/hifi/operators/quantized_layer_norm.cpp
+++ b/backends/cadence/hifi/operators/quantized_layer_norm.cpp
@@ -13,7 +13,7 @@
 #include
 #include

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 namespace impl {
@@ -118,14 +118,14 @@ void quantized_layer_norm_out(
     const Tensor& input,
     const Tensor& in_scale,
     const Tensor& in_zero_point,
-    const exec_aten::IntArrayRef normalized_shape,
+    const executorch::aten::IntArrayRef normalized_shape,
     const Tensor& weight,
     const Tensor& bias,
     double eps,
     double output_scale,
     int64_t output_zero_point,
     Tensor& out) {
-  if (input.scalar_type() == exec_aten::ScalarType::Byte) {
+  if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
     quantized_layer_norm_<uint8_t>(
         input,
         in_scale,
@@ -136,7 +136,7 @@
         output_scale,
         output_zero_point,
         out);
-  } else if (input.scalar_type() == exec_aten::ScalarType::Char) {
+  } else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
     quantized_layer_norm_<int8_t>(
         input,
         in_scale,
diff --git a/backends/cadence/hifi/operators/quantized_linear_out.cpp b/backends/cadence/hifi/operators/quantized_linear_out.cpp
index 0a254cb5f7d..f57d342d524 100644
--- a/backends/cadence/hifi/operators/quantized_linear_out.cpp
+++ b/backends/cadence/hifi/operators/quantized_linear_out.cpp
@@ -16,7 +16,7 @@ namespace impl {
 namespace HiFi {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 void quantized_linear_out(
@@ -29,7 +29,7 @@ void quantized_linear_out(
     const Tensor& out_multiplier,
     const Tensor& out_shift,
     int64_t out_zero_point,
-    const exec_aten::optional<Tensor>& offset,
+    const executorch::aten::optional<Tensor>& offset,
     Tensor& out) {
   // input comes in shape [leading_dims, in_dim]
   // weight comes in shape [out_dim, in_dim]
diff --git a/backends/cadence/reference/operators/dequantize_per_tensor.cpp b/backends/cadence/reference/operators/dequantize_per_tensor.cpp
index 9c6cf6ecc55..bbf427e069d 100644
--- a/backends/cadence/reference/operators/dequantize_per_tensor.cpp
+++ b/backends/cadence/reference/operators/dequantize_per_tensor.cpp
@@ -13,9 +13,9 @@ namespace impl {
 namespace reference {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;
-using ScalarType = exec_aten::ScalarType;

 void dequantize_per_tensor_out(
     KernelRuntimeContext& context,
diff --git a/backends/cadence/reference/operators/op_embedding.cpp b/backends/cadence/reference/operators/op_embedding.cpp
index e1e4984b56e..ce28789a156 100644
--- a/backends/cadence/reference/operators/op_embedding.cpp
+++ b/backends/cadence/reference/operators/op_embedding.cpp
@@ -12,7 +12,7 @@ namespace torch {
 namespace executor {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 void embedding_out(
diff --git a/backends/cadence/reference/operators/op_full.cpp b/backends/cadence/reference/operators/op_full.cpp
index 00be1889651..21d5fc56299 100644
--- a/backends/cadence/reference/operators/op_full.cpp
+++ b/backends/cadence/reference/operators/op_full.cpp
@@ -13,8 +13,8 @@ namespace torch {
 namespace executor {
 namespace native {
-using Tensor = exec_aten::Tensor;
-using ScalarType = exec_aten::ScalarType;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;

 Tensor& full_out(
     KernelRuntimeContext& ctx,
diff --git a/backends/cadence/reference/operators/op_view_copy.cpp b/backends/cadence/reference/operators/op_view_copy.cpp
index ac0a8598499..162e9ee201b 100644
--- a/backends/cadence/reference/operators/op_view_copy.cpp
+++ b/backends/cadence/reference/operators/op_view_copy.cpp
@@ -12,7 +12,7 @@ namespace torch {
 namespace executor {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 Tensor& view_copy_out(
diff --git a/backends/cadence/reference/operators/quantize_per_tensor.cpp b/backends/cadence/reference/operators/quantize_per_tensor.cpp
index bc200fd376e..df44171cf1b 100644
--- a/backends/cadence/reference/operators/quantize_per_tensor.cpp
+++ b/backends/cadence/reference/operators/quantize_per_tensor.cpp
@@ -13,9 +13,9 @@ namespace impl {
 namespace reference {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;
-using ScalarType = exec_aten::ScalarType;

 // Quantize the input tensor (PT2 version). Note that quant_<min, max> are not
 // used in any computation.
diff --git a/backends/cadence/reference/operators/quantized_conv_out.cpp b/backends/cadence/reference/operators/quantized_conv_out.cpp
index 47234a7cd95..3ba3faa1b3e 100644
--- a/backends/cadence/reference/operators/quantized_conv_out.cpp
+++ b/backends/cadence/reference/operators/quantized_conv_out.cpp
@@ -16,7 +16,7 @@ namespace impl {
 namespace reference {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 // This implements a generic 2d conv kernel that operates on raw pointers.
@@ -160,9 +160,9 @@ void quantized_conv_out(
     const Tensor& input,
     const Tensor& weight,
     const Tensor& bias,
-    exec_aten::IntArrayRef stride,
-    exec_aten::IntArrayRef padding,
-    exec_aten::IntArrayRef dilation,
+    executorch::aten::IntArrayRef stride,
+    executorch::aten::IntArrayRef padding,
+    executorch::aten::IntArrayRef dilation,
     int64_t groups,
     int64_t in_zero_point,
     const Tensor& weight_zero_point,
diff --git a/backends/cadence/reference/operators/quantized_layer_norm.cpp b/backends/cadence/reference/operators/quantized_layer_norm.cpp
index 0f535ccb6f1..6f4df41e8b3 100644
--- a/backends/cadence/reference/operators/quantized_layer_norm.cpp
+++ b/backends/cadence/reference/operators/quantized_layer_norm.cpp
@@ -117,14 +117,14 @@ void quantized_layer_norm_out(
     const Tensor& input,
     const Tensor& in_scale,
     const Tensor& in_zero_point,
-    const exec_aten::IntArrayRef normalized_shape,
+    const executorch::aten::IntArrayRef normalized_shape,
     const Tensor& weight,
     const Tensor& bias,
     double eps,
     double output_scale,
     int64_t output_zero_point,
     Tensor& out) {
-  if (input.scalar_type() == exec_aten::ScalarType::Byte) {
+  if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
     quantized_layer_norm_<uint8_t>(
         input,
         in_scale,
@@ -135,7 +135,7 @@
         output_scale,
         output_zero_point,
         out);
-  } else if (input.scalar_type() == exec_aten::ScalarType::Char) {
+  } else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
     quantized_layer_norm_<int8_t>(
         input,
         in_scale,
diff --git a/backends/cadence/reference/operators/quantized_linear_out.cpp b/backends/cadence/reference/operators/quantized_linear_out.cpp
index c85e3a59603..a02794c179c 100644
--- a/backends/cadence/reference/operators/quantized_linear_out.cpp
+++ b/backends/cadence/reference/operators/quantized_linear_out.cpp
@@ -27,7 +27,7 @@ void quantized_linear_out(
     const Tensor& out_multiplier,
     const Tensor& out_shift,
     int64_t out_zero_point,
-    const exec_aten::optional<Tensor>& offset,
+    const executorch::aten::optional<Tensor>& offset,
     Tensor& out) {
   // Assuming uint8_t for now, but needs to be updated for other quantization
   // types
diff --git a/backends/cadence/reference/operators/quantized_matmul_out.cpp b/backends/cadence/reference/operators/quantized_matmul_out.cpp
index b0a9393cd01..5e357be4304 100644
--- a/backends/cadence/reference/operators/quantized_matmul_out.cpp
+++ b/backends/cadence/reference/operators/quantized_matmul_out.cpp
@@ -60,7 +60,7 @@ void inline _typed_quantized_matmul(
     int64_t X_zero_point,
     const Tensor& Y,
     int64_t Y_zero_point,
-    const exec_aten::optional<Tensor>& bias,
+    const executorch::aten::optional<Tensor>& bias,
     int64_t out_multiplier,
     int64_t out_shift,
     int64_t out_zero_point,
@@ -114,13 +114,13 @@ void quantized_matmul_out(
     int64_t X_zero_point,
     const Tensor& Y,
     int64_t Y_zero_point,
-    const exec_aten::optional<Tensor>& bias,
+    const executorch::aten::optional<Tensor>& bias,
     int64_t out_multiplier,
     int64_t out_shift,
     int64_t out_zero_point,
     bool transposed,
     Tensor& out) {
-  if (out.scalar_type() == exec_aten::ScalarType::Byte) {
+  if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
     _typed_quantized_matmul<uint8_t>(
         X,
         X_zero_point,
@@ -132,7 +132,7 @@
         out_zero_point,
         transposed,
         out);
-  } else if (out.scalar_type() == exec_aten::ScalarType::Char) {
+  } else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
     _typed_quantized_matmul<int8_t>(
         X,
         X_zero_point,
diff --git a/backends/cadence/reference/operators/quantized_relu_out.cpp b/backends/cadence/reference/operators/quantized_relu_out.cpp
index 04cb2c88336..460084fcfb0 100644
--- a/backends/cadence/reference/operators/quantized_relu_out.cpp
+++ b/backends/cadence/reference/operators/quantized_relu_out.cpp
@@ -13,7 +13,7 @@ namespace impl {
 namespace reference {
 namespace native {

-using Tensor = exec_aten::Tensor;
+using executorch::aten::Tensor;
 using executorch::runtime::KernelRuntimeContext;

 template <typename T>
@@ -51,7 +51,7 @@ void quantized_relu_out(
     const Tensor& out_multiplier,
     const Tensor& out_shift,
     Tensor& output) {
-  if (input.scalar_type() == exec_aten::ScalarType::Byte) {
+  if (input.scalar_type() == executorch::aten::ScalarType::Byte) {
     quantized_relu_<uint8_t>(
         input,
         in_zero_point,
@@ -59,7 +59,7 @@
         out_multiplier,
         out_shift,
         output);
-  } else if (input.scalar_type() == exec_aten::ScalarType::Char) {
+  } else if (input.scalar_type() == executorch::aten::ScalarType::Char) {
     quantized_relu_<int8_t>(
         input,
         in_zero_point,