From fd484e0a0fc909b2d19a4f6c774d0106dac479db Mon Sep 17 00:00:00 2001
From: Dave Bort
Date: Fri, 20 Sep 2024 11:42:19 -0700
Subject: [PATCH] Remove `torch::` references from arm_executor_runner (#5506)

Summary:
Example code should use the new `executorch::` namespace wherever possible.

Pull Request resolved: https://github.com/pytorch/executorch/pull/5506

Test Plan: Built using the instructions at
https://github.com/pytorch/executorch/blob/main/examples/arm/README.md

Reviewed By: Gasoonjia

Differential Revision: D63075681

Pulled By: dbort

fbshipit-source-id: 62d12ccf6c792056d9a2949d23c64c97c0cf6a51
(cherry picked from commit 01dcebdc7591e5b216f1273723d442078b3ac92f)
---
 .../executor_runner/arm_executor_runner.cpp   | 63 ++++++++++---------
 1 file changed, 35 insertions(+), 28 deletions(-)

diff --git a/examples/arm/executor_runner/arm_executor_runner.cpp b/examples/arm/executor_runner/arm_executor_runner.cpp
index 95f2623497e..b6ac43bcf21 100644
--- a/examples/arm/executor_runner/arm_executor_runner.cpp
+++ b/examples/arm/executor_runner/arm_executor_runner.cpp
@@ -42,10 +42,23 @@ char* model_pte = nullptr;
 #include "model_pte.h"
 #endif
 
-using namespace exec_aten;
-using namespace std;
-using torch::executor::Error;
-using torch::executor::Result;
+using executorch::aten::ScalarType;
+using executorch::aten::Tensor;
+using executorch::aten::TensorImpl;
+using executorch::extension::BufferCleanup;
+using executorch::extension::BufferDataLoader;
+using executorch::runtime::Error;
+using executorch::runtime::EValue;
+using executorch::runtime::HierarchicalAllocator;
+using executorch::runtime::MemoryAllocator;
+using executorch::runtime::MemoryManager;
+using executorch::runtime::Method;
+using executorch::runtime::MethodMeta;
+using executorch::runtime::Program;
+using executorch::runtime::Result;
+using executorch::runtime::Span;
+using executorch::runtime::Tag;
+using executorch::runtime::TensorInfo;
 
 #define METHOD_ALLOCATOR_POOL_SIZE (70 * 1024 * 1024)
 unsigned char __attribute__((
@@ -83,11 +96,10 @@ void et_pal_emit_log_message(
 }
 
 namespace {
-using namespace torch::executor;
 
-Result<util::BufferCleanup> prepare_input_tensors(
+Result<BufferCleanup> prepare_input_tensors(
     Method& method,
-    torch::executor::MemoryAllocator& allocator,
+    MemoryAllocator& allocator,
     std::vector<std::pair<char*, size_t>>& input_buffers) {
   MethodMeta method_meta = method.method_meta();
   size_t num_inputs = method_meta.num_inputs();
@@ -170,18 +182,18 @@ Result<util::BufferCleanup> prepare_input_tensors(
       ET_LOG(
           Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
       // The BufferCleanup will free the inputs when it goes out of scope.
-      util::BufferCleanup cleanup({inputs, num_allocated});
+      BufferCleanup cleanup({inputs, num_allocated});
       return err;
     }
   }
-  return util::BufferCleanup({inputs, num_allocated});
+  return BufferCleanup({inputs, num_allocated});
 }
 
 #ifdef SEMIHOSTING
 
 std::pair<char*, size_t> read_binary_file(
     const char* filename,
-    torch::executor::MemoryAllocator& allocator) {
+    MemoryAllocator& allocator) {
   FILE* fp = fopen(filename, "rb");
   if (!fp) {
     ET_LOG(
@@ -233,13 +245,13 @@ int main(int argc, const char* argv[]) {
   (void)argv;
 #endif
 
-  torch::executor::runtime_init();
+  executorch::runtime::runtime_init();
   std::vector<std::pair<char*, size_t>> input_buffers;
   size_t pte_size = sizeof(model_pte);
 
 #ifdef SEMIHOSTING
   const char* output_basename = nullptr;
-  torch::executor::MemoryAllocator input_allocator(
+  MemoryAllocator input_allocator(
       input_allocation_pool_size, input_allocation_pool);
 
   /* parse input parameters */
@@ -272,10 +284,9 @@ int main(int argc, const char* argv[]) {
   }
 #endif
   ET_LOG(Info, "Model in %p %c", model_pte, model_pte[0]);
-  auto loader = torch::executor::util::BufferDataLoader(model_pte, pte_size);
+  auto loader = BufferDataLoader(model_pte, pte_size);
   ET_LOG(Info, "Model PTE file loaded. Size: %lu bytes.", pte_size);
-  Result<torch::executor::Program> program =
-      torch::executor::Program::load(&loader);
+  Result<Program> program = Program::load(&loader);
   if (!program.ok()) {
     ET_LOG(
         Info,
@@ -294,8 +305,7 @@ int main(int argc, const char* argv[]) {
   }
 
   ET_LOG(Info, "Running method %s", method_name);
-  Result<torch::executor::MethodMeta> method_meta =
-      program->method_meta(method_name);
+  Result<MethodMeta> method_meta = program->method_meta(method_name);
   if (!method_meta.ok()) {
     ET_LOG(
         Info,
@@ -304,13 +314,11 @@ int main(int argc, const char* argv[]) {
         "Failed to get method_meta for %s: 0x%x",
         method_name,
         (unsigned int)method_meta.error());
   }
 
-  torch::executor::MemoryAllocator method_allocator{
-      torch::executor::MemoryAllocator(
-          METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool)};
+  MemoryAllocator method_allocator(
+      METHOD_ALLOCATOR_POOL_SIZE, method_allocation_pool);
   std::vector<uint8_t*> planned_buffers; // Owns the memory
-  std::vector<torch::executor::Span<uint8_t>>
-      planned_spans; // Passed to the allocator
+  std::vector<Span<uint8_t>> planned_spans; // Passed to the allocator
   size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
   for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
@@ -325,17 +333,16 @@ int main(int argc, const char* argv[]) {
     planned_spans.push_back({planned_buffers.back(), buffer_size});
   }
 
-  torch::executor::HierarchicalAllocator planned_memory(
+  HierarchicalAllocator planned_memory(
      {planned_spans.data(), planned_spans.size()});
 
-  torch::executor::MemoryAllocator temp_allocator(
+  MemoryAllocator temp_allocator(
      temp_allocation_pool_size, temp_allocation_pool);
 
-  torch::executor::MemoryManager memory_manager(
+  MemoryManager memory_manager(
      &method_allocator, &planned_memory, &temp_allocator);
 
-  Result<torch::executor::Method> method =
-      program->load_method(method_name, &memory_manager);
+  Result<Method> method = program->load_method(method_name, &memory_manager);
   if (!method.ok()) {
     ET_LOG(
         Info,
@@ -374,7 +381,7 @@ int main(int argc, const char* argv[]) {
     ET_LOG(Info, "Model executed successfully.");
   }
 
-  std::vector<torch::executor::EValue> outputs(method->outputs_size());
+  std::vector<EValue> outputs(method->outputs_size());
   ET_LOG(Info, "%zu outputs: ", outputs.size());
   status = method->get_outputs(outputs.data(), outputs.size());
   ET_CHECK(status == Error::Ok);
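
Note: the following is a minimal sketch of the post-migration spelling, for
quick reference; it is not part of the patch. It assumes the executorch
headers named in the includes; `model_pte` and `pte_size` are stand-ins for
the embedded .pte image that arm_executor_runner.cpp defines, and the helper
name `load_program` is hypothetical.

    #include <executorch/extension/data_loader/buffer_data_loader.h>
    #include <executorch/runtime/executor/program.h>
    #include <executorch/runtime/platform/runtime.h>

    using executorch::extension::BufferDataLoader; // was torch::executor::util::BufferDataLoader
    using executorch::runtime::Program;            // was torch::executor::Program
    using executorch::runtime::Result;             // was torch::executor::Result

    extern char model_pte[]; // stand-in: embedded program image
    extern size_t pte_size;  // stand-in: its size in bytes

    int load_program() {
      // One-time runtime setup (was torch::executor::runtime_init()).
      executorch::runtime::runtime_init();
      // Wrap the in-memory .pte; the loader does not take ownership.
      BufferDataLoader loader(model_pte, pte_size);
      // Parse the program header via the loader.
      Result<Program> program = Program::load(&loader);
      return program.ok() ? 0 : 1;
    }

The `using` declarations at the top of the file carry most of the migration:
the unqualified names (`BufferDataLoader`, `Program`, `Result`, ...) are
unchanged throughout the runner, so only the declarations and the handful of
fully qualified `torch::executor::` references need to change.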