diff --git a/examples/apple/coreml/executor_runner/main.mm b/examples/apple/coreml/executor_runner/main.mm index c83287fb44d..405bfb9c6c4 100644 --- a/examples/apple/coreml/executor_runner/main.mm +++ b/examples/apple/coreml/executor_runner/main.mm @@ -24,8 +24,25 @@ static inline id check_class(id obj, Class cls) { #define SAFE_CAST(Object, Type) ((Type *)check_class(Object, [Type class])) -using namespace torch::executor; -using torch::executor::util::FileDataLoader; +using executorch::etdump::ETDumpGen; +using executorch::etdump::ETDumpResult; +using executorch::extension::FileDataLoader; +using executorch::runtime::DataLoader; +using executorch::runtime::EValue; +using executorch::runtime::Error; +using executorch::runtime::EventTracer; +using executorch::runtime::EventTracerDebugLogLevel; +using executorch::runtime::FreeableBuffer; +using executorch::runtime::HierarchicalAllocator; +using executorch::runtime::MemoryAllocator; +using executorch::runtime::MemoryManager; +using executorch::runtime::Method; +using executorch::runtime::MethodMeta; +using executorch::runtime::Program; +using executorch::runtime::Result; +using executorch::runtime::Span; +using executorch::runtime::TensorInfo; +using torch::executor::CoreMLBackendDelegate; static constexpr size_t kRuntimeMemorySize = 16 * 1024U * 1024U; // 16 MB @@ -294,7 +311,7 @@ bool is_model_analysis_enabled(const Args& args) { } void dump_etdump_gen(ETDumpGen *etdump_gen, const Buffer& debug_buffer, const Args& args) { - etdump_result result = (etdump_gen != nullptr) ? etdump_gen->get_etdump_data() : etdump_result{.buf = nullptr, .size = 0}; + ETDumpResult result = (etdump_gen != nullptr) ? 
etdump_gen->get_etdump_data() : ETDumpResult{.buf = nullptr, .size = 0}; if (result.size == 0) { return; } @@ -316,7 +333,7 @@ void dump_etdump_gen(ETDumpGen *etdump_gen, const Buffer& debug_buffer, const Ar int main(int argc, char * argv[]) { @autoreleasepool { - runtime_init(); + executorch::runtime::runtime_init(); auto args = parse_command_line_args([[NSProcessInfo processInfo] arguments]); if (args.purge_models_cache) { diff --git a/examples/apple/mps/executor_runner/mps_executor_runner.mm b/examples/apple/mps/executor_runner/mps_executor_runner.mm index 040b2fcd996..e3d0e2978b6 100644 --- a/examples/apple/mps/executor_runner/mps_executor_runner.mm +++ b/examples/apple/mps/executor_runner/mps_executor_runner.mm @@ -97,8 +97,26 @@ 262144, // 256 KB "Size of the debug buffer in bytes to allocate for intermediate outputs and program outputs logging."); -using namespace torch::executor; -using torch::executor::util::FileDataLoader; +using executorch::etdump::ETDumpGen; +using executorch::etdump::ETDumpResult; +using executorch::extension::BufferCleanup; +using executorch::extension::BufferDataLoader; +using executorch::extension::FileDataLoader; +using executorch::runtime::DataLoader; +using executorch::runtime::EValue; +using executorch::runtime::Error; +using executorch::runtime::EventTracerDebugLogLevel; +using executorch::runtime::FreeableBuffer; +using executorch::runtime::HierarchicalAllocator; +using executorch::runtime::MemoryAllocator; +using executorch::runtime::MemoryManager; +using executorch::runtime::Method; +using executorch::runtime::MethodMeta; +using executorch::runtime::Program; +using executorch::runtime::Result; +using executorch::runtime::Span; + +namespace bundled_program = executorch::bundled_program; int main(int argc, char** argv) { { @@ -113,7 +131,7 @@ int main(int argc, char** argv) { return 1; } - runtime_init(); + executorch::runtime::runtime_init(); gflags::ParseCommandLineFlags(&argc, &argv, true); if (argc != 1) { @@ -144,20 
+162,20 @@ int main(int argc, char** argv) { // Find the offset to the embedded Program. const void* program_data; size_t program_data_len; - Error status = torch::executor::bundled_program::GetProgramData( + Error status = bundled_program::get_program_data( const_cast<void*>(file_data->data()), file_data->size(), &program_data, &program_data_len); ET_CHECK_MSG( status == Error::Ok, - "GetProgramData() failed on file '%s': 0x%x", + "get_program_data() failed on file '%s': 0x%x", model_path, (unsigned int)status); // Wrap the buffer in a DataLoader. auto buffer_data_loader = - util::BufferDataLoader(program_data, program_data_len); + BufferDataLoader(program_data, program_data_len); // Parse the program file. This is immutable, and can also be reused between // multiple execution invocations across multiple threads. @@ -239,7 +257,7 @@ HierarchicalAllocator planned_memory( // be used by a single thread at at time, but it can be reused. // - torch::executor::ETDumpGen etdump_gen = torch::executor::ETDumpGen(); + ETDumpGen etdump_gen; Result<Method> method = program->load_method(method_name, &memory_manager, &etdump_gen); ET_CHECK_MSG( @@ -263,11 +281,11 @@ HierarchicalAllocator planned_memory( } // Prepare the inputs. - std::unique_ptr<torch::executor::util::BufferCleanup> inputs; + std::unique_ptr<BufferCleanup> inputs; if (FLAGS_bundled_program) { ET_LOG(Info, "Loading bundled program..."); // Use the inputs embedded in the bundled program. - status = torch::executor::bundled_program::LoadBundledInput( + status = bundled_program::load_bundled_input( *method, file_data->data(), FLAGS_testset_idx); @@ -278,11 +296,11 @@ HierarchicalAllocator planned_memory( } else { ET_LOG(Info, "Loading non-bundled program...\n"); // Use ones-initialized inputs. - auto inputs_result = torch::executor::util::prepare_input_tensors(*method); + auto inputs_result = executorch::extension::prepare_input_tensors(*method); if (inputs_result.ok()) { // Will free the inputs when destroyed. 
inputs = - std::make_unique<torch::executor::util::BufferCleanup>(std::move(inputs_result.get())); + std::make_unique<BufferCleanup>(std::move(inputs_result.get())); } } ET_LOG(Info, "Inputs prepared."); @@ -322,14 +340,14 @@ HierarchicalAllocator planned_memory( status = method->get_outputs(outputs.data(), outputs.size()); ET_CHECK(status == Error::Ok); // Print the first and last 100 elements of long lists of scalars. - std::cout << torch::executor::util::evalue_edge_items(100); + std::cout << executorch::extension::evalue_edge_items(100); for (int i = 0; i < outputs.size(); ++i) { std::cout << "Output " << i << ": " << outputs[i] << std::endl; } // Dump the etdump data containing profiling/debugging data to the specified // file. - etdump_result result = etdump_gen.get_etdump_data(); + ETDumpResult result = etdump_gen.get_etdump_data(); if (result.buf != nullptr && result.size > 0) { FILE* f = fopen(FLAGS_etdump_path.c_str(), "w+"); fwrite((uint8_t*)result.buf, 1, result.size, f); @@ -362,7 +380,7 @@ HierarchicalAllocator planned_memory( atol = 1e-01; rtol = 1e-01; } - status = torch::executor::bundled_program::VerifyResultWithBundledExpectedOutput( + status = bundled_program::verify_method_outputs( *method, file_data->data(), FLAGS_testset_idx,