diff --git a/backends/apple/coreml/runtime/delegate/backend_delegate.h b/backends/apple/coreml/runtime/delegate/backend_delegate.h
index 55e14673203..6eff02816f1 100644
--- a/backends/apple/coreml/runtime/delegate/backend_delegate.h
+++ b/backends/apple/coreml/runtime/delegate/backend_delegate.h
@@ -6,6 +6,7 @@
 // Please refer to the license found in the LICENSE file in the root directory of the source tree.
 
 #include
+#include
 #include
 
 namespace executorchcoreml {
diff --git a/backends/apple/coreml/runtime/kvstore/database.hpp b/backends/apple/coreml/runtime/kvstore/database.hpp
index ebf628572ce..7bf7b9ba370 100644
--- a/backends/apple/coreml/runtime/kvstore/database.hpp
+++ b/backends/apple/coreml/runtime/kvstore/database.hpp
@@ -8,6 +8,7 @@
 #pragma once
 
 #include
+#include
 #include
 #include
 #include
diff --git a/backends/apple/coreml/runtime/kvstore/key_value_store.hpp b/backends/apple/coreml/runtime/kvstore/key_value_store.hpp
index 27636154f12..a63b3972bb2 100644
--- a/backends/apple/coreml/runtime/kvstore/key_value_store.hpp
+++ b/backends/apple/coreml/runtime/kvstore/key_value_store.hpp
@@ -7,6 +7,7 @@
 
 #pragma once
 
+#import
 #include
 #include
 #include
diff --git a/backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm b/backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm
index 6e152a0c8b3..aae70025f6a 100644
--- a/backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm
+++ b/backends/apple/coreml/runtime/test/CoreMLBackendDelegateTests.mm
@@ -14,7 +14,6 @@
 #import
 #import
 #import
-#import
 
 static constexpr size_t kRuntimeMemorySize = 10 * 1024U * 1024U; // 10 MB
 
@@ -95,6 +94,33 @@
     return result;
 }
 
+
+Result<std::vector<Buffer>> prepare_input_tensors(Method& method) {
+    MethodMeta method_meta = method.method_meta();
+    size_t num_inputs = method_meta.num_inputs();
+    std::vector<Buffer> buffers;
+    for (size_t i = 0; i < num_inputs; i++) {
+        Result<TensorInfo> tensor_meta = method_meta.input_tensor_meta(i);
+        if (!tensor_meta.ok()) {
+            ET_LOG(Info, "Skipping non-tensor input %zu", i);
+            continue;
+        }
+        Buffer buffer(tensor_meta->nbytes(), 1);
+        auto sizes = tensor_meta->sizes();
+        exec_aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<exec_aten::TensorImpl::SizesType *>(sizes.data()), buffer.data());
+        exec_aten::Tensor tensor(&tensor_impl);
+        EValue input_value(std::move(tensor));
+        Error err = method.set_input(input_value, i);
+        if (err != Error::Ok) {
+            ET_LOG(Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
+            return err;
+        }
+        buffers.emplace_back(std::move(buffer));
+    }
+
+    return buffers;
+}
+
 }
 
 @interface CoreMLBackendDelegateTests : XCTestCase
@@ -145,15 +171,12 @@ - (void)executeModelAtURL:(NSURL *)modelURL nTimes:(NSUInteger)nTimes {
         MemoryManager memoryManger(&methodAllocator, &plannedAllocator);
         auto method = program->load_method(methodName.get().c_str(), &memoryManger);
         XCTAssert(method.ok());
-        auto inputs = util::PrepareInputTensors(method.get());
-
+        auto inputBuffers = prepare_input_tensors(method.get());
         auto status = method->execute();
         XCTAssertEqual(status, Error::Ok);
         auto outputs = methodAllocator.allocateList<EValue>(method->outputs_size());
         status = method->get_outputs(outputs, method->outputs_size());
         XCTAssertEqual(status, Error::Ok);
-
-        util::FreeInputs(inputs);
     }
 }
 
diff --git a/examples/apple/coreml/executor_runner/main.mm b/examples/apple/coreml/executor_runner/main.mm
index 27699c5708e..0e38f9e6cbd 100644
--- a/examples/apple/coreml/executor_runner/main.mm
+++ b/examples/apple/coreml/executor_runner/main.mm
@@ -211,6 +211,32 @@ Args parse_command_line_args(NSArray *args) {
     return result;
 }
+Result<std::vector<Buffer>> prepare_input_tensors(Method& method) {
+    MethodMeta method_meta = method.method_meta();
+    size_t num_inputs = method_meta.num_inputs();
+    std::vector<Buffer> buffers;
+    for (size_t i = 0; i < num_inputs; i++) {
+        Result<TensorInfo> tensor_meta = method_meta.input_tensor_meta(i);
+        if (!tensor_meta.ok()) {
+            ET_LOG(Info, "Skipping non-tensor input %zu", i);
+            continue;
+        }
+        Buffer buffer(tensor_meta->nbytes(), 1);
+        auto sizes = tensor_meta->sizes();
+        exec_aten::TensorImpl tensor_impl(tensor_meta->scalar_type(), std::size(sizes), const_cast<exec_aten::TensorImpl::SizesType *>(sizes.data()), buffer.data());
+        exec_aten::Tensor tensor(&tensor_impl);
+        EValue input_value(std::move(tensor));
+        Error err = method.set_input(input_value, i);
+        if (err != Error::Ok) {
+            ET_LOG(Error, "Failed to prepare input %zu: 0x%" PRIx32, i, (uint32_t)err);
+            return err;
+        }
+        buffers.emplace_back(std::move(buffer));
+    }
+
+    return buffers;
+}
+
 double calculate_mean(const std::vector<double>& durations) {
     if (durations.size() == 0) {
         return 0.0;
     }
@@ -293,7 +319,7 @@ int main(int argc, char * argv[]) {
         ET_CHECK_MSG(method_name.ok(), "Failed to load method with name=%s from program=%p", method_name.get().c_str(), program.get());
 
         ET_LOG(Info, "Running method = %s", method_name.get().c_str());
-        auto inputs = util::PrepareInputTensors(*method);
+        auto inputs = prepare_input_tensors(*method);
         ET_LOG(Info, "Inputs prepared.");
 
         // Run the model.
@@ -322,7 +348,6 @@ int main(int argc, char * argv[]) {
            }
        }
 
-        util::FreeInputs(inputs);
-        return 0;
+        return EXIT_SUCCESS;
     }
 }
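For context, a minimal sketch (not part of the patch) of how the new helper is expected to be driven, mirroring the main.mm hunks above; `program`, `memory_manager`, and `method_name` are assumed to already be set up as in the existing runner, and only calls that appear in the diff itself are used:

// Illustrative only; surrounding setup assumed from main.mm above.
auto method = program->load_method(method_name.get().c_str(), &memory_manager);
ET_CHECK_MSG(method.ok(), "Failed to load method.");

// prepare_input_tensors() now owns the input allocations: the returned buffers
// back the input tensors, so they must outlive execute(). There is no
// util::FreeInputs() call to pair with it anymore.
auto inputs = prepare_input_tensors(*method);
ET_CHECK_MSG(inputs.ok(), "Failed to prepare inputs.");

Error status = method->execute();
ET_CHECK_MSG(status == Error::Ok, "Execution failed: 0x%" PRIx32, (uint32_t)status);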