From da936c3f55b75f2adf8b1bc2202f49cca260e104 Mon Sep 17 00:00:00 2001
From: JoDio <49283499+JoDio-zd@users.noreply.github.com>
Date: Tue, 3 Oct 2023 10:26:25 +0800
Subject: [PATCH 1/2] feat: add CoreML inference backend

compilation: succeeds
run: still fails
---
 CMakeLists.txt                                |  11 +
 cmake/config_m1.cmake                         |  36 ++-
 cmake/coreml.cmake                            |   7 +
 cmake/nndeploy.cmake                          |  70 +++---
 include/nndeploy/base/common.h                |   1 +
 .../inference/coreml/coreml_convert.h         |  44 ++++
 .../inference/coreml/coreml_include.h         |   7 +
 .../inference/coreml/coreml_inference.h       |  58 +++++
 .../inference/coreml/coreml_inference_param.h |  42 ++++
 .../inference/coreml/coreml_convert.mm        |  70 ++++++
 .../inference/coreml/coreml_inference.mm      | 227 ++++++++++++++++++
 .../coreml/coreml_inference_param.mm          |  41 ++++
 12 files changed, 572 insertions(+), 42 deletions(-)
 create mode 100644 cmake/coreml.cmake
 create mode 100644 include/nndeploy/inference/coreml/coreml_convert.h
 create mode 100644 include/nndeploy/inference/coreml/coreml_include.h
 create mode 100644 include/nndeploy/inference/coreml/coreml_inference.h
 create mode 100644 include/nndeploy/inference/coreml/coreml_inference_param.h
 create mode 100644 source/nndeploy/inference/coreml/coreml_convert.mm
 create mode 100644 source/nndeploy/inference/coreml/coreml_inference.mm
 create mode 100644 source/nndeploy/inference/coreml/coreml_inference_param.mm

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1b7c4a3..d46dba45 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17,6 +17,9 @@ if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
   include(${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
 endif()
 
+# non-void functions must end with a return statement
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=return-type")
+
 # common
 nndeploy_option(ENABLE_NNDEPLOY_BUILD_SHARED "ENABLE_NNDEPLOY_BUILD_SHARED" ON)
 nndeploy_option(ENABLE_NNDEPLOY_SYMBOL_HIDE "ENABLE_NNDEPLOY_SYMBOL_HIDE" OFF)
@@ -338,6 +341,14 @@ if(ENABLE_NNDEPLOY_INFERENCE)
     )
     set(INFERENCE_SOURCE ${INFERENCE_SOURCE} ${INFERENCE_OPENVINO_SOURCE})
   endif()
+  if (ENABLE_NNDEPLOY_INFERENCE_COREML)
+    file(GLOB_RECURSE INFERENCE_COREML_SOURCE
+      "${ROOT_PATH}/include/nndeploy/inference/coreml/*.h"
+      "${ROOT_PATH}/source/nndeploy/inference/coreml/*.cc"
+      "${ROOT_PATH}/source/nndeploy/inference/coreml/*.mm"
+    )
+    set(INFERENCE_SOURCE ${INFERENCE_SOURCE} ${INFERENCE_COREML_SOURCE})
+  endif()
   if (ENABLE_NNDEPLOY_INFERENCE_ONNXRUNTIME)
     file(GLOB_RECURSE INFERENCE_ONNXRUNTIME_SOURCE
       "${ROOT_PATH}/include/nndeploy/inference/onnxruntime/*.h"

diff --git a/cmake/config_m1.cmake b/cmake/config_m1.cmake
index 29f205ca..a8e0d906 100644
--- a/cmake/config_m1.cmake
+++ b/cmake/config_m1.cmake
@@ -25,13 +25,17 @@ set(ENABLE_NNDEPLOY_DOCS OFF)
 set(ENABLE_NNDEPLOY_TIME_PROFILER ON)
 set(ENABLE_NNDEPLOY_OPENCV "/Users/jodio/Documents/GitHub/opencv/build/forNN") # must be ON or PATH/TO/OPENCV
 set(NNDEPLOY_OPENCV_LIBS "opencv_imgproc" "opencv_core" "opencv_imgcodecs")
-## base
+
+# # base
 set(ENABLE_NNDEPLOY_BASE ON)
-## thread
+
+# # thread
 set(ENABLE_NNDEPLOY_THREAD_POOL ON)
-## cryption
+
+# # cryption
 set(ENABLE_NNDEPLOY_CRYPTION OFF)
-## device
+
+# # device
 set(ENABLE_NNDEPLOY_DEVICE ON)
 set(ENABLE_NNDEPLOY_DEVICE_CPU OFF)
 set(ENABLE_NNDEPLOY_DEVICE_ARM ON)
@@ -44,33 +48,39 @@ set(ENABLE_NNDEPLOY_DEVICE_METAL OFF)
 set(ENABLE_NNDEPLOY_DEVICE_APPLE_NPU OFF)
 set(ENABLE_NNDEPLOY_DEVICE_HVX OFF)
 set(ENABLE_NNDEPLOY_DEVICE_MTK_VPU OFF)
-## op
+
+# # op
 set(ENABLE_NNDEPLOY_OP OFF)
 set(ENABLE_NNDEPLOY_OP_NN OFF)
 set(ENABLE_NNDEPLOY_OP_CV OFF)
 set(ENABLE_NNDEPLOY_OP_AUDIO OFF)
-## forward
+
+# # forward
 set(ENABLE_NNDEPLOY_FORWARD OFF)
-## inference
+
+# # inference
 set(ENABLE_NNDEPLOY_INFERENCE ON)
 set(ENABLE_NNDEPLOY_INFERENCE_TENSORRT OFF)
 set(ENABLE_NNDEPLOY_INFERENCE_OPENVINO OFF)
-set(ENABLE_NNDEPLOY_INFERENCE_COREML OFF)
+set(ENABLE_NNDEPLOY_INFERENCE_COREML ON)
 set(ENABLE_NNDEPLOY_INFERENCE_TFLITE OFF)
 set(ENABLE_NNDEPLOY_INFERENCE_ONNXRUNTIME OFF)
 set(ENABLE_NNDEPLOY_INFERENCE_NCNN OFF)
 set(ENABLE_NNDEPLOY_INFERENCE_TNN OFF)
-set(ENABLE_NNDEPLOY_INFERENCE_MNN "/Users/jodio/project/mnn/MNN/build/install")
+set(ENABLE_NNDEPLOY_INFERENCE_MNN OFF)
 set(ENABLE_NNDEPLOY_INFERENCE_PADDLELITE OFF)
 set(ENABLE_NNDEPLOY_AICOMPILER_TVM OFF)
-## model
+
+# # model
 set(ENABLE_NNDEPLOY_MODEL ON)
-## test
+
+# # test
 set(ENABLE_NNDEPLOY_TEST OFF)
-## demo
+
+# # demo
 set(ENABLE_NNDEPLOY_DEMO ON)
-## model detect
+# # model detect
 set(ENABLE_NNDEPLOY_MODEL_DETECT ON)
 set(ENABLE_NNDEPLOY_MODEL_DETECT_DETR OFF)
 set(ENABLE_NNDEPLOY_MODEL_DETECT_YOLO ON)
\ No newline at end of file

diff --git a/cmake/coreml.cmake b/cmake/coreml.cmake
new file mode 100644
index 00000000..86ef6be9
--- /dev/null
+++ b/cmake/coreml.cmake
@@ -0,0 +1,7 @@
+include(ExternalProject)
+
+if (ENABLE_NNDEPLOY_INFERENCE_COREML STREQUAL "OFF")
+else()
+  set(NNDEPLOY_THIRD_PARTY_LIBRARY ${NNDEPLOY_THIRD_PARTY_LIBRARY} "/System/Library/Frameworks/CoreML.framework")
+  set(NNDEPLOY_THIRD_PARTY_LIBRARY ${NNDEPLOY_THIRD_PARTY_LIBRARY} "/System/Library/Frameworks/CoreVideo.framework")
+endif()
\ No newline at end of file

diff --git a/cmake/nndeploy.cmake b/cmake/nndeploy.cmake
index f7473118..33998de3 100644
--- a/cmake/nndeploy.cmake
+++ b/cmake/nndeploy.cmake
@@ -1,5 +1,6 @@
 set(NNDEPLOY_THIRD_PARTY_LIBRARY_PATH_SUFFIX lib)
 
+
 if(SYSTEM.Android)
   list(APPEND NNDEPLOY_SYSTEM_LIBRARY log)
   set(NNDEPLOY_THIRD_PARTY_LIBRARY_PATH_SUFFIX ${ANDROID_ABI})
@@ -10,52 +11,63 @@ elseif(SYSTEM.iOS)
 elseif(SYSTEM.Windows)
 endif()
 
-#################### common ####################
-## OpenCV
+# ################### common ####################
+# # OpenCV
 include("${ROOT_PATH}/cmake/opencv.cmake")
-#################### common ####################
-#################### base ####################
-#################### base ####################
+# ################### common ####################
+
+# ################### base ####################
+# ################### base ####################
 
-#################### thread ####################
-#################### thread ####################
+# ################### thread ####################
+# ################### thread ####################
 
-#################### cryption ####################
-#################### cryption ####################
+# ################### cryption ####################
+# ################### cryption ####################
 
-#################### device ####################
-## CUDA & CUDNN
+# ################### device ####################
+# # CUDA & CUDNN
 include("${ROOT_PATH}/cmake/cuda.cmake")
-#################### device ####################
-#################### op ####################
-#################### op ####################
+# ################### device ####################
+
+# ################### op ####################
+# ################### op ####################
 
-#################### forward ####################
-#################### forward ####################
+# ################### forward ####################
+# ################### forward ####################
 
-#################### inference ####################
-## MNN
+# ################### inference ####################
+# # MNN
 include("${ROOT_PATH}/cmake/mnn.cmake")
-## tensorrt
+
+# # tensorrt
 include("${ROOT_PATH}/cmake/tensorrt.cmake")
-## onnxruntime
+
+# # onnxruntime
 include("${ROOT_PATH}/cmake/onnxruntime.cmake")
-## tnn
+
+# # tnn
 include("${ROOT_PATH}/cmake/tnn.cmake")
-## openvino
+
+# # openvino
 include("${ROOT_PATH}/cmake/openvino.cmake")
-## ncnn
+
+# # ncnn
 include("${ROOT_PATH}/cmake/ncnn.cmake")
-## paddle-lite
+
+# # coreml
+include("${ROOT_PATH}/cmake/coreml.cmake")
+
+# # paddle-lite
 include("${ROOT_PATH}/cmake/paddlelite.cmake")
-#################### inference ####################
-#################### pipeline ####################
-#################### pipeline ####################
+# ################### inference ####################
 
-#################### model ####################
-#################### model ####################
+# ################### pipeline ####################
+# ################### pipeline ####################
 
+# ################### model ####################
+# ################### model ####################
 message(STATUS "NNDEPLOY_THIRD_PARTY_LIBRARY: ${NNDEPLOY_THIRD_PARTY_LIBRARY}")
\ No newline at end of file

diff --git a/include/nndeploy/base/common.h b/include/nndeploy/base/common.h
index 88b2a5e8..bb99bb25 100644
--- a/include/nndeploy/base/common.h
+++ b/include/nndeploy/base/common.h
@@ -67,6 +67,7 @@ enum DeviceTypeCode : int {
   kDeviceTypeCodeOpenGL,
   kDeviceTypeCodeMetal,
   kDeviceTypeCodeVulkan,
+  kDeviceTypeCodeNpu,
 
   // not sopport
   kDeviceTypeCodeNotSupport,

diff --git a/include/nndeploy/inference/coreml/coreml_convert.h b/include/nndeploy/inference/coreml/coreml_convert.h
new file mode 100644
index 00000000..0df46bdc
--- /dev/null
+++ b/include/nndeploy/inference/coreml/coreml_convert.h
@@ -0,0 +1,44 @@
+
+#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_CONVERT_H_
+#define _NNDEPLOY_INFERENCE_COREML_COREML_CONVERT_H_
+
+#include "nndeploy/base/common.h"
+#include "nndeploy/base/file.h"
+#include "nndeploy/base/glic_stl_include.h"
+#include "nndeploy/base/log.h"
+#include "nndeploy/base/macro.h"
+#include "nndeploy/base/object.h"
+#include "nndeploy/base/status.h"
+#include "nndeploy/device/device.h"
+#include "nndeploy/device/tensor.h"
+#include "nndeploy/inference/coreml/coreml_include.h"
+#include "nndeploy/inference/coreml/coreml_inference_param.h"
+#include "nndeploy/inference/inference_param.h"
+
+namespace nndeploy {
+namespace inference {
+
+class CoremlConvert {
+ public:
+  // TODO: these two functions are for buffer-type data
+  static base::DataType convertToDataType(const OSType &src);
+  static OSType convertFromDataType(const base::DataType &src);
+
+  static base::DataFormat convertToDataFormat(const MLFeatureDescription &src);
+
+  static MLFeatureDescription *convertFromDataFormat(const base::DataFormat &src);
+  // You need to free it manually
+  static NSObject *convertFromDeviceType(const base::DeviceType &src);
+
+  static device::Tensor *convertToTensor(MLFeatureValue *src, std::string name,
+                                         device::Device *device);
+  static MLFeatureValue *convertFromTensor(device::Tensor *src);
+
+  static base::Status convertFromInferenceParam(CoremlInferenceParam *src,
+                                                MLModelConfiguration *dst);
+};
+
+}  // namespace inference
+}  // namespace nndeploy
+
+#endif

diff --git a/include/nndeploy/inference/coreml/coreml_include.h b/include/nndeploy/inference/coreml/coreml_include.h
new file mode 100644
index 00000000..9d8a3d89
--- /dev/null
+++ b/include/nndeploy/inference/coreml/coreml_include.h
@@ -0,0 +1,7 @@
+
+#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INCLUDE_H_
+#define _NNDEPLOY_INFERENCE_COREML_COREML_INCLUDE_H_
+#import <Foundation/Foundation.h>
+#import <CoreML/CoreML.h>
+#import <CoreVideo/CoreVideo.h>
+#endif

diff --git a/include/nndeploy/inference/coreml/coreml_inference.h b/include/nndeploy/inference/coreml/coreml_inference.h
new file mode 100644
index 00000000..d1f0ee28
--- /dev/null
+++ b/include/nndeploy/inference/coreml/coreml_inference.h
@@ -0,0 +1,58 @@
+
+#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_H_
+#define _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_H_
+
+#include "nndeploy/base/common.h"
+#include "nndeploy/base/log.h"
+#include "nndeploy/base/macro.h"
+#include "nndeploy/base/object.h"
+#include "nndeploy/base/shape.h"
+#include "nndeploy/base/status.h"
+#include "nndeploy/base/value.h"
+#include "nndeploy/device/device.h"
+#include "nndeploy/device/tensor.h"
+#include "nndeploy/inference/coreml/coreml_convert.h"
+#include "nndeploy/inference/coreml/coreml_include.h"
+#include "nndeploy/inference/coreml/coreml_inference_param.h"
+#include "nndeploy/inference/inference.h"
+#include "nndeploy/inference/inference_param.h"
+
+namespace nndeploy {
+namespace inference {
+
+#define CHECK_ERR(err) \
+  if (err) NSLog(@"error: %@", err);
+
+class CoremlInference : public Inference {
+ public:
+  CoremlInference(base::InferenceType type);
+  virtual ~CoremlInference();
+
+  virtual base::Status init();
+  virtual base::Status deinit();
+
+  virtual base::Status reshape(base::ShapeMap &shape_map);
+
+  virtual int64_t getMemorySize();
+
+  virtual float getGFLOPs();
+
+  virtual device::TensorDesc getInputTensorAlignDesc(const std::string &name);
+  virtual device::TensorDesc getOutputTensorAlignDesc(const std::string &name);
+
+  virtual base::Status run();
+
+ private:
+  base::Status allocateInputOutputTensor();
+  base::Status deallocateInputOutputTensor();
+  MLModel *mlmodel_ = nullptr;
+  NSError *err_ = nil;
+  MLModelConfiguration *config_ = nullptr;
+  NSDictionary *dict_ = nullptr;
+  NSDictionary *result_ = nullptr;
+};
+
+}  // namespace inference
+}  // namespace nndeploy
+
+#endif

diff --git a/include/nndeploy/inference/coreml/coreml_inference_param.h b/include/nndeploy/inference/coreml/coreml_inference_param.h
new file mode 100644
index 00000000..76e026d3
--- /dev/null
+++ b/include/nndeploy/inference/coreml/coreml_inference_param.h
@@ -0,0 +1,42 @@
+
+#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_PARAM_H_
+#define _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_PARAM_H_
+
+#include "nndeploy/device/device.h"
+#include "nndeploy/inference/coreml/coreml_include.h"
+#include "nndeploy/inference/inference_param.h"
+
+namespace nndeploy {
+namespace inference {
+
+class CoremlInferenceParam : public InferenceParam {
+ public:
+  CoremlInferenceParam();
+  virtual ~CoremlInferenceParam();
+
+  CoremlInferenceParam(const CoremlInferenceParam &param) = default;
+  CoremlInferenceParam &operator=(const CoremlInferenceParam &param) = default;
+
+  PARAM_COPY(CoremlInferenceParam)
+  PARAM_COPY_TO(CoremlInferenceParam)
+
+  virtual base::Status parse(const std::string &json, bool is_path = true);
+  virtual base::Status set(const std::string &key, base::Value &value);
+  virtual base::Status get(const std::string &key, base::Value &value);
+
+  /// @brief A Boolean value that determines whether to allow low-precision
+  /// accumulation on a GPU.
+  bool low_precision_acceleration_ = false;
+  enum inferenceUnits {
+    ALL_UNITS = 0,
+    CPU_ONLY = 1,
+    CPU_AND_GPU = 2,
+    CPU_AND_NPU
+  };
+  inferenceUnits inference_units_ = CPU_ONLY;
+};
+
+}  // namespace inference
+}  // namespace nndeploy
+
+#endif

diff --git a/source/nndeploy/inference/coreml/coreml_convert.mm b/source/nndeploy/inference/coreml/coreml_convert.mm
new file mode 100644
index 00000000..32e365b1
--- /dev/null
+++ b/source/nndeploy/inference/coreml/coreml_convert.mm
@@ -0,0 +1,70 @@
+
+#include "nndeploy/inference/coreml/coreml_convert.h"
+
+namespace nndeploy {
+namespace inference {
+
+base::DataType CoremlConvert::convertToDataType(const OSType &src) {
+  base::DataType dst;
+  switch (src) {
+    case kCVPixelFormatType_OneComponent8:
+    case kCVPixelFormatType_32BGRA:
+    case kCVPixelFormatType_32RGBA:
+      dst.code_ = base::kDataTypeCodeInt;
+      dst.bits_ = 8;
+      break;
+    default:
+      break;
+  }
+  return dst;
+}
+
+NSObject *CoremlConvert::convertFromDeviceType(const base::DeviceType &src) {
+  NSObject *type = nil;
+  switch (src.code_) {
+    case base::kDeviceTypeCodeCpu:
+    case base::kDeviceTypeCodeX86:
+    case base::kDeviceTypeCodeArm:
+    case base::kDeviceTypeCodeOpenCL:
+      type = reinterpret_cast<NSObject *>(new MLCPUComputeDevice());
+      break;
+    case base::kDeviceTypeCodeOpenGL:
+    case base::kDeviceTypeCodeMetal:
+    case base::kDeviceTypeCodeCuda:
+    case base::kDeviceTypeCodeVulkan:
+      type = reinterpret_cast<NSObject *>(new MLGPUComputeDevice());
+      break;
+    case base::kDeviceTypeCodeNpu:
+      type = reinterpret_cast<NSObject *>(new MLNeuralEngineComputeDevice());
+      break;
+    default:
+      type = reinterpret_cast<NSObject *>(new MLCPUComputeDevice());
+  }
+  return type;
+}
+
+base::Status CoremlConvert::convertFromInferenceParam(
+    CoremlInferenceParam *src, MLModelConfiguration *dst) {
+  dst.allowLowPrecisionAccumulationOnGPU = src->low_precision_acceleration_;
+  switch (src->inference_units_) {
+    case CoremlInferenceParam::inferenceUnits::ALL_UNITS:
+      dst.computeUnits = MLComputeUnitsAll;
+      break;
+    case CoremlInferenceParam::inferenceUnits::CPU_ONLY:
+      dst.computeUnits = MLComputeUnitsCPUOnly;
+      break;
+    case CoremlInferenceParam::inferenceUnits::CPU_AND_GPU:
+      dst.computeUnits = MLComputeUnitsCPUAndGPU;
+      break;
+    case CoremlInferenceParam::inferenceUnits::CPU_AND_NPU:
+      dst.computeUnits = MLComputeUnitsCPUAndNeuralEngine;
+      break;
+    default:
+      dst.computeUnits = MLComputeUnitsCPUOnly;
+      break;
+  }
+  return base::kStatusCodeOk;
+}
+
+}  // namespace inference
+}  // namespace nndeploy

diff --git a/source/nndeploy/inference/coreml/coreml_inference.mm b/source/nndeploy/inference/coreml/coreml_inference.mm
new file mode 100644
index 00000000..3e765353
--- /dev/null
+++ b/source/nndeploy/inference/coreml/coreml_inference.mm
@@ -0,0 +1,227 @@
+
+#include "nndeploy/inference/coreml/coreml_inference.h"
+
+namespace nndeploy {
+namespace inference {
+
+TypeInferenceRegister<TypeInferenceCreator<CoremlInference>>
+    g_coreml_inference_register(base::kInferenceTypeCoreML);
+
+CoremlInference::CoremlInference(base::InferenceType type) : Inference(type) {}
+CoremlInference::~CoremlInference() {}
+
+base::Status CoremlInference::init() {
+  base::Status status = base::kStatusCodeOk;
+
+  if (device::isHostDeviceType(inference_param_->device_type_)) {
+    is_share_command_queue_ = true;
+  } else {
+    is_share_command_queue_ = false;
+  }
+
+  CoremlInferenceParam *coreml_inference_param =
+      dynamic_cast<CoremlInferenceParam *>(inference_param_);
+  config_ = [[MLModelConfiguration alloc] init];
+
+  CoremlConvert::convertFromInferenceParam(coreml_inference_param, config_);
+
+  if (inference_param_->is_path_) {
+    NSURL *model_path = [NSURL
+        fileURLWithPath:[NSString
+                            stringWithCString:inference_param_->model_value_[0]
+                                                  .c_str()
+                                     encoding:NSASCIIStringEncoding]];
+    mlmodel_ = [MLModel modelWithContentsOfURL:model_path
+                                 configuration:config_
+                                         error:&err_];
+    CHECK_ERR(err_);
+  } else {
+    NNDEPLOY_LOGI("You will load model from memory\n");
+    // TODO: Apple only supports loading a model from memory at run time, so
+    // not here
+    // [MLModel loadModelAsset:[MLModelAsset
+    //     modelAssetWithSpecificationData:[NSData dataWithBytesNoCopy:(void
+    //         *)inference_param_->model_value_[0].c_str()
+    //     length:inference_param_->model_value_[0].length()] error:&err]
+    //     configuration:config completionHandler:(mlmodel, err){
+    // }];
+  }
+  status = allocateInputOutputTensor();
+  return status;
+}
+
+base::Status CoremlInference::deinit() {
+  base::Status status = deallocateInputOutputTensor();
+  if (mlmodel_) {
+    [mlmodel_ release];
+  }
+  if (config_) {
+    [config_ release];
+  }
+  return status;
+}
+
+base::Status CoremlInference::reshape(base::ShapeMap &shape_map) {
+  return base::kStatusCodeOk;
+}
+
+int64_t CoremlInference::getMemorySize() { return 0; }
+
+float CoremlInference::getGFLOPs() { return 1000.f; }
+
+device::TensorDesc CoremlInference::getInputTensorAlignDesc(
+    const std::string &name) {
+  if (input_tensors_.count(name) > 0) {
+    device::TensorDesc desc = input_tensors_[name]->getDesc();
+    if (desc.shape_.size() == 5) {
+      if (desc.data_format_ != base::kDataFormatNCDHW &&
+          desc.data_format_ != base::kDataFormatNDHWC) {
+        desc.data_format_ = base::kDataFormatNCDHW;
+      }
+    } else if (desc.shape_.size() == 4) {
+      if (desc.data_format_ != base::kDataFormatNHWC &&
+          desc.data_format_ != base::kDataFormatNCHW) {
+        desc.data_format_ = base::kDataFormatNCHW;
+      }
+    } else if (desc.shape_.size() == 3) {
+      if (desc.data_format_ != base::kDataFormatNHW &&
+          desc.data_format_ != base::kDataFormatNWC &&
+          desc.data_format_ != base::kDataFormatNCW) {
+        desc.data_format_ = base::kDataFormatNHW;
+      }
+    } else if (desc.shape_.size() == 2) {
+      if (desc.data_format_ != base::kDataFormatNC) {
+        desc.data_format_ = base::kDataFormatNC;
+      }
+    } else if (desc.shape_.size() == 1) {
+      if (desc.data_format_ != base::kDataFormatN) {
+        desc.data_format_ = base::kDataFormatN;
+      }
+    } else {
+      desc.data_format_ = base::kDataFormatNotSupport;
+    }
+    return desc;
+  } else {
+    return device::TensorDesc();
+  }
+}
+
+device::TensorDesc CoremlInference::getOutputTensorAlignDesc(
+    const std::string &name) {
+  if (output_tensors_.count(name) > 0) {
+    device::TensorDesc desc = output_tensors_[name]->getDesc();
+    if (desc.shape_.size() == 5) {
+      if (desc.data_format_ != base::kDataFormatNCDHW &&
+          desc.data_format_ != base::kDataFormatNDHWC) {
+        desc.data_format_ = base::kDataFormatNCDHW;
+      }
+    } else if (desc.shape_.size() == 4) {
+      if (desc.data_format_ != base::kDataFormatNHWC &&
+          desc.data_format_ != base::kDataFormatNCHW) {
+        desc.data_format_ = base::kDataFormatNCHW;
+      }
+    } else if (desc.shape_.size() == 3) {
+      if (desc.data_format_ != base::kDataFormatNHW &&
+          desc.data_format_ != base::kDataFormatNWC &&
+          desc.data_format_ != base::kDataFormatNCW) {
+        desc.data_format_ = base::kDataFormatNHW;
+      }
+    } else if (desc.shape_.size() == 2) {
+      if (desc.data_format_ != base::kDataFormatNC) {
+        desc.data_format_ = base::kDataFormatNC;
+      }
+    } else if (desc.shape_.size() == 1) {
+      if (desc.data_format_ != base::kDataFormatN) {
+        desc.data_format_ = base::kDataFormatN;
+      }
+    } else {
+      desc.data_format_ = base::kDataFormatNotSupport;
+    }
+    return desc;
+  } else {
+    return device::TensorDesc();
+  }
+}
+
+base::Status CoremlInference::run() {
+  if (dict_ == nil) {
+    dict_ = [NSDictionary alloc];
+  }
+  for (auto iter : external_input_tensors_) {
+    CVPixelBufferRef photodata = NULL;
+    int width = iter.second->getWidth();
+    int height = iter.second->getHeight();
+    int stride = iter.second->getStride()[0];
+    OSType pixelFormat = kCVPixelFormatType_OneComponent8;
+    CVReturn status = CVPixelBufferCreateWithBytes(
+        kCFAllocatorDefault, width, height, pixelFormat, iter.second->getPtr(),
+        stride, NULL, NULL, NULL, &photodata);
+    if (status != 0) {
+      NNDEPLOY_LOGE("Tensor create failed");
+    }
+    auto input_data = [MLFeatureValue featureValueWithPixelBuffer:photodata];
+    [dict_ setValue:input_data
+             forKey:[NSString stringWithCString:iter.first.c_str() encoding:NSASCIIStringEncoding]];
+  }
+  MLDictionaryFeatureProvider *provider =
+      [[MLDictionaryFeatureProvider alloc] initWithDictionary:dict_
+                                                        error:&err_];
+  NSDictionary *res =
+      [[mlmodel_ predictionFromFeatures:provider error:&err_] dictionary];
+  for (auto iter : external_output_tensors_) {
+    MLFeatureValue *value =
+        res[[NSString stringWithCString:iter.first.c_str()
+                               encoding:NSASCIIStringEncoding]];
+    // Copy the prediction output back into the caller's tensor buffer; the
+    // pixel buffer must be locked before its base address is read.
+    CVPixelBufferRef output_buffer = [value imageBufferValue];
+    CVPixelBufferLockBaseAddress(output_buffer, kCVPixelBufferLock_ReadOnly);
+    memcpy(iter.second->getPtr(), CVPixelBufferGetBaseAddress(output_buffer),
+           CVPixelBufferGetDataSize(output_buffer));
+    CVPixelBufferUnlockBaseAddress(output_buffer, kCVPixelBufferLock_ReadOnly);
+  }
+  return base::kStatusCodeOk;
+}
+
+base::Status CoremlInference::allocateInputOutputTensor() {
+  device::Device *device = nullptr;
+  if (device::isHostDeviceType(inference_param_->device_type_)) {
+    device = device::getDevice(inference_param_->device_type_);
+  }
+  MLModelDescription *model_description = [mlmodel_ modelDescription];
+  NSDictionary *model_input_feature = [model_description inputDescriptionsByName];
+  for (NSString *iter in model_input_feature) {
+    std::string name([iter cStringUsingEncoding:NSASCIIStringEncoding]);
+    MLFeatureDescription *attr = model_input_feature[iter];
+    MLImageConstraint *constraint = [attr imageConstraint];
+    base::DataType data_type = CoremlConvert::convertToDataType([constraint pixelFormatType]);
+    base::DataFormat data_fmt = base::kDataFormatAuto;
+    base::IntVector shape = {(int)constraint.pixelsHigh, (int)constraint.pixelsWide};
+    base::SizeVector stride = base::SizeVector();
+    device::TensorDesc desc(data_type, data_fmt, shape, stride);
+    device::Tensor *dst = nullptr;
+    dst = new device::Tensor(desc, name);
+    input_tensors_.insert({name, dst});
+  }
+  NSDictionary *model_output_feature = [model_description outputDescriptionsByName];
+  for (NSString *iter in model_output_feature) {
+    std::string name([iter cStringUsingEncoding:NSASCIIStringEncoding]);
+    MLFeatureDescription *attr = model_output_feature[iter];
+    MLImageConstraint *constraint = [attr imageConstraint];
+    base::DataType data_type = CoremlConvert::convertToDataType([constraint pixelFormatType]);
+    base::DataFormat data_fmt = base::kDataFormatAuto;
+    base::IntVector shape = {(int)constraint.pixelsHigh, (int)constraint.pixelsWide};
+    base::SizeVector stride = base::SizeVector();
+    device::TensorDesc desc(data_type, data_fmt, shape, stride);
+    device::Tensor *dst = nullptr;
+    dst = new device::Tensor(desc, name);
+    output_tensors_.insert({name, dst});
+  }
+  return base::kStatusCodeOk;
+}
+
+base::Status CoremlInference::deallocateInputOutputTensor() {
+  for (auto iter : input_tensors_) {
+    delete iter.second;
+  }
+  input_tensors_.clear();
+  for (auto iter : output_tensors_) {
+    delete iter.second;
+  }
+  output_tensors_.clear();
+  return base::kStatusCodeOk;
+}
+
+}  // namespace inference
+}  // namespace nndeploy

diff --git a/source/nndeploy/inference/coreml/coreml_inference_param.mm b/source/nndeploy/inference/coreml/coreml_inference_param.mm
new file mode 100644
index 00000000..961c1a37
--- /dev/null
+++ b/source/nndeploy/inference/coreml/coreml_inference_param.mm
@@ -0,0 +1,41 @@
+
+#include "nndeploy/inference/coreml/coreml_inference_param.h"
+
+namespace nndeploy {
+namespace inference {
+
+static TypeInferenceParamRegister<
+    TypeInferenceParamCreator<CoremlInferenceParam>>
+    g_coreml_inference_param_register(base::kInferenceTypeCoreML);
+
+CoremlInferenceParam::CoremlInferenceParam() : InferenceParam() {
+  model_type_ = base::kModelTypeCoreML;
+  // device_type_ = device::getDefaultHostDeviceType();
+  // num_thread_ = 4;
+}
+
+CoremlInferenceParam::~CoremlInferenceParam() {}
+
+base::Status CoremlInferenceParam::parse(const std::string &json,
+                                         bool is_path) {
+  std::string json_content = "";
+  base::Status status = InferenceParam::parse(json_content, false);
+  NNDEPLOY_RETURN_ON_NEQ(status, base::kStatusCodeOk, "parse json failed!");
+
+  return base::kStatusCodeOk;
+}
+
+base::Status CoremlInferenceParam::set(const std::string &key,
+                                       base::Value &value) {
+  base::Status status = base::kStatusCodeOk;
+  return base::kStatusCodeOk;
+}
+
+base::Status CoremlInferenceParam::get(const std::string &key,
+                                       base::Value &value) {
+  base::Status status = base::kStatusCodeOk;
+  return base::kStatusCodeOk;
+}
+
+}  // namespace inference
+}  // namespace nndeploy

From 19d767bd90f40f71a485e192802cabde59a85f41 Mon Sep 17 00:00:00 2001
From: JoDio <49283499+JoDio-zd@users.noreply.github.com>
Date: Tue, 3 Oct 2023 15:25:45 +0800
Subject: [PATCH 2/2] feat: get the CoreML backend to run successfully

problem: still needs testing with the YOLOv5 mlmodel
improve: more type support is needed in the CoreML backend
---
 .../inference/coreml/coreml_convert.h         |  4 +-
 .../inference/coreml/coreml_inference.h       |  4 +-
 .../inference/coreml/coreml_convert.mm        | 32 ++++++++++++++++
 .../inference/coreml/coreml_inference.mm      | 37 +++++--------------
 4 files changed, 46 insertions(+), 31 deletions(-)

diff --git a/include/nndeploy/inference/coreml/coreml_convert.h b/include/nndeploy/inference/coreml/coreml_convert.h
index 0df46bdc..255efdb7 100644
--- a/include/nndeploy/inference/coreml/coreml_convert.h
+++ b/include/nndeploy/inference/coreml/coreml_convert.h
@@ -30,9 +30,9 @@ class CoremlConvert {
   // You need to free it manually
   static NSObject *convertFromDeviceType(const base::DeviceType &src);
 
-  static device::Tensor *convertToTensor(MLFeatureValue *src, std::string name,
+  static device::Tensor *convertToTensor(MLFeatureDescription *src, NSString *name,
                                          device::Device *device);
-  static MLFeatureValue *convertFromTensor(device::Tensor *src);
+  static MLFeatureDescription *convertFromTensor(device::Tensor *src);
 
   static base::Status convertFromInferenceParam(CoremlInferenceParam *src,
                                                 MLModelConfiguration *dst);

diff --git a/include/nndeploy/inference/coreml/coreml_inference.h b/include/nndeploy/inference/coreml/coreml_inference.h
index d1f0ee28..91710a7a 100644
--- a/include/nndeploy/inference/coreml/coreml_inference.h
+++ b/include/nndeploy/inference/coreml/coreml_inference.h
@@ -48,8 +48,8 @@ class CoremlInference : public Inference {
   MLModel *mlmodel_ = nullptr;
   NSError *err_ = nil;
   MLModelConfiguration *config_ = nullptr;
-  NSDictionary *dict_ = nullptr;
-  NSDictionary *result_ = nullptr;
+  NSMutableDictionary *dict_ = nullptr;
+  NSMutableDictionary *result_ = nullptr;
 };
 
 }  // namespace inference

diff --git a/source/nndeploy/inference/coreml/coreml_convert.mm b/source/nndeploy/inference/coreml/coreml_convert.mm
index 32e365b1..631b4673 100644
--- a/source/nndeploy/inference/coreml/coreml_convert.mm
+++ b/source/nndeploy/inference/coreml/coreml_convert.mm
@@ -66,5 +66,37 @@
   return base::kStatusCodeOk;
 }
 
+device::Tensor *CoremlConvert::convertToTensor(MLFeatureDescription *src,
+                                               NSString *name,
+                                               device::Device *device) {
+  MLFeatureType tensor_type = [src type];
+  device::Tensor *dst = nullptr;
+  device::TensorDesc desc;
+  switch (tensor_type) {
+    case MLFeatureTypeImage: {
+      MLImageConstraint *image_attr = [src imageConstraint];
+      base::DataType data_type =
+          CoremlConvert::convertToDataType([image_attr pixelFormatType]);
+      base::DataFormat format = base::kDataFormatNHWC;
+      base::IntVector shape = {1, int([image_attr pixelsHigh]),
+                               int([image_attr pixelsWide]), 3};
+      base::SizeVector stride = base::SizeVector();
+      desc = device::TensorDesc(data_type, format, shape, stride);
+      break;
+    }
+    case MLFeatureTypeDouble: {
+      base::DataType data_type = base::DataType();
+      base::DataFormat format = base::kDataFormatN;
+      base::IntVector shape = {1};
+      base::SizeVector stride = base::SizeVector();
+      desc = device::TensorDesc(data_type, format, shape, stride);
+      break;
+    }
+    default:
+      break;
+  }
+  dst = new device::Tensor(
+      desc, std::string([name cStringUsingEncoding:NSASCIIStringEncoding]));
+  return dst;
+}
+
 }  // namespace inference
 }  // namespace nndeploy

diff --git a/source/nndeploy/inference/coreml/coreml_inference.mm b/source/nndeploy/inference/coreml/coreml_inference.mm
index 3e765353..9ce3ee18 100644
--- a/source/nndeploy/inference/coreml/coreml_inference.mm
+++ b/source/nndeploy/inference/coreml/coreml_inference.mm
@@ -145,13 +145,13 @@
 base::Status CoremlInference::run() {
   if (dict_ == nil) {
-    dict_ = [NSDictionary alloc];
+    dict_ = [[NSMutableDictionary alloc] init];
   }
   for (auto iter : external_input_tensors_) {
     CVPixelBufferRef photodata = NULL;
     int width = iter.second->getWidth();
     int height = iter.second->getHeight();
-    int stride = iter.second->getStride()[0];
+    int stride = width;
     OSType pixelFormat = kCVPixelFormatType_OneComponent8;
     CVReturn status = CVPixelBufferCreateWithBytes(
         kCFAllocatorDefault, width, height, pixelFormat, iter.second->getPtr(),
         stride, NULL, NULL, NULL, &photodata);
     if (status != 0) {
       NNDEPLOY_LOGE("Tensor create failed");
     }
-    auto input_data = [MLFeatureValue featureValueWithPixelBuffer:photodata];
-    [dict_ setValue:input_data
-             forKey:[NSString stringWithCString:iter.first.c_str() encoding:NSASCIIStringEncoding]];
+    MLFeatureValue *input_data = [MLFeatureValue featureValueWithPixelBuffer:photodata];
+    [dict_ setObject:input_data
+              forKey:[NSString stringWithCString:iter.first.c_str() encoding:NSASCIIStringEncoding]];
   }
   MLDictionaryFeatureProvider *provider =
       [[MLDictionaryFeatureProvider alloc] initWithDictionary:dict_
                                                         error:&err_];
@@ -182,31 +182,14 @@
   MLModelDescription *model_description = [mlmodel_ modelDescription];
   NSDictionary *model_input_feature = [model_description inputDescriptionsByName];
   for (NSString *iter in model_input_feature) {
-    std::string name([iter cStringUsingEncoding:NSASCIIStringEncoding]);
-    MLFeatureDescription *attr = model_input_feature[iter];
-    MLImageConstraint *constraint = [attr imageConstraint];
-    base::DataType data_type = CoremlConvert::convertToDataType([constraint pixelFormatType]);
-    base::DataFormat data_fmt = base::kDataFormatAuto;
-    base::IntVector shape = {(int)constraint.pixelsHigh, (int)constraint.pixelsWide};
-    base::SizeVector stride = base::SizeVector();
-    device::TensorDesc desc(data_type, data_fmt, shape, stride);
-    device::Tensor *dst = nullptr;
-    dst = new device::Tensor(desc, name);
-    input_tensors_.insert({name, dst});
+    device::Tensor *input_tensor =
+        CoremlConvert::convertToTensor(model_input_feature[iter], iter, device);
+    input_tensors_.insert({std::string([iter cStringUsingEncoding:NSASCIIStringEncoding]), input_tensor});
   }
   NSDictionary *model_output_feature = [model_description outputDescriptionsByName];
   for (NSString *iter in model_output_feature) {
-    std::string name([iter cStringUsingEncoding:NSASCIIStringEncoding]);
-    MLFeatureDescription *attr = model_output_feature[iter];
-    MLImageConstraint *constraint = [attr imageConstraint];
-    base::DataType data_type = CoremlConvert::convertToDataType([constraint pixelFormatType]);
-    base::DataFormat data_fmt = base::kDataFormatAuto;
-    base::IntVector shape = {(int)constraint.pixelsHigh, (int)constraint.pixelsWide};
-    base::SizeVector stride = base::SizeVector();
-    device::TensorDesc desc(data_type, data_fmt, shape, stride);
-    device::Tensor *dst = nullptr;
-    dst = new device::Tensor(desc, name);
-    output_tensors_.insert({name, dst});
+    device::Tensor *dst = CoremlConvert::convertToTensor(model_output_feature[iter], iter, device);
+    output_tensors_.insert({std::string([iter cStringUsingEncoding:NSASCIIStringEncoding]), dst});
   }
   return base::kStatusCodeOk;
 }
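
Usage note (not part of the patches above): a minimal sketch of how this backend is meant to be driven, based only on the fields these patches define (is_path_, model_value_, inference_units_, and the CoremlInference init/run/deinit flow). The model path is a placeholder, and the way the param object reaches the backend's inference_param_ member is an assumption about the surrounding nndeploy API, not something shown in these patches; in practice the type registry created by g_coreml_inference_register would normally be used instead of constructing CoremlInference directly.

    #include "nndeploy/inference/coreml/coreml_inference.h"
    #include "nndeploy/inference/coreml/coreml_inference_param.h"

    using namespace nndeploy;

    int main() {
      // Describe the model: load a compiled CoreML model from disk
      // (placeholder path) and prefer CPU + Neural Engine execution.
      auto *param = new inference::CoremlInferenceParam();
      param->is_path_ = true;
      param->model_value_ = {"/path/to/model.mlmodelc"};  // placeholder path
      param->inference_units_ = inference::CoremlInferenceParam::CPU_AND_NPU;

      inference::CoremlInference infer(base::kInferenceTypeCoreML);
      // Assumption: the base Inference class offers a way to attach `param`
      // before init(), since init() reads the inference_param_ member.
      infer.init();    // loads the .mlmodel and allocates I/O tensors
      infer.run();     // feeds external input tensors, copies outputs back
      infer.deinit();  // releases the model and configuration
      delete param;
      return 0;
    }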