Merge pull request #22 from JoDio-zd/main
feat: add coreml
Alwaysssssss committed Oct 3, 2023
2 parents 0a212c0 + 19d767b commit 84b047a
Showing 12 changed files with 587 additions and 42 deletions.
11 changes: 11 additions & 0 deletions CMakeLists.txt
@@ -17,6 +17,9 @@ if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
include(${CMAKE_CURRENT_BINARY_DIR}/config.cmake)
endif()

# non-void functions must return a value; treat a missing return as an error
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=return-type")

# common
nndeploy_option(ENABLE_NNDEPLOY_BUILD_SHARED "ENABLE_NNDEPLOY_BUILD_SHARED" ON)
nndeploy_option(ENABLE_NNDEPLOY_SYMBOL_HIDE "ENABLE_NNDEPLOY_SYMBOL_HIDE" OFF)
@@ -338,6 +341,14 @@ if(ENABLE_NNDEPLOY_INFERENCE)
)
set(INFERENCE_SOURCE ${INFERENCE_SOURCE} ${INFERENCE_OPENVINO_SOURCE})
endif()
if (ENABLE_NNDEPLOY_INFERENCE_COREML)
file(GLOB_RECURSE INFERENCE_COREML_SOURCE
"${ROOT_PATH}/include/nndeploy/inference/coreml/*.h"
"${ROOT_PATH}/source/nndeploy/inference/coreml/*.cc"
"${ROOT_PATH}/source/nndeploy/inference/coreml/*.mm"
)
set(INFERENCE_SOURCE ${INFERENCE_SOURCE} ${INFERENCE_COREML_SOURCE})
endif()
if (ENABLE_NNDEPLOY_INFERENCE_ONNXRUNTIME)
file(GLOB_RECURSE INFERENCE_ONNXRUNTIME_SOURCE
"${ROOT_PATH}/include/nndeploy/inference/onnxruntime/*.h"
36 changes: 23 additions & 13 deletions cmake/config_m1.cmake
@@ -25,13 +25,17 @@ set(ENABLE_NNDEPLOY_DOCS OFF)
set(ENABLE_NNDEPLOY_TIME_PROFILER ON)
set(ENABLE_NNDEPLOY_OPENCV "/Users/jodio/Documents/GitHub/opencv/build/forNN") # must be ON or PATH/TO/OPENCV
set(NNDEPLOY_OPENCV_LIBS "opencv_imgproc" "opencv_core" "opencv_imgcodecs")
## base

# # base
set(ENABLE_NNDEPLOY_BASE ON)
## thread

# # thread
set(ENABLE_NNDEPLOY_THREAD_POOL ON)
## cryption

# # cryption
set(ENABLE_NNDEPLOY_CRYPTION OFF)
## device

# # device
set(ENABLE_NNDEPLOY_DEVICE ON)
set(ENABLE_NNDEPLOY_DEVICE_CPU OFF)
set(ENABLE_NNDEPLOY_DEVICE_ARM ON)
@@ -44,33 +48,39 @@ set(ENABLE_NNDEPLOY_DEVICE_METAL OFF)
set(ENABLE_NNDEPLOY_DEVICE_APPLE_NPU OFF)
set(ENABLE_NNDEPLOY_DEVICE_HVX OFF)
set(ENABLE_NNDEPLOY_DEVICE_MTK_VPU OFF)
## op

# # op
set(ENABLE_NNDEPLOY_OP OFF)
set(ENABLE_NNDEPLOY_OP_NN OFF)
set(ENABLE_NNDEPLOY_OP_CV OFF)
set(ENABLE_NNDEPLOY_OP_AUDIO OFF)
## forward

# # forward
set(ENABLE_NNDEPLOY_FORWARD OFF)
## inference

# # inference
set(ENABLE_NNDEPLOY_INFERENCE ON)
set(ENABLE_NNDEPLOY_INFERENCE_TENSORRT OFF)
set(ENABLE_NNDEPLOY_INFERENCE_OPENVINO OFF)
set(ENABLE_NNDEPLOY_INFERENCE_COREML OFF)
set(ENABLE_NNDEPLOY_INFERENCE_COREML ON)
set(ENABLE_NNDEPLOY_INFERENCE_TFLITE OFF)
set(ENABLE_NNDEPLOY_INFERENCE_ONNXRUNTIME OFF)
set(ENABLE_NNDEPLOY_INFERENCE_NCNN OFF)
set(ENABLE_NNDEPLOY_INFERENCE_TNN OFF)
set(ENABLE_NNDEPLOY_INFERENCE_MNN "/Users/jodio/project/mnn/MNN/build/install")
set(ENABLE_NNDEPLOY_INFERENCE_MNN OFF)
set(ENABLE_NNDEPLOY_INFERENCE_PADDLELITE OFF)
set(ENABLE_NNDEPLOY_AICOMPILER_TVM OFF)
## model

# # model
set(ENABLE_NNDEPLOY_MODEL ON)
## test

# # test
set(ENABLE_NNDEPLOY_TEST OFF)
## demo

# # demo
set(ENABLE_NNDEPLOY_DEMO ON)

## model detect
# # model detect
set(ENABLE_NNDEPLOY_MODEL_DETECT ON)
set(ENABLE_NNDEPLOY_MODEL_DETECT_DETR OFF)
set(ENABLE_NNDEPLOY_MODEL_DETECT_YOLO ON)
7 changes: 7 additions & 0 deletions cmake/coreml.cmake
@@ -0,0 +1,7 @@
include(ExternalProject)

if(NOT ENABLE_NNDEPLOY_INFERENCE_COREML STREQUAL "OFF")
set(NNDEPLOY_THIRD_PARTY_LIBRARY ${NNDEPLOY_THIRD_PARTY_LIBRARY} "/System/Library/Frameworks/CoreML.framework")
set(NNDEPLOY_THIRD_PARTY_LIBRARY ${NNDEPLOY_THIRD_PARTY_LIBRARY} "/System/Library/Frameworks/CoreVideo.framework")
endif()
70 changes: 41 additions & 29 deletions cmake/nndeploy.cmake
@@ -1,5 +1,6 @@

set(NNDEPLOY_THIRD_PARTY_LIBRARY_PATH_SUFFIX lib)

if(SYSTEM.Android)
list(APPEND NNDEPLOY_SYSTEM_LIBRARY log)
set(NNDEPLOY_THIRD_PARTY_LIBRARY_PATH_SUFFIX ${ANDROID_ABI})
@@ -10,52 +11,63 @@ elseif(SYSTEM.iOS)
elseif(SYSTEM.Windows)
endif()

#################### common ####################
## OpenCV
# ################### common ####################
# # OpenCV
include("${ROOT_PATH}/cmake/opencv.cmake")
#################### common ####################

#################### base ####################
#################### base ####################
# ################### common ####################

# ################### base ####################
# ################### base ####################

#################### thread ####################
#################### thread ####################
# ################### thread ####################
# ################### thread ####################

#################### cryption ####################
#################### cryption ####################
# ################### cryption ####################
# ################### cryption ####################

#################### device ####################
## CUDA & CUDNN
# ################### device ####################
# # CUDA & CUDNN
include("${ROOT_PATH}/cmake/cuda.cmake")
#################### device ####################

#################### op ####################
#################### op ####################
# ################### device ####################

# ################### op ####################
# ################### op ####################

#################### forward ####################
#################### forward ####################
# ################### forward ####################
# ################### forward ####################

#################### inference ####################
## MNN
# ################### inference ####################
# # MNN
include("${ROOT_PATH}/cmake/mnn.cmake")
## tensorrt

# # tensorrt
include("${ROOT_PATH}/cmake/tensorrt.cmake")
## onnxruntime

# # onnxruntime
include("${ROOT_PATH}/cmake/onnxruntime.cmake")
## tnn

# # tnn
include("${ROOT_PATH}/cmake/tnn.cmake")
## openvino

# # openvino
include("${ROOT_PATH}/cmake/openvino.cmake")
## ncnn

# # ncnn
include("${ROOT_PATH}/cmake/ncnn.cmake")
## paddle-lite

# # coreml
include("${ROOT_PATH}/cmake/coreml.cmake")

# # paddle-lite
include("${ROOT_PATH}/cmake/paddlelite.cmake")
#################### inference ####################

#################### pipeline ####################
#################### pipeline ####################
# ################### inference ####################

#################### model ####################
#################### model ####################
# ################### pipeline ####################
# ################### pipeline ####################

# ################### model ####################
# ################### model ####################
message(STATUS "NNDEPLOY_THIRD_PARTY_LIBRARY: ${NNDEPLOY_THIRD_PARTY_LIBRARY}")
1 change: 1 addition & 0 deletions include/nndeploy/base/common.h
@@ -67,6 +67,7 @@ enum DeviceTypeCode : int {
kDeviceTypeCodeOpenGL,
kDeviceTypeCodeMetal,
kDeviceTypeCodeVulkan,
kDeviceTypeCodeNpu,

// not supported
kDeviceTypeCodeNotSupport,
44 changes: 44 additions & 0 deletions include/nndeploy/inference/coreml/coreml_convert.h
@@ -0,0 +1,44 @@

#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_CONVERT_H_
#define _NNDEPLOY_INFERENCE_COREML_COREML_CONVERT_H_

#include "nndeploy/base/common.h"
#include "nndeploy/base/file.h"
#include "nndeploy/base/glic_stl_include.h"
#include "nndeploy/base/log.h"
#include "nndeploy/base/macro.h"
#include "nndeploy/base/object.h"
#include "nndeploy/base/status.h"
#include "nndeploy/device/device.h"
#include "nndeploy/device/tensor.h"
#include "nndeploy/inference/coreml/coreml_include.h"
#include "nndeploy/inference/coreml/coreml_inference_param.h"
#include "nndeploy/inference/inference_param.h"

namespace nndeploy {
namespace inference {

class CoremlConvert {
public:
// TODO: these two functions handle buffer-typed data
static base::DataType convertToDataType(const OSType &src);
static OSType convertFromDataType(const base::DataType &src);

static base::DataFormat convertToDataFormat(const MLFeatureDescription &src);

static MLFeatureDescription *convertFromDataFormat(const base::DataFormat &src);
// The caller must free the returned object manually
static NSObject *convertFromDeviceType(const base::DeviceType &src);

static device::Tensor *convertToTensor(MLFeatureDescription *src, NSString *name,
device::Device *device);
static MLFeatureDescription *convertFromTensor(device::Tensor *src);

static base::Status convertFromInferenceParam(CoremlInferenceParam *src,
MLModelConfiguration *dst);
};

} // namespace inference
} // namespace nndeploy

#endif
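
The convert helpers above bridge Core ML feature descriptions and nndeploy tensors. A minimal usage sketch in Objective-C++ (not part of this commit; firstInputAsTensor is a hypothetical helper, and the comment on convertToTensor describes assumed behavior):

#import <CoreML/CoreML.h>

#include "nndeploy/inference/coreml/coreml_convert.h"

namespace nndeploy {
namespace inference {

// Hypothetical helper: build a tensor for a model's first input.
device::Tensor *firstInputAsTensor(MLModel *model, device::Device *device) {
  // Core ML exposes model inputs as a name -> MLFeatureDescription dictionary.
  NSDictionary<NSString *, MLFeatureDescription *> *inputs =
      model.modelDescription.inputDescriptionsByName;
  NSString *name = inputs.allKeys.firstObject;
  if (name == nil) return nullptr;
  // convertToTensor (declared above) is expected to allocate a tensor on the
  // given device whose desc matches the feature description.
  return CoremlConvert::convertToTensor(inputs[name], name, device);
}

}  // namespace inference
}  // namespace nndeploy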
7 changes: 7 additions & 0 deletions include/nndeploy/inference/coreml/coreml_include.h
@@ -0,0 +1,7 @@

#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INCLUDE_H_
#define _NNDEPLOY_INFERENCE_COREML_COREML_INCLUDE_H_
#import <CoreML/CoreML.h>
#import <CoreServices/CoreServices.h>
#import <Foundation/Foundation.h>
#endif
58 changes: 58 additions & 0 deletions include/nndeploy/inference/coreml/coreml_inference.h
@@ -0,0 +1,58 @@

#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_H_
#define _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_H_

#include "nndeploy/base/common.h"
#include "nndeploy/base/log.h"
#include "nndeploy/base/macro.h"
#include "nndeploy/base/object.h"
#include "nndeploy/base/shape.h"
#include "nndeploy/base/status.h"
#include "nndeploy/base/value.h"
#include "nndeploy/device/device.h"
#include "nndeploy/device/tensor.h"
#include "nndeploy/inference/coreml/coreml_convert.h"
#include "nndeploy/inference/coreml/coreml_include.h"
#include "nndeploy/inference/coreml/coreml_inference_param.h"
#include "nndeploy/inference/inference.h"
#include "nndeploy/inference/inference_param.h"

namespace nndeploy {
namespace inference {

#define CHECK_ERR(err) \
  if (err) { NSLog(@"error: %@", err); }

class CoremlInference : public Inference {
public:
CoremlInference(base::InferenceType type);
virtual ~CoremlInference();

virtual base::Status init();
virtual base::Status deinit();

virtual base::Status reshape(base::ShapeMap &shape_map);

virtual int64_t getMemorySize();

virtual float getGFLOPs();

virtual device::TensorDesc getInputTensorAlignDesc(const std::string &name);
virtual device::TensorDesc getOutputTensorAlignDesc(const std::string &name);

virtual base::Status run();

private:
base::Status allocateInputOutputTensor();
base::Status deallocateInputOutputTensor();
MLModel *mlmodel_ = nullptr;
NSError *err_ = nil;
MLModelConfiguration *config_ = nullptr;
NSMutableDictionary *dict_ = nullptr;
NSMutableDictionary *result_ = nullptr;
};

} // namespace inference
} // namespace nndeploy

#endif
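
A minimal lifecycle sketch for this class in Objective-C++ (not from the commit; base::kInferenceTypeCoreML and base::kStatusCodeOk are assumed names, and the model path is assumed to arrive through the base Inference's param machinery):

#include "nndeploy/inference/coreml/coreml_inference.h"

using namespace nndeploy;

void runCoremlOnce() {
  // kInferenceTypeCoreML is an assumed enum value; check base::InferenceType.
  inference::CoremlInference infer(base::kInferenceTypeCoreML);
  if (infer.init() != base::kStatusCodeOk) {  // presumably loads mlmodel_ and config_
    return;
  }
  infer.run();     // runs the Core ML model, presumably filling result_
  infer.deinit();  // releases the model and dictionaries
}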
42 changes: 42 additions & 0 deletions include/nndeploy/inference/coreml/coreml_inference_param.h
@@ -0,0 +1,42 @@

#ifndef _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_PARAM_H_
#define _NNDEPLOY_INFERENCE_COREML_COREML_INFERENCE_PARAM_H_

#include "nndeploy/device/device.h"
#include "nndeploy/inference/coreml/coreml_include.h"
#include "nndeploy/inference/inference_param.h"

namespace nndeploy {
namespace inference {

class CoremlInferenceParam : public InferenceParam {
public:
CoremlInferenceParam();
virtual ~CoremlInferenceParam();

CoremlInferenceParam(const CoremlInferenceParam &param) = default;
CoremlInferenceParam &operator=(const CoremlInferenceParam &param) = default;

PARAM_COPY(CoremlInferenceParam)
PARAM_COPY_TO(CoremlInferenceParam)

virtual base::Status parse(const std::string &json, bool is_path = true);
virtual base::Status set(const std::string &key, base::Value &value);
virtual base::Status get(const std::string &key, base::Value &value);

/// @brief A Boolean value that determines whether to allow low-precision
/// accumulation on a GPU.
bool low_precision_acceleration_ = false;
enum inferenceUnits {
ALL_UNITS = 0,
CPU_ONLY = 1,
CPU_AND_GPU = 2,
CPU_AND_NPU
};
inferenceUnits inference_units_ = CPU_ONLY;
};

} // namespace inference
} // namespace nndeploy

#endif
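
The two knobs above correspond to real MLModelConfiguration properties (allowLowPrecisionAccumulationOnGPU and computeUnits). A sketch of the translation in Objective-C++ (illustrative only; the commit's actual mapping lives in CoremlConvert::convertFromInferenceParam):

#include "nndeploy/inference/coreml/coreml_inference_param.h"

static void applyParam(const nndeploy::inference::CoremlInferenceParam &param,
                       MLModelConfiguration *config) {
  using Param = nndeploy::inference::CoremlInferenceParam;
  config.allowLowPrecisionAccumulationOnGPU = param.low_precision_acceleration_;
  switch (param.inference_units_) {
    case Param::CPU_ONLY:
      config.computeUnits = MLComputeUnitsCPUOnly;
      break;
    case Param::CPU_AND_GPU:
      config.computeUnits = MLComputeUnitsCPUAndGPU;
      break;
    // MLComputeUnitsCPUAndNeuralEngine requires macOS 13+/iOS 16+; fall back to All.
    case Param::CPU_AND_NPU:
    case Param::ALL_UNITS:
    default:
      config.computeUnits = MLComputeUnitsAll;
      break;
  }
}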
