Commit 33cda51

Merge pull request #4 from alibaba/master

merge 191120

zzz197 committed Nov 20, 2019
2 parents 9f668bd + ac84e11
Showing 112 changed files with 2,911 additions and 226 deletions.
66 changes: 66 additions & 0 deletions .travis.yml
@@ -0,0 +1,66 @@
git:
  depth: 3
  quiet: true
matrix:
  include:
    - os: osx
      language: cpp
      osx_image: xcode11.2
      compiler: clang
      script:
        - ./schema/generate.sh
        - mkdir macosbuild
        - cd macosbuild
        - cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_METAL=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON
        - make -j8
      name: "macOS11.2 | CPU_Metal"
    - os: osx
      language: cpp
      osx_image: xcode11.2
      compiler: clang
      script:
        - ./schema/generate.sh
        - xcodebuild -configuration Release -project project/ios/MNN.xcodeproj
      name: "iOS | CPU_Metal"
    - os: linux
      sudo: required
      dist: bionic
      language: cpp
      install:
        - sudo apt-get install ant libprotobuf-dev libvulkan-dev libglew-dev freeglut3-dev protobuf-compiler ocl-icd-opencl-dev libglfw3-dev
      compiler: gcc
      script:
        - ./schema/generate.sh
        - mkdir linuxbuild
        - cd linuxbuild
        - cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_BUILD_BENCHMARK=ON
        - make -j8
      name: "Linux | CPU_CL_OpenMP_Vulkan"
    - os: linux
      sudo: required
      dist: trusty
      language: android
      compiler: clang
      android:
        components:
          - tools
          - build-tools
          - platform-tools
          - android-21
        licenses:
          - 'android-sdk-preview-license-.+'
          - 'android-sdk-license-.+'
          - 'google-gdk-license-.+'
      before_script:
        - sudo apt-get install ant libprotobuf-dev protobuf-compiler tree
        - echo yes | sdkmanager "ndk-bundle"
        - echo yes | sdkmanager "cmake;3.10.2.4988404"
        - export ANDROID_NDK=$ANDROID_HOME/ndk-bundle
      script:
        - cd project/android/
        - mkdir build_32
        - cd build_32
        - /usr/local/android-sdk/cmake/3.10.2.4988404/bin/cmake ../../../ -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="armeabi-v7a" -DANDROID_STL=c++_static -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DMNN_DEBUG=false -DNATIVE_LIBRARY_OUTPUT=.
        - make -j4
      name: "Android | AArch32"
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
20 changes: 18 additions & 2 deletions CMakeLists.txt
@@ -104,7 +104,7 @@ message(STATUS "\tOpenCL: ${MNN_OPENCL}")
message(STATUS "\tOpenGL: ${MNN_OPENGL}")
message(STATUS "\tVulkan: ${MNN_VULKAN}")
message(STATUS "\tOpenMP: ${MNN_OPENMP}")
message(STATUS "\tHideen: ${MNN_HIDDEN}")
message(STATUS "\tHidden: ${MNN_HIDDEN}")

# flags
if(SYSTEM.Android)
@@ -116,6 +116,10 @@ endif()

if(SYSTEM.Linux)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__STRICT_ANSI__")
+    if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+        # This is to work around libgcc.a
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
+    endif()
    if(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv7")
        add_definitions(-mfpu=neon) # please define in project/cross-compile/arm.toolchain.cmake
    endif()
@@ -189,6 +193,7 @@ include_directories(
"include/"
"schema/current"
"3rd_party/flatbuffers/include"
"3rd_party/half"
)

if(MNN_METAL)
@@ -276,6 +281,12 @@ include_directories(${MNN.Source_DIR})
include_directories("./")

if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND)
+    if(NOT DEFINED NATIVE_LIBRARY_OUTPUT)
+        set(NATIVE_LIBRARY_OUTPUT ${CMAKE_CURRENT_BINARY_DIR})
+        message(STATUS "Using default Android Build Library Directory: ${NATIVE_LIBRARY_OUTPUT}/MNN")
+    else()
+        message(STATUS "Using Android Build Library Directory: ${NATIVE_LIBRARY_OUTPUT}")
+    endif()
    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI})
    add_definitions(-DMNN_USE_LOGCAT)
endif()
@@ -405,6 +416,12 @@ if(WIN32)
endif()

if(SYSTEM.Android AND NOT MNN_BUILD_FOR_ANDROID_COMMAND)
+    if(NOT DEFINED NATIVE_INCLUDE_OUTPUT)
+        set(NATIVE_INCLUDE_OUTPUT ${CMAKE_CURRENT_BINARY_DIR})
+        message(STATUS "Using default Android Build Directory: ${NATIVE_INCLUDE_OUTPUT}/MNN")
+    else()
+        message(STATUS "Using Android Build Directory: ${NATIVE_INCLUDE_OUTPUT}")
+    endif()
    set(MNN_INCLUDE_OUTPUT ${NATIVE_INCLUDE_OUTPUT}/MNN)
    add_custom_command(
        TARGET MNN
@@ -482,4 +499,3 @@ install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/ExprCreator.hpp DEST
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/MathOp.hpp DESTINATION include)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/NeuralNetWorkOp.hpp DESTINATION include)
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/express/include/Optimizer.hpp DESTINATION include)

9 changes: 6 additions & 3 deletions MNN.podspec
@@ -32,9 +32,10 @@ Pod::Spec.new do |s|
  s.ios.deployment_target = '8.0'
  s.requires_arc = true

- s.source = { :git => "git@github.com:alibaba/MNN.git", :branch => 'master' }
+ s.source = { :git => "https://github.com/alibaba/MNN.git", :branch => 'master' }
  s.frameworks = 'Metal', 'Accelerate'
  s.library = 'c++'
+ s.prepare_command = 'schema/generate.sh'

  s.subspec 'core' do |a|
    a.source_files = \
@@ -52,15 +53,17 @@ Pod::Spec.new do |s|
  end
  s.subspec 'armv7' do |a|
    a.source_files = 'source/backend/cpu/arm/arm32/*.{h,c,m,mm,cc,S,hpp,cpp}'
+   a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
  end
  s.subspec 'aarch64' do |a|
    a.source_files = 'source/backend/cpu/arm/arm64/*.{h,c,m,mm,cc,S,hpp,cpp}'
+   a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
  end
  s.subspec 'metal' do |a|
    a.source_files = 'source/backend/metal/**/*.{h,c,m,mm,cc,hpp,cpp,metal}'
  end

  s.default_subspecs = 'core', 'armv7', 'aarch64', 'metal'
- s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)" "$(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include" ', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1'}
- s.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a'}
+ s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '$(PODS_TARGET_SRCROOT)/include $(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include $(PODS_TARGET_SRCROOT)/schema/current $(PODS_TARGET_SRCROOT)/source/core/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/compute/ $(PODS_TARGET_SRCROOT)/source/math/ $(PODS_TARGET_SRCROOT)/3rd_party/half', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'}
+ s.user_target_xcconfig = {'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a'}
end
2 changes: 2 additions & 0 deletions README.md
@@ -2,6 +2,8 @@

[中文版本](README_CN.md)

+[![Build Status](https://travis-ci.com/alibaba/MNN.svg?branch=master)](https://travis-ci.com/alibaba/MNN)

## Intro
MNN is a lightweight deep neural network inference engine. It loads models and runs inference on device. At present, MNN is integrated into more than 20 apps at Alibaba Inc., such as Taobao, Tmall, and Youku, covering scenarios like live broadcast, short video capture, search recommendation, product search by image, interactive marketing, equity distribution, and security risk control. MNN is also used on embedded devices such as IoT hardware.

5 changes: 5 additions & 0 deletions express/include/Expr.hpp
@@ -100,6 +100,11 @@ class MNN_EXPRESS_PUBLIC Variable {
    size_t linkNumber() const {
        return mTo.size();
    }

+   const std::list< std::pair<int, WeakEXPRP> >& toExprs() const {
+       return mTo;
+   }
+
private:
    Variable(EXPRP expr, int index) {
        mFrom = expr;
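A hedged usage sketch for the new toExprs() accessor, not part of the commit itself. It assumes WeakEXPRP is a weak pointer (for example std::weak_ptr<Expr>) that must be locked before use, and that Expr.hpp resolves on the include path; the int in each pair is treated as opaque here.

#include <cstdio>
#include "Expr.hpp" // assumed include path

using namespace MNN::Express;

// Counts how many consumer expressions of a variable are still alive.
void countLiveConsumers(VARP var) {
    int live = 0;
    for (const auto& link : var->toExprs()) {
        if (link.second.lock()) { // assumes WeakEXPRP behaves like std::weak_ptr
            ++live;
        }
    }
    std::printf("live consumers: %d\n", live);
}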
2 changes: 2 additions & 0 deletions express/include/MathOp.hpp
@@ -14,6 +14,8 @@ MNN_EXPRESS_PUBLIC VARP _Mul(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Sub(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Add(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Div(VARP x, VARP y);
+MNN_EXPRESS_PUBLIC VARP _Min(VARP x, VARP y);
+MNN_EXPRESS_PUBLIC VARP _Max(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Log(VARP x);
MNN_EXPRESS_PUBLIC VARP _Neg(VARP x);
MNN_EXPRESS_PUBLIC VARP _Rsqrt(VARP x);
12 changes: 9 additions & 3 deletions express/include/NeuralNetWorkOp.hpp
@@ -33,8 +33,10 @@ MNN_EXPRESS_PUBLIC VARP _Scale(VARP x, int channels, std::vector<float>&& scales

MNN_EXPRESS_PUBLIC VARP _Relu(VARP x, float slope = 0.0f);
MNN_EXPRESS_PUBLIC VARP _Relu6(VARP x);
+MNN_EXPRESS_PUBLIC VARP _PRelu(VARP x, std::vector<float> &&slopes);
MNN_EXPRESS_PUBLIC VARP _Softmax(VARP x, int axis);
-MNN_EXPRESS_PUBLIC std::vector<VARP> _Slice(VARP x, INTS points, int axis);
+MNN_EXPRESS_PUBLIC std::vector<VARP> _Split(VARP x, INTS points, int axis);
+MNN_EXPRESS_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes);
MNN_EXPRESS_PUBLIC VARP _Concat(VARPS xs, int axis);
MNN_EXPRESS_PUBLIC VARP _Convert(VARP x, Dimensionformat dest);
MNN_EXPRESS_PUBLIC VARP _Transpose(VARP x, INTS perm);
@@ -51,13 +53,17 @@ MNN_EXPRESS_PUBLIC VARP _Resize(VARP x, float xScale, float yScale);
MNN_EXPRESS_PUBLIC VARP _Pad(VARP x, VARP pads);
MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, int axis);
MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, VARP axis);


+MNN_EXPRESS_PUBLIC VARP _Shape(VARP x);
MNN_EXPRESS_PUBLIC VARP _Pack(VARPS xs, halide_type_t dtype, int axis);
enum InterpolationMethod {BILINEAR, NEAREST};
MNN_EXPRESS_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method);
MNN_EXPRESS_PUBLIC VARP _Fill(VARP s, VARP v);
MNN_EXPRESS_PUBLIC VARP _Tile(VARP x, VARP mul);
+MNN_EXPRESS_PUBLIC VARP _Gather(VARP embedding, VARP indices);
MNN_EXPRESS_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr);

+MNN_EXPRESS_PUBLIC VARP _Squeeze(VARP x, INTS axes = {});
+MNN_EXPRESS_PUBLIC VARP _Unsqueeze(VARP x, INTS axes = {});

} // namespace Express
} // namespace MNN
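To make the rename concrete, here is a minimal sketch, not part of the commit, that builds both the renamed _Split and the new tensor-style _Slice. It assumes ExprCreator.hpp aggregates these declarations (it is installed alongside them in the CMakeLists above) and reuses the _Const signature exactly as it appears later in this diff; shapes and values are illustrative only.

#include "ExprCreator.hpp" // assumed umbrella header for the express ops

using namespace MNN::Express;

int main() {
    const float data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    auto x = _Const(data, {2, 4}, NHWC, halide_type_of<float>()); // 2x4 tensor

    // Formerly _Slice(x, points, axis); the Caffe-style overload is now _Split.
    std::vector<VARP> pieces = _Split(x, {2, 2}, 1); // split along axis 1, per the Slice op's semantics

    // The new _Slice is TF-style: start offsets and sizes arrive as variables.
    const int starts[2] = {0, 1};
    const int sizes[2] = {2, 2};
    auto sub = _Slice(x,
                      _Const(starts, {2}, NHWC, halide_type_of<int>()),
                      _Const(sizes, {2}, NHWC, halide_type_of<int>()));
    return 0;
}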
2 changes: 1 addition & 1 deletion express/source/Expr.cpp
@@ -116,7 +116,7 @@ bool Expr::requireInfo() {
            return false;
        }
        mInside->mInputInfos[i] = mInputs[i]->getInfo();
-       if (nullptr == mInside->mInputInfos[i] && OpType_Concat != mOp->type()) {
+       if (nullptr == mInside->mInputInfos[i] && (!mInside->mReq.supportError[i])) {
#ifdef MNN_EXPRESS_ERROR_REPORT
            MNN_ERROR("%s, %d input not ready\n", mName.c_str(), i);
#endif
6 changes: 6 additions & 0 deletions express/source/MathOp.cpp
@@ -70,6 +70,12 @@ VARP _Sub(VARP x, VARP y) {
VARP _Add(VARP x, VARP y) {
    return _Binary(x, y, BinaryOpOperation_ADD);
}
+VARP _Min(VARP x, VARP y) {
+    return _Binary(x, y, BinaryOpOperation_MINIMUM);
+}
+VARP _Max(VARP x, VARP y) {
+    return _Binary(x, y, BinaryOpOperation_MAXIMUM);
+}
VARP _Neg(VARP x) {
    return _Unary(x, UnaryOpOperation_NEG);
}
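Because _Min and _Max are thin wrappers over the MINIMUM and MAXIMUM binary ops, an element-wise clamp falls out of composing them. A small sketch under the same assumptions as the previous one; the header name is illustrative:

#include "MathOp.hpp" // assumed to pull in the VARP type via Expr.hpp

using namespace MNN::Express;

// clamp(x, lo, hi) = max(lo, min(x, hi)); broadcasting follows the BinaryOp rules.
VARP clamp(VARP x, VARP lo, VARP hi) {
    return _Max(lo, _Min(x, hi));
}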
49 changes: 48 additions & 1 deletion express/source/NeuralNetWorkOp.cpp
@@ -300,6 +300,15 @@ VARP _Relu6(VARP x) {
    relu->type = OpType_ReLU6;
    return (Variable::create(Expr::create(relu.get(), {x})));
}
+VARP _PRelu(VARP x, std::vector<float> &&slopes) {
+    std::unique_ptr<OpT> prelu(new OpT);
+    prelu->type = OpType_PReLU;
+    prelu->main.type = OpParameter_PRelu;
+    prelu->main.value = new PReluT;
+    prelu->main.AsPRelu()->slope = slopes;
+    prelu->main.AsPRelu()->slopeCount = slopes.size();
+    return (Variable::create(Expr::create(prelu.get(), {x})));
+}

VARP _Softmax(VARP x, int axis) {
    std::unique_ptr<OpT> softmax(new OpT);
@@ -335,7 +344,7 @@ VARP _Convert(VARP x, Dimensionformat dest) {
    return (Variable::create(Expr::create(convert.get(), {x})));
}

-std::vector<VARP> _Slice(VARP x, INTS points, int axis) {
+std::vector<VARP> _Split(VARP x, INTS points, int axis) {
    MNN_ASSERT(points.size() >= 1);
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Slice;
@@ -354,6 +363,12 @@ std::vector<VARP> _Slice(VARP x, INTS points, int axis) {
    return res;
}

+VARP _Slice(VARP x, VARP starts, VARP sizes) {
+    std::unique_ptr<OpT> slice(new OpT);
+    slice->type = OpType_SliceTf;
+    return (Variable::create(Expr::create(slice.get(), {x, starts, sizes})));
+}

VARP _Transpose(VARP x, INTS perm) {
    auto permVar = _Const((const void*)perm.data(), {static_cast<int>(perm.size())}, NHWC, halide_type_of<int>());
    return _Transpose(x, permVar);
@@ -508,6 +523,11 @@ VARP _ExpandDims(VARP x, VARP axis) {
    return (Variable::create(Expr::create(std::move(expand), {x, axis})));
}

+VARP _Shape(VARP x) {
+    std::unique_ptr<OpT> shape(new OpT);
+    shape->type = OpType_Shape;
+    return (Variable::create(Expr::create(std::move(shape), {x})));
+}
VARP _Pack(VARPS xs, halide_type_t dtype, int axis) {
    std::unique_ptr<OpT> pack(new OpT);
    pack->type = OpType_Pack;
@@ -546,6 +566,12 @@ VARP _Tile(VARP x, VARP mul) {
    tile->type = OpType_Tile;
    return (Variable::create(Expr::create(std::move(tile), {x, mul})));
}
+VARP _Gather(VARP embedding, VARP indices) {
+    std::unique_ptr<OpT> gather(new OpT);
+    gather->type = OpType_Gather;
+    gather->main.value = new GatherT;
+    return (Variable::create(Expr::create(std::move(gather), {embedding, indices})));
+}
VARP _GatherV2(VARP params, VARP indices, VARP axis) {
    std::unique_ptr<OpT> gather(new OpT);
    gather->type = OpType_GatherV2;
@@ -556,5 +582,26 @@ VARP _GatherV2(VARP params, VARP indices, VARP axis) {
        return (Variable::create(Expr::create(std::move(gather), {params, indices})));
    }
}
+
+VARP _Squeeze(VARP x, INTS axes) {
+    std::unique_ptr<OpT> squeeze(new OpT);
+    squeeze->type = OpType_Squeeze;
+    auto squeezeParam = new SqueezeParamT;
+    squeezeParam->squeezeDims = axes;
+    squeeze->main.type = OpParameter_SqueezeParam;
+    squeeze->main.value = squeezeParam;
+    return Variable::create(Expr::create(std::move(squeeze), {x}));
+}
+
+VARP _Unsqueeze(VARP x, INTS axes) {
+    std::unique_ptr<OpT> squeeze(new OpT);
+    squeeze->type = OpType_Unsqueeze;
+    auto squeezeParam = new SqueezeParamT;
+    squeezeParam->squeezeDims = axes;
+    squeeze->main.type = OpParameter_SqueezeParam;
+    squeeze->main.value = squeezeParam;
+    return Variable::create(Expr::create(std::move(squeeze), {x}));
+}
+
} // namespace Express
} // namespace MNN
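Finally, a round-trip sketch for the new _Squeeze/_Unsqueeze pair, again not part of the commit: _Unsqueeze inserts size-1 axes at the given positions and _Squeeze removes them, so the same axes list should restore the original shape. getInfo() is used as in Expr.cpp above; a null result means shape inference failed.

#include "ExprCreator.hpp" // assumed umbrella header, as in the earlier sketches

using namespace MNN::Express;

int main() {
    const float v[6] = {1, 2, 3, 4, 5, 6};
    auto x = _Const(v, {2, 3}, NHWC, halide_type_of<float>()); // shape [2, 3]

    auto expanded = _Unsqueeze(x, {0, 3}); // expected shape [1, 2, 3, 1]
    auto restored = _Squeeze(expanded, {0, 3}); // expected shape [2, 3]

    return restored->getInfo() != nullptr ? 0 : 1;
}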
