[python api] add armlinux support and publish paddle-lite python #2252

Merged 2 commits on Oct 23, 2019
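This PR enables building and publishing the Paddle-Lite Python API on ARM Linux: CMake stops forcing LITE_WITH_PYTHON off for ARMLINUX targets, the publish step packages the pybind module and two Python demos, and build.sh gains --build_python and --build_java switches plus a cuda build path. A minimal build sketch, assuming a full_publish sub-command that dispatches to make_full_publish_so and an ARM Linux target selected via build.sh's other flags (neither is shown in this diff):

    ./lite/tools/build.sh --build_python=ON full_publish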
7 changes: 5 additions & 2 deletions CMakeLists.txt
@@ -79,14 +79,17 @@ if(ANDROID OR IOS OR ARMLINUX)
"Disable DSO when cross-compiling for Android and iOS" FORCE)
set(WITH_AVX OFF CACHE STRING
"Disable AVX when cross-compiling for Android and iOS" FORCE)
set(LITE_WITH_PYTHON OFF CACHE STRING
"Disable PYTHON when cross-compiling for Android and iOS" FORCE)
set(WITH_RDMA OFF CACHE STRING
"Disable RDMA when cross-compiling for Android and iOS" FORCE)
set(WITH_MKL OFF CACHE STRING
"Disable MKL when cross-compiling for Android and iOS" FORCE)
endif()

if(ANDROID OR IOS)
set(LITE_WITH_PYTHON OFF CACHE STRING
"Disable PYTHON when cross-compiling for Android and iOS" FORCE)
endif()

set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING
"A path setting third party libraries download & build directories.")

7 changes: 6 additions & 1 deletion cmake/cross_compiling/postproject.cmake
@@ -26,6 +26,8 @@ if(ANDROID)
endif()

if(ARMLINUX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
if(ARMLINUX_ARCH_ABI STREQUAL "armv8")
set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
@@ -57,7 +59,10 @@ function(check_linker_flag)
endfunction()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
if (LITE_ON_TINY_PUBLISH)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math -Ofast -Os -fno-exceptions -fomit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables")
if(NOT LITE_WITH_PYTHON)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math -Ofast -Os -fomit-frame-pointer -fno-asynchronous-unwind-tables -fno-unwind-tables")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto -fvisibility=hidden -fvisibility-inlines-hidden -fdata-sections -ffunction-sections")
check_linker_flag(-Wl,--gc-sections)
endif()
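Note on the -fno-exceptions change: the pybind11-based bindings rely on C++ exceptions, so tiny-publish builds keep exception support whenever LITE_WITH_PYTHON is on and only add -fno-exceptions otherwise.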
30 changes: 27 additions & 3 deletions lite/CMakeLists.txt
@@ -47,6 +47,9 @@ if (WITH_TESTING)
endif()
endif()

# ----------------------------- PUBLISH -----------------------------
# The final target for publish lite lib
add_custom_target(publish_inference)
if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
# for publish
set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib.${ARM_TARGET_OS}.${ARM_TARGET_ARCH_ABI}")
@@ -59,10 +62,31 @@ if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
if (LITE_WITH_FPGA)
set(INFER_LITE_PUBLISH_ROOT "${INFER_LITE_PUBLISH_ROOT}.fpga")
endif(LITE_WITH_FPGA)
message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")
else()
set(INFER_LITE_PUBLISH_ROOT "${CMAKE_BINARY_DIR}/inference_lite_lib")
endif()
message(STATUS "publish inference lib to ${INFER_LITE_PUBLISH_ROOT}")

# add python lib
if (LITE_WITH_PYTHON)
add_custom_target(publish_inference_python_lib ${TARGET}
COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/python/lib"
COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/python/pybind/liblite_pybind.so" "${INFER_LITE_PUBLISH_ROOT}/python/lib/lite_core.so")
add_custom_target(publish_inference_python_light_demo ${TARGET}
COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/python"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/python/mobilenetv1_light_api.py" "${INFER_LITE_PUBLISH_ROOT}/demo/python/")
if (NOT LITE_ON_TINY_PUBLISH)
add_custom_target(publish_inference_python_full_demo ${TARGET}
COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/python"
COMMAND cp "${CMAKE_SOURCE_DIR}/lite/demo/python/mobilenetv1_full_api.py" "${INFER_LITE_PUBLISH_ROOT}/demo/python/")
add_dependencies(publish_inference publish_inference_python_full_demo)
endif()
add_dependencies(publish_inference_python_lib lite_pybind)
add_dependencies(publish_inference publish_inference_python_lib)
add_dependencies(publish_inference publish_inference_python_light_demo)
endif()

# The final target for publish lite lib
add_custom_target(publish_inference)
if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM)
if (NOT LITE_ON_TINY_PUBLISH)
# add cxx lib
add_custom_target(publish_inference_cxx_lib ${TARGET}
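For orientation, the mkdir/cp commands above produce this Python layout under the publish root (the root name varies with the target OS and ABI; the full-API demo is omitted on tiny publish):

    inference_lite_lib.<os>.<abi>/
    ├── python/lib/lite_core.so          (liblite_pybind.so, renamed)
    └── demo/python/
        ├── mobilenetv1_light_api.py
        └── mobilenetv1_full_api.py      (full builds only)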
4 changes: 3 additions & 1 deletion lite/api/python/pybind/pybind.cc
@@ -72,13 +72,15 @@ void BindLiteApi(py::module *m) {
BindLiteCxxPredictor(m);
#endif
BindLiteLightPredictor(m);
// Global helper methods
#ifndef LITE_ON_TINY_PUBLISH
m->def("create_paddle_predictor",
[](const CxxConfig &config) -> std::unique_ptr<CxxPaddleApiImpl> {
auto x = std::unique_ptr<CxxPaddleApiImpl>(new CxxPaddleApiImpl());
x->Init(config);
return std::move(x);
});
#endif
m->def("create_paddle_predictor",
[](const MobileConfig &config) -> std::unique_ptr<LightPredictorImpl> {
auto x =
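Because the CxxConfig overload of create_paddle_predictor is guarded by LITE_ON_TINY_PUBLISH, tiny-publish Python builds expose only the MobileConfig entry point; this mirrors the CMake side, where the full-API demo is not published on tiny publish.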
67 changes: 67 additions & 0 deletions lite/demo/python/mobilenetv1_full_api.py
@@ -0,0 +1,67 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite full python api demo
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
sys.path.append('../../python/lib')

from lite_core import *

# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument(
"--model_file", default="", type=str, help="Model file")
parser.add_argument(
"--param_file", default="", type=str, help="Combined model param file")

def RunModel(args):
# 1. Set config information
config = CxxConfig()
if args.model_file != '' and args.param_file != '':
config.set_model_file(args.model_file)
config.set_param_file(args.param_file)
else:
config.set_model_dir(args.model_dir)
# For x86, you can set places = [Place(TargetType.X86, PrecisionType.FP32)]
places = [Place(TargetType.ARM, PrecisionType.FP32)]
config.set_valid_places(places)

# 2. Create paddle predictor
predictor = create_paddle_predictor(config)

# 3. Set input data
input_tensor = predictor.get_input(0)
input_tensor.resize([1, 3, 224, 224])
input_tensor.set_float_data([1.] * 3 * 224 * 224)

# 4. Run model
predictor.run()

# 5. Get output data
output_tensor = predictor.get_output(0)
print(output_tensor.shape())
print(output_tensor.float_data()[:10])

if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
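A usage sketch for the full-API demo (model paths are illustrative; the script resolves lite_core through sys.path.append('../../python/lib'), so run it from the published demo/python directory):

    cd inference_lite_lib.<os>.<abi>/demo/python
    python mobilenetv1_full_api.py --model_dir=./mobilenet_v1
    python mobilenetv1_full_api.py --model_file=./model --param_file=./params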
56 changes: 56 additions & 0 deletions lite/demo/python/mobilenetv1_light_api.py
@@ -0,0 +1,56 @@
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite light python api demo
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
sys.path.append('../../python/lib')

from lite_core import *

# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")

def RunModel(args):
# 1. Set config information
config = MobileConfig()
config.set_model_dir(args.model_dir)

# 2. Create paddle predictor
predictor = create_paddle_predictor(config)

# 3. Set input data
input_tensor = predictor.get_input(0)
input_tensor.resize([1, 3, 224, 224])
input_tensor.set_float_data([1.] * 3 * 224 * 224)

# 4. Run model
predictor.run()

# 5. Get output data
output_tensor = predictor.get_output(0)
print(output_tensor.shape())
print(output_tensor.float_data()[:10])

if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
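The light-API demo runs the same way, but MobileConfig expects a model already optimized for Lite, typically produced by the model_optimize_tool referenced later in build.sh (model path illustrative):

    cd inference_lite_lib.<os>.<abi>/demo/python
    python mobilenetv1_light_api.py --model_dir=./mobilenet_v1_opt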
48 changes: 48 additions & 0 deletions lite/tools/build.sh
@@ -15,6 +15,7 @@ readonly NUM_PROC=${LITE_BUILD_THREADS:-4}
# global variables
BUILD_EXTRA=OFF
BUILD_JAVA=ON
BUILD_PYTHON=OFF
BUILD_DIR=$(pwd)

readonly THIRDPARTY_TAR=https://paddle-inference-dist.bj.bcebos.com/PaddleLite/third-party-05b862.tar.gz
@@ -84,9 +85,11 @@ function make_tiny_publish_so {
fi

cmake .. \
${PYTHON_FLAGS} \
${CMAKE_COMMON_OPTIONS} \
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=$BUILD_JAVA \
-DLITE_WITH_PYTHON=$BUILD_PYTHON \
-DLITE_SHUTDOWN_LOG=ON \
-DLITE_ON_TINY_PUBLISH=ON \
-DANDROID_STL_TYPE=$android_stl \
@@ -122,9 +125,11 @@ function make_full_publish_so {

prepare_workspace $root_dir $build_directory
cmake $root_dir \
${PYTHON_FLAGS} \
${CMAKE_COMMON_OPTIONS} \
-DWITH_TESTING=OFF \
-DLITE_WITH_JAVA=$BUILD_JAVA \
-DLITE_WITH_PYTHON=$BUILD_PYTHON \
-DLITE_SHUTDOWN_LOG=ON \
-DANDROID_STL_TYPE=$android_stl \
-DLITE_BUILD_EXTRA=$BUILD_EXTRA \
@@ -196,6 +201,35 @@ function make_ios {
cd -
}

function make_cuda {
prepare_thirdparty

root_dir=$(pwd)
build_directory=$BUILD_DIR/build_cuda

if [ -d $build_directory ]
then
rm -rf $build_directory
fi
mkdir -p $build_directory
cd $build_directory

prepare_workspace $root_dir $build_directory

cmake .. -DWITH_MKL=OFF \
-DLITE_WITH_CUDA=ON \
-DWITH_MKLDNN=OFF \
-DLITE_WITH_X86=OFF \
-DLITE_WITH_PROFILE=OFF \
-DWITH_LITE=ON \
-DLITE_WITH_LIGHT_WEIGHT_FRAMEWORK=OFF \
-DWITH_TESTING=OFF \
-DLITE_WITH_ARM=OFF \
-DLITE_WITH_PYTHON=ON

make publish_inference_python_lib -j8
cd -
}

function print_usage {
set +x
@@ -216,6 +250,8 @@ function print_usage {
echo
echo -e "optional argument:"
echo -e "--build_extra: (OFF|ON); controls whether to publish extra operators and kernels for (sequence-related model such as OCR or NLP)"
echo -e "--build_python: (OFF|ON); controls whether to publish python api lib (ANDROID and IOS is not supported)"
echo -e "--build_java: (OFF|ON); controls whether to publish java api lib (Only ANDROID is supported)"
echo -e "--build_dir: directory for building"
echo
echo -e "argument choices:"
@@ -269,6 +305,14 @@ function main {
BUILD_EXTRA="${i#*=}"
shift
;;
--build_python=*)
BUILD_PYTHON="${i#*=}"
shift
;;
--build_java=*)
BUILD_JAVA="${i#*=}"
shift
;;
--build_dir=*)
BUILD_DIR="${i#*=}"
shift
@@ -293,6 +337,10 @@
build_model_optimize_tool
shift
;;
cuda)
make_cuda
shift
;;
*)
# unknown option
print_usage
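The new cuda sub-command dispatches to make_cuda above, so a CUDA Python build reduces to:

    ./lite/tools/build.sh cuda

which configures build_cuda/ with LITE_WITH_CUDA=ON and builds the publish_inference_python_lib target.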