
Commit 5046574

Fix bug for part of detection model (#243)
* fix error for part of detection model
* add patch paddle inference
1 parent 4d2fbcb commit 5046574

File tree

10 files changed: +61, -3 lines changed

cmake/paddle_inference.cmake

Lines changed: 4 additions & 0 deletions
@@ -88,6 +88,10 @@ ExternalProject_Add(
   ${CMAKE_COMMAND} -E copy_directory ${PADDLEINFERENCE_SOURCE_DIR} ${PADDLEINFERENCE_INSTALL_DIR}
   BUILD_BYPRODUCTS ${PADDLEINFERENCE_COMPILE_LIB})
 
+if(UNIX)
+  add_custom_target(patchelf_paddle_inference ALL COMMAND bash -c "python ${PROJECT_SOURCE_DIR}/scripts/patch_paddle_inference.py ${PADDLEINFERENCE_INSTALL_DIR}/paddle/lib/libpaddle_inference.so" DEPENDS ${LIBRARY_NAME})
+endif()
+
 add_library(external_paddle_inference STATIC IMPORTED GLOBAL)
 set_property(TARGET external_paddle_inference PROPERTY IMPORTED_LOCATION
              ${PADDLEINFERENCE_COMPILE_LIB})
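The new target patches the RPATH of the prebuilt libpaddle_inference.so on UNIX so it resolves the bundled MKLDNN/MKLML/TensorRT libraries at load time. A quick way to confirm the patch took effect after a build (a minimal sketch, assuming patchelf is on PATH; the library path is illustrative only):

import subprocess

# Illustrative path; substitute the actual PADDLEINFERENCE_INSTALL_DIR.
so_file = "third_party/install/paddle_inference/paddle/lib/libpaddle_inference.so"
rpath = subprocess.run(["patchelf", "--print-rpath", so_file],
                       capture_output=True, text=True).stdout.strip()
# Expect $ORIGIN plus the mkldnn/mklml/tensorrt directories set by the script.
print(rpath)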

fastdeploy/backends/openvino/ov_backend.cc

Lines changed: 5 additions & 1 deletion
@@ -39,6 +39,8 @@ FDDataType OpenVINODataTypeToFD(const ov::element::Type& type) {
     return FDDataType::FP64;
   } else if (type == ov::element::i8) {
     return FDDataType::INT8;
+  } else if (type == ov::element::u8) {
+    return FDDataType::UINT8;
   } else if (type == ov::element::i32) {
     return FDDataType::INT32;
   } else if (type == ov::element::i64) {
@@ -56,12 +58,14 @@ ov::element::Type FDDataTypeToOV(const FDDataType& type) {
     return ov::element::f64;
   } else if (type == FDDataType::INT8) {
     return ov::element::i8;
+  } else if (type == FDDataType::UINT8) {
+    return ov::element::u8;
   } else if (type == FDDataType::INT32) {
     return ov::element::i32;
   } else if (type == FDDataType::INT64) {
     return ov::element::i64;
   }
-  FDASSERT(false, "Only support float/double/int8/int32/int64 now.");
+  FDASSERT(false, "Only support float/double/int8/uint8/int32/int64 now.");
   return ov::element::f32;
 }

fastdeploy/backends/ort/utils.cc

Lines changed: 4 additions & 0 deletions
@@ -26,6 +26,10 @@ ONNXTensorElementDataType GetOrtDtype(const FDDataType& fd_dtype) {
     return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
   } else if (fd_dtype == FDDataType::INT64) {
     return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
+  } else if (fd_dtype == FDDataType::UINT8) {
+    return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
+  } else if (fd_dtype == FDDataType::INT8) {
+    return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
   }
   FDERROR << "Unrecognized fastdeply data type:" << Str(fd_dtype) << "."
           << std::endl;

fastdeploy/backends/paddle/util.cc

Lines changed: 3 additions & 1 deletion
@@ -75,7 +75,9 @@ FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype) {
     fd_dtype = FDDataType::INT32;
   } else if (dtype == paddle_infer::UINT8) {
     fd_dtype = FDDataType::UINT8;
-  } else {
+  } else if (dtype == paddle_infer::INT8) {
+    fd_dtype = FDDataType::INT8;
+  } else {
     FDASSERT(
         false,
         "Unexpected data type: %d while call CopyTensorToCpu in PaddleBackend.",

fastdeploy/vision/detection/ppdet/ppyoloe.cc

Lines changed: 2 additions & 0 deletions
@@ -157,6 +157,8 @@ bool PPYOLOE::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) {
       return false;
     }
   }
+
+  Cast::Run(mat, "float");
 
   outputs->resize(2);
   (*outputs)[0].name = InputInfoOfRuntime(0).name;
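The added Cast::Run(mat, "float") forces the preprocessed image to float32 before it is packed into the runtime input tensors, which is the detection-model fix the commit title refers to. End-to-end usage is unchanged; a sketch with the Python bindings (file names are placeholders, and the exact constructor signature may differ by version):

import cv2
import fastdeploy as fd

# Placeholder paths to an exported PP-YOLOE model.
model = fd.vision.detection.PPYOLOE("model.pdmodel", "model.pdiparams",
                                    "infer_cfg.yml")
im = cv2.imread("test.jpg")   # decoded as uint8 HWC
result = model.predict(im)    # preprocessing now casts to float32 internally
print(result)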

python/scripts/__init__.py

Whitespace-only changes.

python/scripts/build_gpu.sh

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+export ENABLE_ORT_BACKEND=ON
+export ENABLE_OPENVINO_BACKEND=ON
+export ENABLE_PADDLE_BACKEND=ON
+export ENABLE_TRT_BACKEND=ON
+export TRT_DIRECTORY=/fastdeploy/libs/TensorRT-8.4.1.5
+export CUDA_DIRECTORY=/usr/local/cuda
+export ENABLE_VISION=ON
+export WITH_GPU=ON
+export CMAKE_CXX_COMPILER=/usr/local/gcc-8.2/bin/g++
+
+python setup.py build
+python setup.py bdist_wheel
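The helper only exports the build switches and then runs the normal wheel build; python/setup.py presumably reads these from the environment and forwards them to CMake, roughly along the lines of this hypothetical sketch (not the repository's code):

import os

# Hypothetical illustration of how environment switches become CMake options.
options = {
    "ENABLE_ORT_BACKEND": os.getenv("ENABLE_ORT_BACKEND", "OFF"),
    "ENABLE_PADDLE_BACKEND": os.getenv("ENABLE_PADDLE_BACKEND", "OFF"),
    "ENABLE_TRT_BACKEND": os.getenv("ENABLE_TRT_BACKEND", "OFF"),
    "WITH_GPU": os.getenv("WITH_GPU", "OFF"),
}
cmake_args = ["-D{}={}".format(k, v) for k, v in options.items()]
print(cmake_args)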

python/setup.py

Lines changed: 0 additions & 1 deletion
@@ -358,7 +358,6 @@ def run(self):
             "Didn't detect path: fastdeploy/libs/third_libs exist, please execute `python setup.py build` first"
         )
         sys.exit(0)
-    sys.path.append(TOP_DIR)
     from scripts.process_libraries import process_libraries
     all_lib_data = process_libraries(
         os.path.split(os.path.abspath(__file__))[0])

scripts/patch_paddle_inference.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import shutil
+import subprocess
+import platform
+
+def process_paddle_inference(paddle_inference_so_file):
+    rpaths = [
+        "$ORIGIN",
+        "$ORIGIN/../../third_party/install/mkldnn/lib/",
+        "$ORIGIN/../../third_party/install/mklml/lib/",
+        "$ORIGIN/../../../tensorrt/lib"
+    ]
+
+    command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths), paddle_inference_so_file)
+    if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
+        assert subprocess.Popen(command, shell=True) != 0, "patchelf {} failed, the command: {}".format(command, lib)
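Note that the final assertion compares the subprocess.Popen object itself (not the patchelf exit status) against zero and formats an undefined name, lib, so it cannot actually fail on a patchelf error. A minimal self-contained sketch of the intended behaviour, assuming patchelf is on PATH (not the repository's exact script):

import subprocess
import sys


def set_rpath(so_file):
    rpaths = [
        "$ORIGIN",
        "$ORIGIN/../../third_party/install/mkldnn/lib/",
        "$ORIGIN/../../third_party/install/mklml/lib/",
        "$ORIGIN/../../../tensorrt/lib",
    ]
    command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths), so_file)
    # subprocess.run waits for the command and exposes its exit status,
    # so a failing patchelf call actually trips the assertion.
    ret = subprocess.run(command, shell=True)
    assert ret.returncode == 0, "patchelf failed, the command: {}".format(command)


if __name__ == "__main__":
    # Mirrors the CMake invocation:
    #   python scripts/patch_paddle_inference.py <path>/libpaddle_inference.so
    set_rpath(sys.argv[1])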
