diff --git a/CMakeLists.txt b/CMakeLists.txt index 9c15fac1b12..7c1bfdde744 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -348,6 +348,6 @@ endif(BUILD_FASTDEPLOY_PYTHON) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0") string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION) - message(WARNING "[WARNING] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.") + message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.") endif() endif() diff --git a/FastDeploy.cmake.in b/FastDeploy.cmake.in index 818533c8bd4..4f4643fdfba 100644 --- a/FastDeploy.cmake.in +++ b/FastDeploy.cmake.in @@ -113,6 +113,6 @@ message(STATUS " ENABLE_VISION : ${ENABLE_VISION}") if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0") string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION) - message(WARNING "[WARNING] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.") + message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! 
Use -DCMAKE_CXX_COMPILER to define path of your compiler.") endif() endif() diff --git a/csrcs/fastdeploy/backends/tensorrt/trt_backend.cc b/csrcs/fastdeploy/backends/tensorrt/trt_backend.cc index 6a9d21d370d..dd3f837d972 100644 --- a/csrcs/fastdeploy/backends/tensorrt/trt_backend.cc +++ b/csrcs/fastdeploy/backends/tensorrt/trt_backend.cc @@ -54,8 +54,8 @@ std::vector toVec(const nvinfer1::Dims& dim) { bool CheckDynamicShapeConfig(const paddle2onnx::OnnxReader& reader, const TrtBackendOption& option) { - //paddle2onnx::ModelTensorInfo inputs[reader.NumInputs()]; - //std::string input_shapes[reader.NumInputs()]; + // paddle2onnx::ModelTensorInfo inputs[reader.NumInputs()]; + // std::string input_shapes[reader.NumInputs()]; std::vector inputs(reader.NumInputs()); std::vector input_shapes(reader.NumInputs()); for (int i = 0; i < reader.NumInputs(); ++i) { @@ -374,27 +374,27 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model, 1U << static_cast( nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); - auto builder = SampleUniquePtr( + builder_ = SampleUniquePtr( nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger())); - if (!builder) { + if (!builder_) { FDERROR << "Failed to call createInferBuilder()." << std::endl; return false; } - auto network = SampleUniquePtr( - builder->createNetworkV2(explicitBatch)); - if (!network) { + network_ = SampleUniquePtr( + builder_->createNetworkV2(explicitBatch)); + if (!network_) { FDERROR << "Failed to call createNetworkV2()." << std::endl; return false; } - auto config = - SampleUniquePtr(builder->createBuilderConfig()); + auto config = SampleUniquePtr( + builder_->createBuilderConfig()); if (!config) { FDERROR << "Failed to call createBuilderConfig()." << std::endl; return false; } if (option.enable_fp16) { - if (!builder->platformHasFastFp16()) { + if (!builder_->platformHasFastFp16()) { FDWARNING << "Detected FP16 is not supported in the current GPU, " "will use FP32 instead." 
<< std::endl; @@ -403,25 +403,25 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model, } } - auto parser = SampleUniquePtr( - nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger())); - if (!parser) { + parser_ = SampleUniquePtr( + nvonnxparser::createParser(*network_, sample::gLogger.getTRTLogger())); + if (!parser_) { FDERROR << "Failed to call createParser()." << std::endl; return false; } - if (!parser->parse(onnx_model.data(), onnx_model.size())) { + if (!parser_->parse(onnx_model.data(), onnx_model.size())) { FDERROR << "Failed to parse ONNX model by TensorRT." << std::endl; return false; } FDINFO << "Start to building TensorRT Engine..." << std::endl; - bool fp16 = builder->platformHasFastFp16(); - builder->setMaxBatchSize(option.max_batch_size); + bool fp16 = builder_->platformHasFastFp16(); + builder_->setMaxBatchSize(option.max_batch_size); config->setMaxWorkspaceSize(option.max_workspace_size); if (option.max_shape.size() > 0) { - auto profile = builder->createOptimizationProfile(); + auto profile = builder_->createOptimizationProfile(); FDASSERT(option.max_shape.size() == option.min_shape.size() && option.min_shape.size() == option.opt_shape.size(), "[TrtBackend] Size of max_shape/opt_shape/min_shape in " @@ -459,7 +459,7 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model, } SampleUniquePtr plan{ - builder->buildSerializedNetwork(*network, *config)}; + builder_->buildSerializedNetwork(*network_, *config)}; if (!plan) { FDERROR << "Failed to call buildSerializedNetwork()." 
<< std::endl; return false; diff --git a/csrcs/fastdeploy/backends/tensorrt/trt_backend.h b/csrcs/fastdeploy/backends/tensorrt/trt_backend.h index b2555c57668..a6bc3b05309 100644 --- a/csrcs/fastdeploy/backends/tensorrt/trt_backend.h +++ b/csrcs/fastdeploy/backends/tensorrt/trt_backend.h @@ -85,6 +85,9 @@ class TrtBackend : public BaseBackend { private: std::shared_ptr engine_; std::shared_ptr context_; + SampleUniquePtr parser_; + SampleUniquePtr builder_; + SampleUniquePtr network_; cudaStream_t stream_{}; std::vector bindings_; std::vector inputs_desc_; diff --git a/csrcs/fastdeploy/fastdeploy_model.cc b/csrcs/fastdeploy/fastdeploy_model.cc index e434e19fa5b..0558dd625ae 100644 --- a/csrcs/fastdeploy/fastdeploy_model.cc +++ b/csrcs/fastdeploy/fastdeploy_model.cc @@ -53,7 +53,7 @@ bool FastDeployModel::InitRuntime() { << std::endl; return false; } - runtime_ = new Runtime(); + runtime_ = std::unique_ptr(new Runtime()); if (!runtime_->Init(runtime_option)) { return false; } @@ -88,7 +88,7 @@ bool FastDeployModel::CreateCpuBackend() { continue; } runtime_option.backend = valid_cpu_backends[i]; - runtime_ = new Runtime(); + runtime_ = std::unique_ptr(new Runtime()); if (!runtime_->Init(runtime_option)) { return false; } @@ -111,7 +111,7 @@ bool FastDeployModel::CreateGpuBackend() { continue; } runtime_option.backend = valid_gpu_backends[i]; - runtime_ = new Runtime(); + runtime_ = std::unique_ptr(new Runtime()); if (!runtime_->Init(runtime_option)) { return false; } diff --git a/csrcs/fastdeploy/fastdeploy_model.h b/csrcs/fastdeploy/fastdeploy_model.h index 070a905f411..df83ac52588 100644 --- a/csrcs/fastdeploy/fastdeploy_model.h +++ b/csrcs/fastdeploy/fastdeploy_model.h @@ -18,7 +18,7 @@ namespace fastdeploy { class FASTDEPLOY_DECL FastDeployModel { public: - virtual std::string ModelName() const { return "NameUndefined"; }; + virtual std::string ModelName() const { return "NameUndefined"; } virtual bool InitRuntime(); virtual bool CreateCpuBackend(); @@ -47,21 
+47,21 @@ class FASTDEPLOY_DECL FastDeployModel { virtual bool DebugEnabled(); private: - Runtime* runtime_ = nullptr; + std::unique_ptr runtime_; bool runtime_initialized_ = false; bool debug_ = false; }; -#define TIMERECORD_START(id) \ - TimeCounter tc_##id; \ +#define TIMERECORD_START(id) \ + TimeCounter tc_##id; \ tc_##id.Start(); -#define TIMERECORD_END(id, prefix) \ - if (DebugEnabled()) { \ - tc_##id.End(); \ - FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \ - << prefix << " duration = " << tc_##id.Duration() << "s." \ - << std::endl; \ +#define TIMERECORD_END(id, prefix) \ + if (DebugEnabled()) { \ + tc_##id.End(); \ + FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \ + << prefix << " duration = " << tc_##id.Duration() << "s." \ + << std::endl; \ } -} // namespace fastdeploy +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision.h b/csrcs/fastdeploy/vision.h index 34973ebcddd..4f1d55312a0 100644 --- a/csrcs/fastdeploy/vision.h +++ b/csrcs/fastdeploy/vision.h @@ -27,7 +27,7 @@ #include "fastdeploy/vision/megvii/yolox.h" #include "fastdeploy/vision/meituan/yolov6.h" #include "fastdeploy/vision/ppcls/model.h" -#include "fastdeploy/vision/ppdet/ppyoloe.h" +#include "fastdeploy/vision/ppdet/model.h" #include "fastdeploy/vision/ppogg/yolov5lite.h" #include "fastdeploy/vision/ppseg/model.h" #include "fastdeploy/vision/rangilyu/nanodet_plus.h" diff --git a/csrcs/fastdeploy/vision/common/processors/pad_to_size.cc b/csrcs/fastdeploy/vision/common/processors/pad_to_size.cc new file mode 100644 index 00000000000..d4cbacd879c --- /dev/null +++ b/csrcs/fastdeploy/vision/common/processors/pad_to_size.cc @@ -0,0 +1,141 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/common/processors/pad_to_size.h" + +namespace fastdeploy { +namespace vision { + +bool PadToSize::CpuRun(Mat* mat) { + if (mat->layout != Layout::HWC) { + FDERROR << "PadToSize: The input data must be Layout::HWC format!" + << std::endl; + return false; + } + if (mat->Channels() > 4) { + FDERROR << "PadToSize: Only support channels <= 4." << std::endl; + return false; + } + if (mat->Channels() != value_.size()) { + FDERROR + << "PadToSize: Require input channels equals to size of padding value, " + "but now channels = " + << mat->Channels() << ", the size of padding values = " << value_.size() + << "." << std::endl; + return false; + } + int origin_w = mat->Width(); + int origin_h = mat->Height(); + if (origin_w > width_) { + FDERROR << "PadToSize: the input width:" << origin_w + << " is greater than the target width: " << width_ << "." + << std::endl; + return false; + } + if (origin_h > height_) { + FDERROR << "PadToSize: the input height:" << origin_h + << " is greater than the target height: " << height_ << "." 
+ << std::endl; + return false; + } + if (origin_w == width_ && origin_h == height_) { + return true; + } + + cv::Mat* im = mat->GetCpuMat(); + cv::Scalar value; + if (value_.size() == 1) { + value = cv::Scalar(value_[0]); + } else if (value_.size() == 2) { + value = cv::Scalar(value_[0], value_[1]); + } else if (value_.size() == 3) { + value = cv::Scalar(value_[0], value_[1], value_[2]); + } else { + value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]); + } + // top, bottom, left, right + cv::copyMakeBorder(*im, *im, 0, height_ - origin_h, 0, width_ - origin_w, + cv::BORDER_CONSTANT, value); + mat->SetHeight(height_); + mat->SetWidth(width_); + return true; +} + +#ifdef ENABLE_OPENCV_CUDA +bool PadToSize::GpuRun(Mat* mat) { + if (mat->layout != Layout::HWC) { + FDERROR << "PadToSize: The input data must be Layout::HWC format!" + << std::endl; + return false; + } + if (mat->Channels() > 4) { + FDERROR << "PadToSize: Only support channels <= 4." << std::endl; + return false; + } + if (mat->Channels() != value_.size()) { + FDERROR + << "PadToSize: Require input channels equals to size of padding value, " + "but now channels = " + << mat->Channels() << ", the size of padding values = " << value_.size() + << "." << std::endl; + return false; + } + + int origin_w = mat->Width(); + int origin_h = mat->Height(); + if (origin_w > width_) { + FDERROR << "PadToSize: the input width:" << origin_w + << " is greater than the target width: " << width_ << "." + << std::endl; + return false; + } + if (origin_h > height_) { + FDERROR << "PadToSize: the input height:" << origin_h + << " is greater than the target height: " << height_ << "." 
+ << std::endl; + return false; + } + if (origin_w == width_ && origin_h == height_) { + return true; + } + + cv::cuda::GpuMat* im = mat->GetGpuMat(); + cv::Scalar value; + if (value_.size() == 1) { + value = cv::Scalar(value_[0]); + } else if (value_.size() == 2) { + value = cv::Scalar(value_[0], value_[1]); + } else if (value_.size() == 3) { + value = cv::Scalar(value_[0], value_[1], value_[2]); + } else { + value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]); + } + + // top, bottom, left, right + cv::cuda::copyMakeBorder(*im, *im, 0, height_ - origin_h, 0, + width_ - origin_w, cv::BORDER_CONSTANT, value); + mat->SetHeight(height_); + mat->SetWidth(width_); + return true; +} +#endif + +bool PadToSize::Run(Mat* mat, int width, int height, + const std::vector& value, ProcLib lib) { + auto p = PadToSize(width, height, value); + return p(mat, lib); +} + +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/common/processors/pad_to_size.h b/csrcs/fastdeploy/vision/common/processors/pad_to_size.h new file mode 100644 index 00000000000..ece0158f7be --- /dev/null +++ b/csrcs/fastdeploy/vision/common/processors/pad_to_size.h @@ -0,0 +1,46 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "fastdeploy/vision/common/processors/base.h" + +namespace fastdeploy { +namespace vision { + +class PadToSize : public Processor { + public: + // only support pad with left-top padding mode + PadToSize(int width, int height, const std::vector& value) { + width_ = width; + height_ = height; + value_ = value; + } + bool CpuRun(Mat* mat); +#ifdef ENABLE_OPENCV_CUDA + bool GpuRun(Mat* mat); +#endif + std::string Name() { return "PadToSize"; } + + static bool Run(Mat* mat, int width, int height, + const std::vector& value, + ProcLib lib = ProcLib::OPENCV_CPU); + + private: + int width_; + int height_; + std::vector value_; +}; +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/common/processors/stride_pad.cc b/csrcs/fastdeploy/vision/common/processors/stride_pad.cc new file mode 100644 index 00000000000..8597c83758e --- /dev/null +++ b/csrcs/fastdeploy/vision/common/processors/stride_pad.cc @@ -0,0 +1,124 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/common/processors/stride_pad.h" + +namespace fastdeploy { +namespace vision { + +bool StridePad::CpuRun(Mat* mat) { + if (mat->layout != Layout::HWC) { + FDERROR << "StridePad: The input data must be Layout::HWC format!" + << std::endl; + return false; + } + if (mat->Channels() > 4) { + FDERROR << "StridePad: Only support channels <= 4." 
<< std::endl; + return false; + } + if (mat->Channels() != value_.size()) { + FDERROR + << "StridePad: Require input channels equals to size of padding value, " + "but now channels = " + << mat->Channels() << ", the size of padding values = " << value_.size() + << "." << std::endl; + return false; + } + int origin_w = mat->Width(); + int origin_h = mat->Height(); + + int pad_h = (mat->Height() / stride_) * stride_ + + (mat->Height() % stride_ != 0) * stride_ - mat->Height(); + int pad_w = (mat->Width() / stride_) * stride_ + + (mat->Width() % stride_ != 0) * stride_ - mat->Width(); + if (pad_h == 0 && pad_w == 0) { + return true; + } + cv::Mat* im = mat->GetCpuMat(); + cv::Scalar value; + if (value_.size() == 1) { + value = cv::Scalar(value_[0]); + } else if (value_.size() == 2) { + value = cv::Scalar(value_[0], value_[1]); + } else if (value_.size() == 3) { + value = cv::Scalar(value_[0], value_[1], value_[2]); + } else { + value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]); + } + // top, bottom, left, right + cv::copyMakeBorder(*im, *im, 0, pad_h, 0, pad_w, cv::BORDER_CONSTANT, value); + mat->SetHeight(origin_h + pad_h); + mat->SetWidth(origin_w + pad_w); + return true; +} + +#ifdef ENABLE_OPENCV_CUDA +bool StridePad::GpuRun(Mat* mat) { + if (mat->layout != Layout::HWC) { + FDERROR << "StridePad: The input data must be Layout::HWC format!" + << std::endl; + return false; + } + if (mat->Channels() > 4) { + FDERROR << "StridePad: Only support channels <= 4." << std::endl; + return false; + } + if (mat->Channels() != value_.size()) { + FDERROR + << "StridePad: Require input channels equals to size of padding value, " + "but now channels = " + << mat->Channels() << ", the size of padding values = " << value_.size() + << "." 
<< std::endl; + return false; + } + + int origin_w = mat->Width(); + int origin_h = mat->Height(); + int pad_h = (mat->Height() / stride_) * stride_ + + (mat->Height() % stride_ != 0) * stride_; + int pad_w = (mat->Width() / stride_) * stride_ + + (mat->Width() % stride_ != 0) * stride_; + if (pad_h == 0 && pad_w == 0) { + return true; + } + + cv::cuda::GpuMat* im = mat->GetGpuMat(); + cv::Scalar value; + if (value_.size() == 1) { + value = cv::Scalar(value_[0]); + } else if (value_.size() == 2) { + value = cv::Scalar(value_[0], value_[1]); + } else if (value_.size() == 3) { + value = cv::Scalar(value_[0], value_[1], value_[2]); + } else { + value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]); + } + + // top, bottom, left, right + cv::cuda::copyMakeBorder(*im, *im, 0, pad_h, 0, pad_w, cv::BORDER_CONSTANT, + value); + mat->SetHeight(origin_h + pad_h); + mat->SetWidth(origin_w + pad_w); + return true; +} +#endif + +bool StridePad::Run(Mat* mat, int stride, const std::vector& value, + ProcLib lib) { + auto p = StridePad(stride, value); + return p(mat, lib); +} + +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/common/processors/stride_pad.h b/csrcs/fastdeploy/vision/common/processors/stride_pad.h new file mode 100644 index 00000000000..c002ca697bb --- /dev/null +++ b/csrcs/fastdeploy/vision/common/processors/stride_pad.h @@ -0,0 +1,44 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "fastdeploy/vision/common/processors/base.h" + +namespace fastdeploy { +namespace vision { + +class StridePad : public Processor { + public: + // only support pad with left-top padding mode + StridePad(int stride, const std::vector& value) { + stride_ = stride; + value_ = value; + } + bool CpuRun(Mat* mat); +#ifdef ENABLE_OPENCV_CUDA + bool GpuRun(Mat* mat); +#endif + std::string Name() { return "StridePad"; } + + static bool Run(Mat* mat, int stride, + const std::vector& value = std::vector(), + ProcLib lib = ProcLib::OPENCV_CPU); + + private: + int stride_ = 32; + std::vector value_; +}; +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/common/processors/transform.h b/csrcs/fastdeploy/vision/common/processors/transform.h index 08073b4e423..fed3d0c9a25 100644 --- a/csrcs/fastdeploy/vision/common/processors/transform.h +++ b/csrcs/fastdeploy/vision/common/processors/transform.h @@ -21,5 +21,7 @@ #include "fastdeploy/vision/common/processors/hwc2chw.h" #include "fastdeploy/vision/common/processors/normalize.h" #include "fastdeploy/vision/common/processors/pad.h" +#include "fastdeploy/vision/common/processors/pad_to_size.h" #include "fastdeploy/vision/common/processors/resize.h" #include "fastdeploy/vision/common/processors/resize_by_short.h" +#include "fastdeploy/vision/common/processors/stride_pad.h" diff --git a/csrcs/fastdeploy/vision/ppdet/build_preprocess.cc b/csrcs/fastdeploy/vision/ppdet/build_preprocess.cc new file mode 100644 index 00000000000..20348214e91 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/build_preprocess.cc @@ -0,0 +1,86 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/ppdet/ppyoloe.h" +#include "yaml-cpp/yaml.h" + +namespace fastdeploy { +namespace vision { + +bool BuildPreprocessPipelineFromConfig( + std::vector>* processors, + const std::string& config_file) { + processors->clear(); + YAML::Node cfg; + try { + cfg = YAML::LoadFile(config_file); + } catch (YAML::BadFile& e) { + FDERROR << "Failed to load yaml file " << config_file + << ", maybe you should check this file." << std::endl; + return false; + } + + processors->push_back(std::make_shared()); + + for (const auto& op : cfg["Preprocess"]) { + std::string op_name = op["type"].as(); + if (op_name == "NormalizeImage") { + auto mean = op["mean"].as>(); + auto std = op["std"].as>(); + bool is_scale = op["is_scale"].as(); + processors->push_back(std::make_shared(mean, std, is_scale)); + } else if (op_name == "Resize") { + bool keep_ratio = op["keep_ratio"].as(); + auto target_size = op["target_size"].as>(); + int interp = op["interp"].as(); + FDASSERT(target_size.size(), + "Require size of target_size be 2, but now it's " + + std::to_string(target_size.size()) + "."); + if (!keep_ratio) { + int width = target_size[1]; + int height = target_size[0]; + processors->push_back( + std::make_shared(width, height, -1.0, -1.0, interp, false)); + } else { + int min_target_size = std::min(target_size[0], target_size[1]); + int max_target_size = std::max(target_size[0], target_size[1]); + processors->push_back(std::make_shared( + min_target_size, interp, true, max_target_size)); + 
} + } else if (op_name == "Permute") { + // Do nothing, do permute as the last operation + continue; + } else if (op_name == "Pad") { + auto size = op["size"].as>(); + auto value = op["fill_value"].as>(); + processors->push_back(std::make_shared("float")); + processors->push_back( + std::make_shared(size[1], size[0], value)); + } else if (op_name == "PadStride") { + auto stride = op["stride"].as(); + processors->push_back( + std::make_shared(stride, std::vector(3, 0))); + } else { + FDERROR << "Unexpected preprocess operator: " << op_name << "." + << std::endl; + return false; + } + } + processors->push_back(std::make_shared()); + return true; +} + +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/model.h b/csrcs/fastdeploy/vision/ppdet/model.h new file mode 100644 index 00000000000..17541d7fef3 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/model.h @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include "fastdeploy/vision/ppdet/picodet.h" +#include "fastdeploy/vision/ppdet/ppyolo.h" +#include "fastdeploy/vision/ppdet/ppyoloe.h" +#include "fastdeploy/vision/ppdet/rcnn.h" +#include "fastdeploy/vision/ppdet/yolov3.h" +#include "fastdeploy/vision/ppdet/yolox.h" diff --git a/csrcs/fastdeploy/vision/ppdet/picodet.cc b/csrcs/fastdeploy/vision/ppdet/picodet.cc new file mode 100644 index 00000000000..5f912b8cf45 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/picodet.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/ppdet/picodet.h" +#include "yaml-cpp/yaml.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +PicoDet::PicoDet(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + background_label = -1; + keep_top_k = 100; + nms_eta = 1; + nms_threshold = 0.6; + nms_top_k = 1000; + normalized = true; + score_threshold = 0.025; + CheckIfContainDecodeAndNMS(); + initialized = Initialize(); +} + +bool PicoDet::CheckIfContainDecodeAndNMS() { + YAML::Node cfg; + try { + cfg = YAML::LoadFile(config_file_); + } catch (YAML::BadFile& e) { + FDERROR << "Failed to load yaml file " << config_file_ + << ", maybe you should check this file." << std::endl; + return false; + } + + if (cfg["arch"].as() == "PicoDet") { + FDERROR << "The arch in config file is PicoDet, which means this model " + "doesn't contain box decode and nms, please export model with " + "decode and nms." + << std::endl; + return false; + } + return true; +} + +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/picodet.h b/csrcs/fastdeploy/vision/ppdet/picodet.h new file mode 100644 index 00000000000..7b45b9baf17 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/picodet.h @@ -0,0 +1,36 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "fastdeploy/vision/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL PicoDet : public PPYOLOE { + public: + PicoDet(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + // Only support picodet contains decode and nms + bool CheckIfContainDecodeAndNMS(); + + virtual std::string ModelName() const { return "PaddleDetection/PicoDet"; } +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/ppdet_pybind.cc b/csrcs/fastdeploy/vision/ppdet/ppdet_pybind.cc index bd1fc4621fc..bcc1a047815 100644 --- a/csrcs/fastdeploy/vision/ppdet/ppdet_pybind.cc +++ b/csrcs/fastdeploy/vision/ppdet/ppdet_pybind.cc @@ -27,5 +27,60 @@ void BindPPDet(pybind11::module& m) { self.Predict(&mat, &res); return res; }); + + pybind11::class_(ppdet_module, + "PPYOLO") + .def(pybind11::init()) + .def("predict", [](vision::ppdet::PPYOLO& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); + + pybind11::class_(ppdet_module, + "PicoDet") + .def(pybind11::init()) + .def("predict", [](vision::ppdet::PicoDet& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); + + pybind11::class_(ppdet_module, "YOLOX") + 
.def(pybind11::init()) + .def("predict", [](vision::ppdet::YOLOX& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); + + pybind11::class_(ppdet_module, + "FasterRCNN") + .def(pybind11::init()) + .def("predict", + [](vision::ppdet::FasterRCNN& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); + + pybind11::class_(ppdet_module, + "YOLOv3") + .def(pybind11::init()) + .def("predict", [](vision::ppdet::YOLOv3& self, pybind11::array& data) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res); + return res; + }); } } // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/ppyolo.cc b/csrcs/fastdeploy/vision/ppdet/ppyolo.cc new file mode 100644 index 00000000000..194ad4f69ef --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/ppyolo.cc @@ -0,0 +1,78 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/ppdet/ppyolo.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +PPYOLO::PPYOLO(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::PDINFER}; + valid_gpu_backends = {Backend::PDINFER}; + has_nms_ = true; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool PPYOLO::Initialize() { + if (!BuildPreprocessPipelineFromConfig()) { + FDERROR << "Failed to build preprocess pipeline from configuration file." + << std::endl; + return false; + } + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + return true; +} + +bool PPYOLO::Preprocess(Mat* mat, std::vector* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + FDERROR << "Failed to process image data in " << processors_[i]->Name() + << "." 
<< std::endl; + return false; + } + } + + outputs->resize(3); + (*outputs)[0].Allocate({1, 2}, FDDataType::FP32, "im_shape"); + (*outputs)[2].Allocate({1, 2}, FDDataType::FP32, "scale_factor"); + float* ptr0 = static_cast((*outputs)[0].MutableData()); + ptr0[0] = mat->Height(); + ptr0[1] = mat->Width(); + float* ptr2 = static_cast((*outputs)[2].MutableData()); + ptr2[0] = mat->Height() * 1.0 / origin_h; + ptr2[1] = mat->Width() * 1.0 / origin_w; + (*outputs)[1].name = "image"; + mat->ShareWithTensor(&((*outputs)[1])); + // reshape to [1, c, h, w] + (*outputs)[1].shape.insert((*outputs)[1].shape.begin(), 1); + return true; +} + +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/ppyolo.h b/csrcs/fastdeploy/vision/ppdet/ppyolo.h new file mode 100644 index 00000000000..b17f54b3e6a --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/ppyolo.h @@ -0,0 +1,25 @@ +#pragma once +#include "fastdeploy/vision/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL PPYOLO : public PPYOLOE { + public: + PPYOLO(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + virtual std::string ModelName() const { return "PaddleDetection/PPYOLO"; } + + virtual bool Preprocess(Mat* mat, std::vector* outputs); + virtual bool Initialize(); + + protected: + PPYOLO() {} +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/ppyoloe.cc b/csrcs/fastdeploy/vision/ppdet/ppyoloe.cc index 5152db3fa26..0e7d00c64b7 100644 --- a/csrcs/fastdeploy/vision/ppdet/ppyoloe.cc +++ b/csrcs/fastdeploy/vision/ppdet/ppyoloe.cc @@ -85,12 +85,6 @@ bool PPYOLOE::BuildPreprocessPipelineFromConfig() { return false; } - if (cfg["arch"].as() != "YOLO") { - FDERROR << "Require the arch of model is 
YOLO, but arch defined in " - "config file is " - << cfg["arch"].as() << "." << std::endl; - return false; - } processors_.push_back(std::make_shared()); for (const auto& op : cfg["Preprocess"]) { @@ -107,21 +101,38 @@ bool PPYOLOE::BuildPreprocessPipelineFromConfig() { FDASSERT(target_size.size(), "Require size of target_size be 2, but now it's " + std::to_string(target_size.size()) + "."); - FDASSERT(!keep_ratio, - "Only support keep_ratio is false while deploy " - "PaddleDetection model."); - int width = target_size[1]; - int height = target_size[0]; - processors_.push_back( - std::make_shared(width, height, -1.0, -1.0, interp, false)); + if (!keep_ratio) { + int width = target_size[1]; + int height = target_size[0]; + processors_.push_back( + std::make_shared(width, height, -1.0, -1.0, interp, false)); + } else { + int min_target_size = std::min(target_size[0], target_size[1]); + int max_target_size = std::max(target_size[0], target_size[1]); + processors_.push_back(std::make_shared( + min_target_size, interp, true, max_target_size)); + } } else if (op_name == "Permute") { - processors_.push_back(std::make_shared()); + // Do nothing, do permute as the last operation + continue; + // processors_.push_back(std::make_shared()); + } else if (op_name == "Pad") { + auto size = op["size"].as>(); + auto value = op["fill_value"].as>(); + processors_.push_back(std::make_shared("float")); + processors_.push_back( + std::make_shared(size[1], size[0], value)); + } else if (op_name == "PadStride") { + auto stride = op["stride"].as(); + processors_.push_back( + std::make_shared(stride, std::vector(3, 0))); } else { FDERROR << "Unexcepted preprocess operator: " << op_name << "." 
<< std::endl; return false; } } + processors_.push_back(std::make_shared()); return true; } @@ -217,8 +228,7 @@ bool PPYOLOE::Postprocess(std::vector& infer_result, return true; } -bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result, - float conf_threshold, float iou_threshold) { +bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result) { Mat mat(*im); std::vector processed_data; if (!Preprocess(&mat, &processed_data)) { @@ -227,6 +237,7 @@ bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result, return false; } + float* tmp = static_cast(processed_data[1].Data()); std::vector infer_result; if (!Infer(processed_data, &infer_result)) { FDERROR << "Failed to inference while using model:" << ModelName() << "." diff --git a/csrcs/fastdeploy/vision/ppdet/ppyoloe.h b/csrcs/fastdeploy/vision/ppdet/ppyoloe.h index d86508fa184..3b7e24479c9 100644 --- a/csrcs/fastdeploy/vision/ppdet/ppyoloe.h +++ b/csrcs/fastdeploy/vision/ppdet/ppyoloe.h @@ -1,3 +1,17 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #pragma once #include "fastdeploy/fastdeploy_model.h" #include "fastdeploy/vision/common/processors/transform.h" @@ -16,7 +30,7 @@ class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel { const RuntimeOption& custom_option = RuntimeOption(), const Frontend& model_format = Frontend::PADDLE); - std::string ModelName() const { return "PaddleDetection/PPYOLOE"; } + virtual std::string ModelName() const { return "PaddleDetection/PPYOLOE"; } virtual bool Initialize(); @@ -27,10 +41,11 @@ class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel { virtual bool Postprocess(std::vector& infer_result, DetectionResult* result); - virtual bool Predict(cv::Mat* im, DetectionResult* result, - float conf_threshold = 0.5, float nms_threshold = 0.7); + virtual bool Predict(cv::Mat* im, DetectionResult* result); + + protected: + PPYOLOE() {} - private: std::vector> processors_; std::string config_file_; // configuration for nms @@ -47,6 +62,11 @@ class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel { // and get parameters from the operator void GetNmsInfo(); }; + +// Read configuration and build pipeline to process input image +bool BuildPreprocessPipelineFromConfig( + std::vector>* processors, + const std::string& config_file); } // namespace ppdet } // namespace vision } // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/rcnn.cc b/csrcs/fastdeploy/vision/ppdet/rcnn.cc new file mode 100644 index 00000000000..c976293a80e --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/rcnn.cc @@ -0,0 +1,84 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/ppdet/rcnn.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +FasterRCNN::FasterRCNN(const std::string& model_file, + const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::PDINFER}; + valid_gpu_backends = {Backend::PDINFER}; + has_nms_ = true; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool FasterRCNN::Initialize() { + if (!BuildPreprocessPipelineFromConfig()) { + FDERROR << "Failed to build preprocess pipeline from configuration file." + << std::endl; + return false; + } + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + return true; +} + +bool FasterRCNN::Preprocess(Mat* mat, std::vector* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + float scale[2] = {1.0, 1.0}; + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + FDERROR << "Failed to process image data in " << processors_[i]->Name() + << "." 
<< std::endl; + return false; + } + if (processors_[i]->Name().find("Resize") != std::string::npos) { + scale[0] = mat->Height() * 1.0 / origin_h; + scale[1] = mat->Width() * 1.0 / origin_w; + } + } + + outputs->resize(3); + (*outputs)[0].Allocate({1, 2}, FDDataType::FP32, "im_shape"); + (*outputs)[2].Allocate({1, 2}, FDDataType::FP32, "scale_factor"); + float* ptr0 = static_cast((*outputs)[0].MutableData()); + ptr0[0] = mat->Height(); + ptr0[1] = mat->Width(); + float* ptr2 = static_cast((*outputs)[2].MutableData()); + ptr2[0] = scale[0]; + ptr2[1] = scale[1]; + (*outputs)[1].name = "image"; + mat->ShareWithTensor(&((*outputs)[1])); + // reshape to [1, c, h, w] + (*outputs)[1].shape.insert((*outputs)[1].shape.begin(), 1); + return true; +} + +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/rcnn.h b/csrcs/fastdeploy/vision/ppdet/rcnn.h new file mode 100644 index 00000000000..2a9255a5492 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/rcnn.h @@ -0,0 +1,39 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include "fastdeploy/vision/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL FasterRCNN : public PPYOLOE { + public: + FasterRCNN(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + virtual std::string ModelName() const { return "PaddleDetection/FasterRCNN"; } + + virtual bool Preprocess(Mat* mat, std::vector* outputs); + virtual bool Initialize(); + + protected: + FasterRCNN() {} +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/yolov3.cc b/csrcs/fastdeploy/vision/ppdet/yolov3.cc new file mode 100644 index 00000000000..a02853dbbb8 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/yolov3.cc @@ -0,0 +1,64 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/ppdet/yolov3.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::PDINFER}; + valid_gpu_backends = {Backend::PDINFER}; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool YOLOv3::Preprocess(Mat* mat, std::vector* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + FDERROR << "Failed to process image data in " << processors_[i]->Name() + << "." << std::endl; + return false; + } + } + + outputs->resize(3); + (*outputs)[0].Allocate({1, 2}, FDDataType::FP32, "im_shape"); + (*outputs)[2].Allocate({1, 2}, FDDataType::FP32, "scale_factor"); + float* ptr0 = static_cast((*outputs)[0].MutableData()); + ptr0[0] = mat->Height(); + ptr0[1] = mat->Width(); + float* ptr2 = static_cast((*outputs)[2].MutableData()); + ptr2[0] = mat->Height() * 1.0 / origin_h; + ptr2[1] = mat->Width() * 1.0 / origin_w; + (*outputs)[1].name = "image"; + mat->ShareWithTensor(&((*outputs)[1])); + // reshape to [1, c, h, w] + (*outputs)[1].shape.insert((*outputs)[1].shape.begin(), 1); + return true; +} + +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/yolov3.h b/csrcs/fastdeploy/vision/ppdet/yolov3.h new file mode 100644 index 00000000000..27b1352c9c2 --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/yolov3.h @@ -0,0 +1,35 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "fastdeploy/vision/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL YOLOv3 : public PPYOLOE { + public: + YOLOv3(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + virtual std::string ModelName() const { return "PaddleDetection/YOLOv3"; } + + virtual bool Preprocess(Mat* mat, std::vector* outputs); +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/yolox.cc b/csrcs/fastdeploy/vision/ppdet/yolox.cc new file mode 100644 index 00000000000..44f4ec0552f --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/yolox.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/ppdet/yolox.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +YOLOX::YOLOX(const std::string& model_file, const std::string& params_file, + const std::string& config_file, const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT}; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + background_label = -1; + keep_top_k = 1000; + nms_eta = 1; + nms_threshold = 0.65; + nms_top_k = 10000; + normalized = true; + score_threshold = 0.001; + initialized = Initialize(); +} + +bool YOLOX::Preprocess(Mat* mat, std::vector* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + float scale[2] = {1.0, 1.0}; + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + FDERROR << "Failed to process image data in " << processors_[i]->Name() + << "." 
<< std::endl; + return false; + } + if (processors_[i]->Name().find("Resize") != std::string::npos) { + scale[0] = mat->Height() * 1.0 / origin_h; + scale[1] = mat->Width() * 1.0 / origin_w; + } + } + + outputs->resize(2); + (*outputs)[0].name = InputInfoOfRuntime(0).name; + mat->ShareWithTensor(&((*outputs)[0])); + + // reshape to [1, c, h, w] + (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1); + + (*outputs)[1].Allocate({1, 2}, FDDataType::FP32, InputInfoOfRuntime(1).name); + float* ptr = static_cast((*outputs)[1].MutableData()); + ptr[0] = scale[0]; + ptr[1] = scale[1]; + return true; +} +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppdet/yolox.h b/csrcs/fastdeploy/vision/ppdet/yolox.h new file mode 100644 index 00000000000..e689674a4ec --- /dev/null +++ b/csrcs/fastdeploy/vision/ppdet/yolox.h @@ -0,0 +1,35 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once +#include "fastdeploy/vision/ppdet/ppyoloe.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL YOLOX : public PPYOLOE { + public: + YOLOX(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + virtual bool Preprocess(Mat* mat, std::vector* outputs); + + virtual std::string ModelName() const { return "PaddleDetection/YOLOX"; } +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc index a84ead937aa..a07d896a452 100644 --- a/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc +++ b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc @@ -343,7 +343,6 @@ bool YOLOv5Lite::Predict(cv::Mat* im, DetectionResult* result, #ifdef FASTDEPLOY_DEBUG TIMERECORD_START(0) #endif - std::cout << nms_iou_threshold << nms_iou_threshold << std::endl; Mat mat(*im); std::vector input_tensors(1); diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py index 6a23cd3d2c4..b389669a371 100644 --- a/fastdeploy/__init__.py +++ b/fastdeploy/__init__.py @@ -16,6 +16,11 @@ import os import sys +try: + import paddle +except: + pass + def add_dll_search_dir(dir_path): os.environ["path"] = dir_path + ";" + os.environ["path"] diff --git a/fastdeploy/vision/ppdet/__init__.py b/fastdeploy/vision/ppdet/__init__.py index 661ef0e1fcd..08d39a36b88 100644 --- a/fastdeploy/vision/ppdet/__init__.py +++ b/fastdeploy/vision/ppdet/__init__.py @@ -27,7 +27,7 @@ def __init__(self, model_format=Frontend.PADDLE): super(PPYOLOE, self).__init__(runtime_option) - assert model_format == Frontend.PADDLE, "PPYOLOE only support model format of Frontend.Paddle now." + assert model_format == Frontend.PADDLE, "PPYOLOE model only support model format of Frontend.Paddle now." 
self._model = C.vision.ppdet.PPYOLOE(model_file, params_file, config_file, self._runtime_option, model_format) @@ -36,3 +36,83 @@ def __init__(self, def predict(self, input_image): assert input_image is not None, "The input image data is None." return self._model.predict(input_image) + + +class PPYOLO(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=Frontend.PADDLE): + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == Frontend.PADDLE, "PPYOLO model only support model format of Frontend.Paddle now." + self._model = C.vision.ppdet.PPYOLO(model_file, params_file, + config_file, self._runtime_option, + model_format) + assert self.initialized, "PPYOLO model initialize failed." + + +class YOLOX(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=Frontend.PADDLE): + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == Frontend.PADDLE, "YOLOX model only support model format of Frontend.Paddle now." + self._model = C.vision.ppdet.YOLOX(model_file, params_file, + config_file, self._runtime_option, + model_format) + assert self.initialized, "YOLOX model initialize failed." + + +class PicoDet(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=Frontend.PADDLE): + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == Frontend.PADDLE, "PicoDet model only support model format of Frontend.Paddle now." + self._model = C.vision.ppdet.PicoDet(model_file, params_file, + config_file, self._runtime_option, + model_format) + assert self.initialized, "PicoDet model initialize failed." 
+ + +class FasterRCNN(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=Frontend.PADDLE): + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == Frontend.PADDLE, "FasterRCNN model only support model format of Frontend.Paddle now." + self._model = C.vision.ppdet.FasterRCNN( + model_file, params_file, config_file, self._runtime_option, + model_format) + assert self.initialized, "FasterRCNN model initialize failed." + + +class YOLOv3(PPYOLOE): + def __init__(self, + model_file, + params_file, + config_file, + runtime_option=None, + model_format=Frontend.PADDLE): + super(PPYOLOE, self).__init__(runtime_option) + + assert model_format == Frontend.PADDLE, "YOLOv3 model only support model format of Frontend.Paddle now." + self._model = C.vision.ppdet.YOLOv3(model_file, params_file, + config_file, self._runtime_option, + model_format) + assert self.initialized, "YOLOv3 model initialize failed." diff --git a/model_zoo/vision/ppyoloe/README.md b/model_zoo/vision/ppyoloe/README.md deleted file mode 100644 index 42d18104ad8..00000000000 --- a/model_zoo/vision/ppyoloe/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# PaddleDetection/PPYOLOE部署示例 - -- 当前支持PaddleDetection版本为[release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4) - -本文档说明如何进行[PPYOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe)的快速部署推理。本目录结构如下 -``` -. 
-├── cpp # C++ 代码目录 -│   ├── CMakeLists.txt # C++ 代码编译CMakeLists文件 -│   ├── README.md # C++ 代码编译部署文档 -│   └── ppyoloe.cc # C++ 示例代码 -├── README.md # PPYOLOE 部署文档 -└── ppyoloe.py # Python示例代码 -``` - -## 安装FastDeploy - -使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` -``` -# 安装fastdeploy-python工具 -pip install fastdeploy-python -``` - -## Python部署 - -执行如下代码即会自动下载PPYOLOE模型和测试图片 -``` -python ppyoloe.py -``` - -执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 -``` -DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 -414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 -163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 -267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 -581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 -104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 -348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 -364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 -75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 -328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 -504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 -379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 -25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 -``` - -## 其它文档 - -- [C++部署](./cpp/README.md) -- [PPYOLOE API文档](./api.md) diff --git a/model_zoo/vision/ppyoloe/api.md b/model_zoo/vision/ppyoloe/api.md deleted file mode 100644 index 1c5cbcaadbd..00000000000 --- a/model_zoo/vision/ppyoloe/api.md +++ /dev/null @@ -1,74 +0,0 @@ -# PPYOLOE API说明 - -## Python API - -### PPYOLOE类 -``` -fastdeploy.vision.ultralytics.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=fd.Frontend.PADDLE) -``` -PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当前仅支持model_format为Paddle格式 - -**参数** - -> * **model_file**(str): 模型文件路径 -> * **params_file**(str): 参数文件路径 -> * **config_file**(str): 模型推理配置文件 -> * **runtime_option**(RuntimeOption): 
后端推理配置,默认为None,即采用默认配置 -> * **model_format**(Frontend): 模型格式 - -#### predict函数 -> ``` -> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) -> ``` -> 模型预测结口,输入图像直接输出检测结果。 -> -> **参数** -> -> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 -> > * **conf_threshold**(float): 检测框置信度过滤阈值 -> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值(当模型中包含nms处理时,此参数自动无效) - -示例代码参考[ppyoloe.py](./ppyoloe.py) - - -## C++ API - -### PPYOLOE类 -``` -fastdeploy::vision::ultralytics::PPYOLOE( - const string& model_file, - const string& params_file, - const string& config_file, - const RuntimeOption& runtime_option = RuntimeOption(), - const Frontend& model_format = Frontend::ONNX) -``` -PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当前仅支持model_format为Paddle格式 - -**参数** - -> * **model_file**(str): 模型文件路径 -> * **params_file**(str): 参数文件路径 -> * **config_file**(str): 模型推理配置文件 -> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 -> * **model_format**(Frontend): 模型格式 - -#### Predict函数 -> ``` -> YOLOv5::Predict(cv::Mat* im, DetectionResult* result, -> float conf_threshold = 0.25, -> float nms_iou_threshold = 0.5) -> ``` -> 模型预测接口,输入图像直接输出检测结果。 -> -> **参数** -> -> > * **im**: 输入图像,注意需为HWC,BGR格式 -> > * **result**: 检测结果,包括检测框,各个框的置信度 -> > * **conf_threshold**: 检测框置信度过滤阈值 -> > * **nms_iou_threshold**: NMS处理过程中iou阈值(当模型中包含nms处理时,此参数自动无效) - -示例代码参考[cpp/yolov5.cc](cpp/yolov5.cc) - -## 其它API使用 - -- [模型部署RuntimeOption配置](../../../docs/api/runtime_option.md) diff --git a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt deleted file mode 100644 index 6222a00da39..00000000000 --- a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -PROJECT(ppyoloe_demo C CXX) -CMAKE_MINIMUM_REQUIRED (VERSION 3.16) - -# 在低版本ABI环境中,通过如下代码进行兼容性编译 -# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) - -# 指定下载解压后的fastdeploy库路径 -set(FASTDEPLOY_INSTALL_DIR /fastdeploy/CustomOp/FastDeploy/build1/fastdeploy-linux-x64-gpu-0.3.0) - 
-include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) - -# 添加FastDeploy依赖头文件 -include_directories(${FASTDEPLOY_INCS}) - -add_executable(ppyoloe_demo ${PROJECT_SOURCE_DIR}/ppyoloe.cc) -# 添加FastDeploy库依赖 -target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS}) diff --git a/model_zoo/vision/ppyoloe/cpp/README.md b/model_zoo/vision/ppyoloe/cpp/README.md deleted file mode 100644 index 1027c2eeb24..00000000000 --- a/model_zoo/vision/ppyoloe/cpp/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# 编译PPYOLOE示例 - - -``` -# 下载和解压预测库 -wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz -tar xvf fastdeploy-linux-x64-0.0.3.tgz - -# 编译示例代码 -mkdir build & cd build -cmake .. -make -j - -# 下载模型和图片 -wget https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz -tar xvf ppyoloe_crn_l_300e_coco.tgz -wget https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg - -# 执行 -./ppyoloe_demo -``` - -执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 -``` -DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 -414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 -163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 -267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 -581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 -104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 -348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 -364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 -75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 -328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 -504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 -379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 -25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 -``` diff --git a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc deleted file mode 100644 index 
e63f29e62a5..00000000000 --- a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "fastdeploy/vision.h" - -int main() { - namespace vis = fastdeploy::vision; - - std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; - std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; - std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; - std::string img_path = "000000014439_640x640.jpg"; - std::string vis_path = "vis.jpeg"; - - auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); - if (!model.Initialized()) { - std::cerr << "Init Failed." << std::endl; - return -1; - } - - cv::Mat im = cv::imread(img_path); - cv::Mat vis_im = im.clone(); - - vis::DetectionResult res; - if (!model.Predict(&im, &res)) { - std::cerr << "Prediction Failed." << std::endl; - return -1; - } else { - std::cout << "Prediction Done!" << std::endl; - } - - // 输出预测框结果 - std::cout << res.Str() << std::endl; - - // 可视化预测结果 - vis::Visualize::VisDetection(&vis_im, res); - cv::imwrite(vis_path, vis_im); - std::cout << "Detect Done! 
Saved: " << vis_path << std::endl; - return 0; -} diff --git a/model_zoo/vision/ppyoloe/ppyoloe.py b/model_zoo/vision/ppyoloe/ppyoloe.py deleted file mode 100644 index a3b12c1dc6b..00000000000 --- a/model_zoo/vision/ppyoloe/ppyoloe.py +++ /dev/null @@ -1,24 +0,0 @@ -import fastdeploy as fd -import cv2 - -# 下载模型和测试图片 -model_url = "https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz" -test_jpg_url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg" -fd.download_and_decompress(model_url, ".") -fd.download(test_jpg_url, ".", show_progress=True) - -# 加载模型 -model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel", - "ppyoloe_crn_l_300e_coco/model.pdiparams", - "ppyoloe_crn_l_300e_coco/infer_cfg.yml") - -# 预测图片 -im = cv2.imread("000000014439_640x640.jpg") -result = model.predict(im) - -# 可视化结果 -fd.vision.visualize.vis_detection(im, result) -cv2.imwrite("vis_result.jpg", im) - -# 输出预测结果 -print(result) diff --git a/setup.py b/setup.py index e57dcd49338..15f5fc29d09 100644 --- a/setup.py +++ b/setup.py @@ -371,9 +371,13 @@ def run(self): for f1 in os.listdir(lib_dir_name): release_dir = os.path.join(lib_dir_name, f1) if f1 == "Release" and not os.path.isfile(release_dir): - if os.path.exists(os.path.join("fastdeploy/libs/third_libs", f)): - shutil.rmtree(os.path.join("fastdeploy/libs/third_libs", f)) - shutil.copytree(release_dir, os.path.join("fastdeploy/libs/third_libs", f, "lib")) + if os.path.exists( + os.path.join("fastdeploy/libs/third_libs", f)): + shutil.rmtree( + os.path.join("fastdeploy/libs/third_libs", f)) + shutil.copytree(release_dir, + os.path.join("fastdeploy/libs/third_libs", + f, "lib")) if platform.system().lower() == "windows": release_dir = os.path.join(".setuptools-cmake-build", "Release") @@ -398,6 +402,9 @@ def run(self): path)) rpaths = ":".join(rpaths) command = "patchelf --set-rpath '{}' ".format(rpaths) + pybind_so_file + print( + 
"=========================Set rpath for library===================") + print(command) # The sw_64 not suppot patchelf, so we just disable that. if platform.machine() != 'sw_64' and platform.machine() != 'mips64': assert os.system(