2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -348,6 +348,6 @@ endif(BUILD_FASTDEPLOY_PYTHON)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0")
string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION)
-message(WARNING "[WARNING] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
+message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
endif()
endif()
2 changes: 1 addition & 1 deletion FastDeploy.cmake.in
@@ -113,6 +113,6 @@ message(STATUS " ENABLE_VISION : ${ENABLE_VISION}")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0")
string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION)
-message(WARNING "[WARNING] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
+message(FATAL_ERROR "[ERROR] FastDeploy require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
endif()
endif()
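Both CMake hunks above promote the old g++ version warning to a hard configure-time error. A user on an older system toolchain can satisfy the check by pointing CMake at a newer compiler, as the message itself suggests, e.g. cmake .. -DCMAKE_CXX_COMPILER=/path/to/g++ where the path names a g++ of version 5.4.0 or newer (the path here is illustrative).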
36 changes: 18 additions & 18 deletions csrcs/fastdeploy/backends/tensorrt/trt_backend.cc
@@ -54,8 +54,8 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {

bool CheckDynamicShapeConfig(const paddle2onnx::OnnxReader& reader,
const TrtBackendOption& option) {
-//paddle2onnx::ModelTensorInfo inputs[reader.NumInputs()];
-//std::string input_shapes[reader.NumInputs()];
+// paddle2onnx::ModelTensorInfo inputs[reader.NumInputs()];
+// std::string input_shapes[reader.NumInputs()];
std::vector<paddle2onnx::ModelTensorInfo> inputs(reader.NumInputs());
std::vector<std::string> input_shapes(reader.NumInputs());
for (int i = 0; i < reader.NumInputs(); ++i) {
@@ -374,27 +374,27 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
1U << static_cast<uint32_t>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);

-auto builder = SampleUniquePtr<nvinfer1::IBuilder>(
+builder_ = SampleUniquePtr<nvinfer1::IBuilder>(
nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
-if (!builder) {
+if (!builder_) {
FDERROR << "Failed to call createInferBuilder()." << std::endl;
return false;
}
-auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(
-builder->createNetworkV2(explicitBatch));
-if (!network) {
+network_ = SampleUniquePtr<nvinfer1::INetworkDefinition>(
+builder_->createNetworkV2(explicitBatch));
+if (!network_) {
FDERROR << "Failed to call createNetworkV2()." << std::endl;
return false;
}
-auto config =
-SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
+auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(
+builder_->createBuilderConfig());
if (!config) {
FDERROR << "Failed to call createBuilderConfig()." << std::endl;
return false;
}

if (option.enable_fp16) {
-if (!builder->platformHasFastFp16()) {
+if (!builder_->platformHasFastFp16()) {
FDWARNING << "Detected FP16 is not supported in the current GPU, "
"will use FP32 instead."
<< std::endl;
@@ -403,25 +403,25 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
}
}

-auto parser = SampleUniquePtr<nvonnxparser::IParser>(
-nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
-if (!parser) {
+parser_ = SampleUniquePtr<nvonnxparser::IParser>(
+nvonnxparser::createParser(*network_, sample::gLogger.getTRTLogger()));
+if (!parser_) {
FDERROR << "Failed to call createParser()." << std::endl;
return false;
}
-if (!parser->parse(onnx_model.data(), onnx_model.size())) {
+if (!parser_->parse(onnx_model.data(), onnx_model.size())) {
FDERROR << "Failed to parse ONNX model by TensorRT." << std::endl;
return false;
}

FDINFO << "Start to building TensorRT Engine..." << std::endl;
-bool fp16 = builder->platformHasFastFp16();
-builder->setMaxBatchSize(option.max_batch_size);
+bool fp16 = builder_->platformHasFastFp16();
+builder_->setMaxBatchSize(option.max_batch_size);

config->setMaxWorkspaceSize(option.max_workspace_size);

if (option.max_shape.size() > 0) {
-auto profile = builder->createOptimizationProfile();
+auto profile = builder_->createOptimizationProfile();
FDASSERT(option.max_shape.size() == option.min_shape.size() &&
option.min_shape.size() == option.opt_shape.size(),
"[TrtBackend] Size of max_shape/opt_shape/min_shape in "
@@ -459,7 +459,7 @@ bool TrtBackend::CreateTrtEngine(const std::string& onnx_model,
}

SampleUniquePtr<IHostMemory> plan{
-builder->buildSerializedNetwork(*network, *config)};
+builder_->buildSerializedNetwork(*network_, *config)};
if (!plan) {
FDERROR << "Failed to call buildSerializedNetwork()." << std::endl;
return false;
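For readers following the hunks above: the build objects now live in class members (builder_, network_, parser_; see trt_backend.h below) instead of function locals, so they survive past CreateTrtEngine(). A condensed sketch of the TensorRT 8.x explicit-batch build sequence the function walks through, with a stand-in logger and no error handling (illustration, not FastDeploy code):

// Condensed TensorRT engine-build flow (sketch; logger is any nvinfer1::ILogger).
auto builder = nvinfer1::createInferBuilder(logger);
auto network = builder->createNetworkV2(
    1U << static_cast<uint32_t>(
        nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
auto config = builder->createBuilderConfig();
auto parser = nvonnxparser::createParser(*network, logger);
parser->parse(onnx_model.data(), onnx_model.size());  // onnx_model: serialized ONNX bytes
auto plan = builder->buildSerializedNetwork(*network, *config);
// plan->data() / plan->size() can then be deserialized into an ICudaEngine.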
3 changes: 3 additions & 0 deletions csrcs/fastdeploy/backends/tensorrt/trt_backend.h
@@ -85,6 +85,9 @@ class TrtBackend : public BaseBackend {
private:
std::shared_ptr<nvinfer1::ICudaEngine> engine_;
std::shared_ptr<nvinfer1::IExecutionContext> context_;
+SampleUniquePtr<nvonnxparser::IParser> parser_;
+SampleUniquePtr<nvinfer1::IBuilder> builder_;
+SampleUniquePtr<nvinfer1::INetworkDefinition> network_;
cudaStream_t stream_{};
std::vector<void*> bindings_;
std::vector<TrtValueInfo> inputs_desc_;
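One detail worth keeping in mind with the three new members (a general C++ rule, not something this diff changes): non-static data members are destroyed in reverse declaration order, so network_ is released before builder_ and parser_, and context_ and engine_ outlive all three.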
6 changes: 3 additions & 3 deletions csrcs/fastdeploy/fastdeploy_model.cc
@@ -53,7 +53,7 @@ bool FastDeployModel::InitRuntime() {
<< std::endl;
return false;
}
-runtime_ = new Runtime();
+runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;
}
@@ -88,7 +88,7 @@ bool FastDeployModel::CreateCpuBackend() {
continue;
}
runtime_option.backend = valid_cpu_backends[i];
-runtime_ = new Runtime();
+runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;
}
@@ -111,7 +111,7 @@ bool FastDeployModel::CreateGpuBackend() {
continue;
}
runtime_option.backend = valid_gpu_backends[i];
-runtime_ = new Runtime();
+runtime_ = std::unique_ptr<Runtime>(new Runtime());
if (!runtime_->Init(runtime_option)) {
return false;
}
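The switch away from runtime_ = new Runtime() matters because these creation paths can run more than once (the backend-selection loops try several candidates). A minimal standalone sketch of the leak the unique_ptr assignment avoids, using hypothetical stand-in types rather than FastDeploy code:

#include <memory>

struct Runtime {};  // stand-in for fastdeploy::Runtime

struct WithRawPointer {
  Runtime* runtime_ = nullptr;
  void Recreate() {
    runtime_ = new Runtime();  // leaks any previously assigned Runtime,
  }                            // and nothing ever deletes the last one
};

struct WithUniquePtr {
  std::unique_ptr<Runtime> runtime_;
  void Recreate() {
    runtime_ = std::unique_ptr<Runtime>(new Runtime());  // old Runtime freed here
    // std::make_unique<Runtime>() would be the C++14 spelling of the same thing
  }
};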
22 changes: 11 additions & 11 deletions csrcs/fastdeploy/fastdeploy_model.h
@@ -18,7 +18,7 @@ namespace fastdeploy {

class FASTDEPLOY_DECL FastDeployModel {
public:
-virtual std::string ModelName() const { return "NameUndefined"; };
+virtual std::string ModelName() const { return "NameUndefined"; }

virtual bool InitRuntime();
virtual bool CreateCpuBackend();
@@ -47,21 +47,21 @@ class FASTDEPLOY_DECL FastDeployModel {
virtual bool DebugEnabled();

private:
-Runtime* runtime_ = nullptr;
+std::unique_ptr<Runtime> runtime_;
bool runtime_initialized_ = false;
bool debug_ = false;
};

-#define TIMERECORD_START(id) \
-TimeCounter tc_##id; \
+#define TIMERECORD_START(id) \
+TimeCounter tc_##id; \
tc_##id.Start();

-#define TIMERECORD_END(id, prefix) \
-if (DebugEnabled()) { \
-tc_##id.End(); \
-FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
-<< prefix << " duration = " << tc_##id.Duration() << "s." \
-<< std::endl; \
+#define TIMERECORD_END(id, prefix) \
+if (DebugEnabled()) { \
+tc_##id.End(); \
+FDLogger() << __FILE__ << "(" << __LINE__ << "):" << __FUNCTION__ << " " \
+<< prefix << " duration = " << tc_##id.Duration() << "s." \
+<< std::endl; \
}

-} // namespace fastdeploy
+} // namespace fastdeploy
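A small usage sketch for the two timing macros above; the call being timed is hypothetical, while DebugEnabled() and FDLogger come from the surrounding FastDeploy code:

// Inside a FastDeployModel method (sketch):
TIMERECORD_START(infer)
bool ok = runtime_->Infer(inputs, &outputs);  // hypothetical call being timed
TIMERECORD_END(infer, "Inference")
// When DebugEnabled() is true, TIMERECORD_END logs something like:
//   fastdeploy_model.cc(123):Predict Inference duration = 0.0042s.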
2 changes: 1 addition & 1 deletion csrcs/fastdeploy/vision.h
@@ -27,7 +27,7 @@
#include "fastdeploy/vision/megvii/yolox.h"
#include "fastdeploy/vision/meituan/yolov6.h"
#include "fastdeploy/vision/ppcls/model.h"
#include "fastdeploy/vision/ppdet/ppyoloe.h"
#include "fastdeploy/vision/ppdet/model.h"
#include "fastdeploy/vision/ppogg/yolov5lite.h"
#include "fastdeploy/vision/ppseg/model.h"
#include "fastdeploy/vision/rangilyu/nanodet_plus.h"
141 changes: 141 additions & 0 deletions csrcs/fastdeploy/vision/common/processors/pad_to_size.cc
@@ -0,0 +1,141 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision/common/processors/pad_to_size.h"

namespace fastdeploy {
namespace vision {

bool PadToSize::CpuRun(Mat* mat) {
if (mat->layout != Layout::HWC) {
FDERROR << "PadToSize: The input data must be Layout::HWC format!"
<< std::endl;
return false;
}
if (mat->Channels() > 4) {
FDERROR << "PadToSize: Only support channels <= 4." << std::endl;
return false;
}
if (mat->Channels() != value_.size()) {
FDERROR
<< "PadToSize: Require input channels equals to size of padding value, "
"but now channels = "
<< mat->Channels() << ", the size of padding values = " << value_.size()
<< "." << std::endl;
return false;
}
int origin_w = mat->Width();
int origin_h = mat->Height();
if (origin_w > width_) {
FDERROR << "PadToSize: the input width:" << origin_w
<< " is greater than the target width: " << width_ << "."
<< std::endl;
return false;
}
if (origin_h > height_) {
FDERROR << "PadToSize: the input height:" << origin_h
<< " is greater than the target height: " << height_ << "."
<< std::endl;
return false;
}
if (origin_w == width_ && origin_h == height_) {
return true;
}

cv::Mat* im = mat->GetCpuMat();
cv::Scalar value;
if (value_.size() == 1) {
value = cv::Scalar(value_[0]);
} else if (value_.size() == 2) {
value = cv::Scalar(value_[0], value_[1]);
} else if (value_.size() == 3) {
value = cv::Scalar(value_[0], value_[1], value_[2]);
} else {
value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]);
}
// top, bottom, left, right
cv::copyMakeBorder(*im, *im, 0, height_ - origin_h, 0, width_ - origin_w,
cv::BORDER_CONSTANT, value);
mat->SetHeight(height_);
mat->SetWidth(width_);
return true;
}

#ifdef ENABLE_OPENCV_CUDA
bool PadToSize::GpuRun(Mat* mat) {
if (mat->layout != Layout::HWC) {
FDERROR << "PadToSize: The input data must be Layout::HWC format!"
<< std::endl;
return false;
}
if (mat->Channels() > 4) {
FDERROR << "PadToSize: Only support channels <= 4." << std::endl;
return false;
}
if (mat->Channels() != value_.size()) {
FDERROR
<< "PadToSize: Require input channels equals to size of padding value, "
"but now channels = "
<< mat->Channels() << ", the size of padding values = " << value_.size()
<< "." << std::endl;
return false;
}

int origin_w = mat->Width();
int origin_h = mat->Height();
if (origin_w > width_) {
FDERROR << "PadToSize: the input width:" << origin_w
<< " is greater than the target width: " << width_ << "."
<< std::endl;
return false;
}
if (origin_h > height_) {
FDERROR << "PadToSize: the input height:" << origin_h
<< " is greater than the target height: " << height_ << "."
<< std::endl;
return false;
}
if (origin_w == width_ && origin_h == height_) {
return true;
}

cv::cuda::GpuMat* im = mat->GetGpuMat();
cv::Scalar value;
if (value_.size() == 1) {
value = cv::Scalar(value_[0]);
} else if (value_.size() == 2) {
value = cv::Scalar(value_[0], value_[1]);
} else if (value_.size() == 3) {
value = cv::Scalar(value_[0], value_[1], value_[2]);
} else {
value = cv::Scalar(value_[0], value_[1], value_[2], value_[3]);
}

// top, bottom, left, right
cv::cuda::copyMakeBorder(*im, *im, 0, height_ - origin_h, 0,
width_ - origin_w, cv::BORDER_CONSTANT, value);
mat->SetHeight(height_);
mat->SetWidth(width_);
return true;
}
#endif

bool PadToSize::Run(Mat* mat, int width, int height,
const std::vector<float>& value, ProcLib lib) {
auto p = PadToSize(width, height, value);
return p(mat, lib);
}

} // namespace vision
} // namespace fastdeploy
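Both paths above reduce to a constant-border pad on the bottom and right edges, keeping the original pixels at the top-left. A self-contained OpenCV sketch of the same operation on a plain cv::Mat, without the FastDeploy Mat wrapper or the validation above (illustration only):

#include <opencv2/opencv.hpp>

// Pad im to target_w x target_h with a constant border on the bottom/right,
// mirroring what PadToSize::CpuRun does via cv::copyMakeBorder.
cv::Mat PadToSizeSketch(const cv::Mat& im, int target_w, int target_h,
                        const cv::Scalar& value) {
  CV_Assert(im.cols <= target_w && im.rows <= target_h);
  cv::Mat out;
  cv::copyMakeBorder(im, out, /*top=*/0, /*bottom=*/target_h - im.rows,
                     /*left=*/0, /*right=*/target_w - im.cols,
                     cv::BORDER_CONSTANT, value);
  return out;
}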
46 changes: 46 additions & 0 deletions csrcs/fastdeploy/vision/common/processors/pad_to_size.h
@@ -0,0 +1,46 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/vision/common/processors/base.h"

namespace fastdeploy {
namespace vision {

class PadToSize : public Processor {
public:
// only support pad with left-top padding mode
PadToSize(int width, int height, const std::vector<float>& value) {
width_ = width;
height_ = height;
value_ = value;
}
bool CpuRun(Mat* mat);
#ifdef ENABLE_OPENCV_CUDA
bool GpuRun(Mat* mat);
#endif
std::string Name() { return "PadToSize"; }

static bool Run(Mat* mat, int width, int height,
const std::vector<float>& value,
ProcLib lib = ProcLib::OPENCV_CPU);

private:
int width_;
int height_;
std::vector<float> value_;
};
} // namespace vision
} // namespace fastdeploy
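A hedged usage sketch for the static Run entry point declared above; the 640x640 target and the per-channel value 114 are illustrative choices, not defaults from this PR, and the Mat is assumed to come from an existing preprocessing pipeline:

#include <vector>

#include "fastdeploy/vision/common/processors/pad_to_size.h"

// Pads an HWC image to 640x640 with padding value 114 per channel (sketch).
bool PadTo640(fastdeploy::vision::Mat* mat) {
  const std::vector<float> value = {114.0f, 114.0f, 114.0f};
  return fastdeploy::vision::PadToSize::Run(mat, 640, 640, value);
}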