Code style refactoring
Nuzhny007 committed May 19, 2023
1 parent e37522a commit 2a73e75
Showing 5 changed files with 10 additions and 61 deletions.
8 changes: 4 additions & 4 deletions src/Detector/tensorrt_yolo/YoloONNX.cpp
@@ -486,7 +486,7 @@ bool YoloONNX::VerifyOutputAspectRatio(size_t imgIdx, std::vector<tensor_rt::Res
             else
             {
                 outputs.push_back(output);
-                ++i;
+                ++i;
             }
 #endif
     }
@@ -749,14 +749,14 @@ void YoloONNX::ProcessBBoxesOutput(size_t imgIdx, const std::vector<float*>& out
 
     int objectsCount = m_outpuDims[1].d[1];
 
-    const float fw = static_cast<float>(frameSize.width) / static_cast<float>(m_inputDims.d[3]);
-    const float fh = static_cast<float>(frameSize.height) / static_cast<float>(m_inputDims.d[2]);
+    const float fw = static_cast<float>(frameSize.width) / static_cast<float>(m_inputDims.d[3]);
+    const float fh = static_cast<float>(frameSize.height) / static_cast<float>(m_inputDims.d[2]);
 
     //std::cout << "Dets[" << imgIdx << "] = " << dets[imgIdx] << ", objectsCount = " << objectsCount << std::endl;
 
     const size_t step1 = imgIdx * objectsCount;
     const size_t step2 = 4 * imgIdx * objectsCount;
-    for (size_t i = 0; i < dets[imgIdx]; ++i)
+    for (size_t i = 0; i < static_cast<size_t>(dets[imgIdx]); ++i)
     {
         // Box
         const size_t k = i * 4;
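The ++i and fw/fh line pairs above differ only in whitespace, which does not survive this view; the substantive change is the static_cast in the loop bound, which makes the comparison against the unsigned index i well-typed. For context, fw and fh map boxes from network-input coordinates back to the source frame. A minimal sketch of that rescaling, with a hypothetical helper name and signature (not code from YoloONNX.cpp):

#include <opencv2/core.hpp>

// Illustrative only: scale a box from network-input space to frame space
// using fw = frameWidth / inputWidth and fh = frameHeight / inputHeight.
static cv::Rect ScaleBoxToFrame(float x, float y, float w, float h, float fw, float fh)
{
    return cv::Rect(cvRound(x * fw), cvRound(y * fh), cvRound(w * fw), cvRound(h * fh));
}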
2 changes: 1 addition & 1 deletion src/Detector/tensorrt_yolo/class_detector.h
@@ -13,7 +13,7 @@ namespace tensor_rt
     struct Result
     {
         cv::RotatedRect m_rrect;
-        cv::Rect m_brect;
+        cv::Rect m_brect;
         int m_id = -1;
         float m_prob = 0.f;
         cv::Mat m_boxMask;
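The touched struct is the detector's per-detection result. As a hedged illustration of how its fields relate (not code from the repository, and assuming Result stays default-constructible as its in-class initializers suggest), the axis-aligned m_brect can be derived from the rotated m_rrect:

#include <opencv2/core.hpp>
#include "class_detector.h"

// Illustrative only: fill a tensor_rt::Result for one detection.
static tensor_rt::Result MakeResult(const cv::RotatedRect& rrect, int classId, float prob)
{
    tensor_rt::Result res;
    res.m_rrect = rrect;
    res.m_brect = rrect.boundingRect(); // enclosing axis-aligned box
    res.m_id = classId;
    res.m_prob = prob;
    return res;                         // m_boxMask stays empty (no instance mask)
}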
47 changes: 1 addition & 46 deletions src/Detector/tensorrt_yolo/common/sampleEngines.cpp
@@ -700,21 +700,13 @@ void setMemoryPoolLimits(nvinfer1::IBuilderConfig& config, BuildOptions const& b
 {
     auto const roundToBytes = [](double const sizeInMB) { return static_cast<size_t>(sizeInMB * (1 << 20)); };
     if (build.workspace >= 0)
-    {
         config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, roundToBytes(build.workspace));
-    }
     if (build.dlaSRAM >= 0)
-    {
         config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_MANAGED_SRAM, roundToBytes(build.dlaSRAM));
-    }
     if (build.dlaLocalDRAM >= 0)
-    {
         config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_LOCAL_DRAM, roundToBytes(build.dlaLocalDRAM));
-    }
     if (build.dlaGlobalDRAM >= 0)
-    {
         config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_GLOBAL_DRAM, roundToBytes(build.dlaGlobalDRAM));
-    }
 }
 
 } // namespace
@@ -920,14 +912,10 @@ bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys,
     config.setAvgTimingIterations(build.avgTiming);
 
     if (build.fp16)
-    {
         config.setFlag(nvinfer1::BuilderFlag::kFP16);
-    }
 
     if (build.int8)
-    {
         config.setFlag(nvinfer1::BuilderFlag::kINT8);
-    }
 
     if (build.int8 && !build.fp16)
     {
@@ -948,9 +936,7 @@ bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys,
         {
             const auto& layer = network.getLayer(i);
             if (layer->getType() == nvinfer1::LayerType::kQUANTIZE || layer->getType() == nvinfer1::LayerType::kDEQUANTIZE)
-            {
                 return true;
-            }
         }
         return false;
     };
@@ -1011,26 +997,18 @@ bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys,
             auto const isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int32_t dim) { return dim == -1; });
 
             if (profileCalib)
-            {
                 elemCount.push_back(volume(profileCalib->getDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT)));
-            }
             else if (profile && isDynamicInput)
-            {
                 elemCount.push_back(volume(profile->getDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT)));
-            }
             else
-            {
                 elemCount.push_back(volume(input->getDimensions()));
-            }
         }
 
         config.setInt8Calibrator(new RndInt8Calibrator(1, elemCount, build.calibration, network, err));
     }
 
     if (build.directIO)
-    {
         config.setFlag(nvinfer1::BuilderFlag::kDIRECT_IO);
-    }
 
     switch (build.precisionConstraints)
     {
@@ -1044,24 +1022,16 @@ bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys,
     }
 
     if (!build.layerPrecisions.empty() && build.precisionConstraints != PrecisionConstraints::kNONE)
-    {
         setLayerPrecisions(network, build.layerPrecisions);
-    }
 
     if (!build.layerOutputTypes.empty() && build.precisionConstraints != PrecisionConstraints::kNONE)
-    {
         setLayerOutputTypes(network, build.layerOutputTypes);
-    }
 
     if (build.safe)
-    {
         config.setEngineCapability(sys.DLACore != -1 ? nvinfer1::EngineCapability::kDLA_STANDALONE : nvinfer1::EngineCapability::kSAFETY);
-    }
 
     if (build.restricted)
-    {
         config.setFlag(nvinfer1::BuilderFlag::kSAFETY_SCOPE);
-    }
 
     if (sys.DLACore != -1)
     {
@@ -1072,18 +1042,11 @@ bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys,
         config.setFlag(nvinfer1::BuilderFlag::kPREFER_PRECISION_CONSTRAINTS);
 
         if (sys.fallback)
-        {
             config.setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
-        }
-        else
-        {
-            // Reformatting runs on GPU, so avoid I/O reformatting.
+        else // Reformatting runs on GPU, so avoid I/O reformatting
             config.setFlag(nvinfer1::BuilderFlag::kDIRECT_IO);
-        }
         if (!build.int8)
-        {
             config.setFlag(nvinfer1::BuilderFlag::kFP16);
-        }
     }
     else
     {
@@ -1220,9 +1183,7 @@ std::pair<std::vector<std::string>, std::vector<nvinfer1::WeightsRole>> getMissi
     std::vector<std::string> layerNameStrs(nbMissing);
     std::transform(layerNames.begin(), layerNames.end(), layerNameStrs.begin(), [](char const* name) {
         if (name == nullptr)
-        {
             return std::string{};
-        }
         return std::string{name};
     });
     return {layerNameStrs, weightsRoles};
@@ -1319,13 +1280,9 @@ bool getEngineBuildEnv(const ModelOptions& model, const BuildOptions& build, con
     bool createEngineSuccess {false};
 
     if (build.load)
-    {
         createEngineSuccess = loadEngineToEnv(build.engine, sys.DLACore, build.safe, build.consistency, env, err);
-    }
     else
-    {
         createEngineSuccess = modelToBuildEnv(model, build, sys, env, err);
-    }
 
     SMP_RETVAL_IF_FALSE(createEngineSuccess, "Failed to create engine from model.", false, err);
 
@@ -1492,9 +1449,7 @@ bool timeRefit(nvinfer1::INetworkDefinition const& network, nvinfer1::ICudaEngin
         {
             bool const success = refitter->setWeights(layer->getName(), roleWeights.first, roleWeights.second);
             if (!success)
-            {
                 return false;
-            }
         }
     }
 }
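The hunks above only drop redundant braces; the builder-configuration calls themselves are unchanged. As a compact standalone reference, a sketch of the same TensorRT calls (TensorRT 8.4+ API assumed; the helper name, parameters, and values are illustrative, and network parsing, error handling, and engine serialization are omitted):

#include <NvInfer.h>

// Illustrative only: mirror the setMemoryPoolLimits/setupNetworkAndConfig calls above.
static void ConfigureBuilder(nvinfer1::IBuilderConfig& config, bool fp16, bool int8, double workspaceMB)
{
    if (workspaceMB >= 0)
        config.setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE,
                                  static_cast<size_t>(workspaceMB * (1 << 20)));
    if (fp16)
        config.setFlag(nvinfer1::BuilderFlag::kFP16);
    if (int8)
        config.setFlag(nvinfer1::BuilderFlag::kINT8);
}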
6 changes: 0 additions & 6 deletions src/Detector/tensorrt_yolo/common/sampleUtils.h
@@ -323,9 +323,7 @@ class Bindings
         for (auto& b : mNames)
         {
             if (mBindings[b.second].isInput)
-            {
                 mBindings[b.second].buffer->hostToDevice(stream);
-            }
         }
     }
 
@@ -334,9 +332,7 @@ class Bindings
         for (auto& b : mNames)
         {
             if (!mBindings[b.second].isInput)
-            {
                 mBindings[b.second].buffer->deviceToHost(stream);
-            }
         }
     }
 
@@ -451,9 +447,7 @@ class Bindings
         {
             const auto binding = n.second;
             if (predicate(mBindings[binding]))
-            {
                 bindings.insert(n);
-            }
         }
         return bindings;
     }
8 changes: 4 additions & 4 deletions src/Detector/tensorrt_yolo/trt_utils.cpp
@@ -255,10 +255,10 @@ std::vector<BBoxInfo> nmsAllClasses(const float nmsThresh,
 
     for (auto& boxes : splitBoxes)
    {
-        if (tensor_rt::YOLOV5 == model_type)
-            boxes = diou_nms(nmsThresh, boxes);
-        else
-            boxes = nonMaximumSuppression(nmsThresh, boxes);
+        if (tensor_rt::YOLOV5 == model_type)
+            boxes = diou_nms(nmsThresh, boxes);
+        else
+            boxes = nonMaximumSuppression(nmsThresh, boxes);
         result.insert(result.end(), boxes.begin(), boxes.end());
     }
     return result;
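nmsAllClasses selects DIoU-NMS for YOLOv5 models and plain greedy NMS otherwise. For readers unfamiliar with the latter, a generic sketch of greedy IoU suppression follows; it is not the repository's nonMaximumSuppression, and the Box struct and field names are made up for the example:

#include <algorithm>
#include <vector>
#include <opencv2/core.hpp>

// Generic greedy NMS: keep the highest-scoring box, drop boxes that overlap
// an already-kept box by more than nmsThresh, repeat.
struct Box
{
    cv::Rect2f rect;
    float score = 0.f;
};

static float IoU(const cv::Rect2f& a, const cv::Rect2f& b)
{
    const float inter = (a & b).area();
    const float uni = a.area() + b.area() - inter;
    return uni > 0.f ? inter / uni : 0.f;
}

static std::vector<Box> GreedyNMS(float nmsThresh, std::vector<Box> boxes)
{
    std::sort(boxes.begin(), boxes.end(), [](const Box& l, const Box& r) { return l.score > r.score; });
    std::vector<Box> kept;
    for (const Box& b : boxes)
    {
        bool overlaps = false;
        for (const Box& k : kept)
        {
            if (IoU(b.rect, k.rect) > nmsThresh)
            {
                overlaps = true;
                break;
            }
        }
        if (!overlaps)
            kept.push_back(b);
    }
    return kept;
}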
