Skip to content

Commit

Permalink
YOLOv5 files updated
Browse files Browse the repository at this point in the history
* Added supported version information
* Not needed to use libmyplugins.so anymore
  • Loading branch information
marcoslucianops committed Jan 10, 2021
1 parent 5b45057 commit 470ed82
Show file tree
Hide file tree
Showing 6 changed files with 539 additions and 7 deletions.
25 changes: 20 additions & 5 deletions YOLOv5.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ NVIDIA DeepStream SDK 5.0.1 configuration for YOLOv5 models

Thanks [DanaHan](https://github.com/DanaHan/Yolov5-in-Deepstream-5.0), [wang-xinyu](https://github.com/wang-xinyu/tensorrtx) and [Ultralytics](https://github.com/ultralytics/yolov5)

Supported version: YOLOv5 3.0/3.1

##

* [Requirements](#requirements)
Expand Down Expand Up @@ -46,6 +48,16 @@ pip3 install scipy
pip3 install tqdm
```

* Pandas
```
pip3 install pandas
```

* seaborn
```
pip3 install seaborn
```

* PyTorch
```
pip3 install torch torchvision
Expand Down Expand Up @@ -77,6 +89,12 @@ git clone https://github.com/wang-xinyu/tensorrtx.git
git clone https://github.com/ultralytics/yolov5.git
```

Note: check out the TensorRTX repo at the commit matching the YOLOv5 3.0/3.1 release
```
cd tensorrtx
git checkout '6d0f5cb'
```

2. Download the latest YOLOv5 (YOLOv5s, YOLOv5m, YOLOv5l or YOLOv5x) weights to the yolov5/weights directory (example for YOLOv5s)
```
wget https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt -P yolov5/weights/
Expand Down Expand Up @@ -112,8 +130,6 @@ f = open('yolov5s.wts', 'w')
```
mv yolov5converter/yololayer.cu tensorrtx/yolov5/yololayer.cu
mv yolov5converter/yololayer.h tensorrtx/yolov5/yololayer.h
mv yolov5converter/hardswish.cu tensorrtx/yolov5/hardswish.cu
mv yolov5converter/hardswish.h tensorrtx/yolov5/hardswish.h
```

2. Move generated yolov5s.wts file to tensorrtx/yolov5 folder (example for YOLOv5s)
Expand All @@ -130,7 +146,7 @@ cmake ..
make
```

4. Convert to TensorRT model (yolov5s.engine and libmyplugins.so files will be generated in tensorrtx/yolov5/build folder)
4. Convert to TensorRT model (yolov5s.engine file will be generated in tensorrtx/yolov5/build folder)
```
sudo ./yolov5 -s
```
Expand All @@ -139,7 +155,6 @@ sudo ./yolov5 -s
```
mkdir /opt/nvidia/deepstream/deepstream-5.0/sources/yolo
cp yolov5s.engine /opt/nvidia/deepstream/deepstream-5.0/sources/yolo/yolov5s.engine
cp libmyplugins.so /opt/nvidia/deepstream/deepstream-5.0/sources/yolo/libmyplugins.so
```

<br />
Expand Down Expand Up @@ -179,7 +194,7 @@ Use my edited [deepstream_app_config.txt](https://raw.githubusercontent.com/marc

Run command
```
LD_PRELOAD=./libmyplugins.so deepstream-app -c deepstream_app_config.txt
deepstream-app -c deepstream_app_config.txt
```

<br />
Expand Down
1 change: 0 additions & 1 deletion external/yolov5/config_infer_primary.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ cluster-mode=4
maintain-aspect-ratio=0
parse-bbox-func-name=NvDsInferParseCustomYoloV5
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet

[class-attrs-all]
pre-cluster-threshold=0.25
4 changes: 3 additions & 1 deletion external/yolov5/nvdsinfer_custom_impl_Yolo/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib6
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group

INCS:= $(wildcard *.h)
SRCFILES:= nvdsparsebbox_Yolo.cpp
SRCFILES:= nvdsparsebbox_Yolo.cpp \
yololayer.cu

TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so

Expand All @@ -48,3 +49,4 @@ $(TARGET_LIB) : $(TARGET_OBJS)

clean:
rm -rf $(TARGET_LIB)
rm -rf $(TARGET_OBJS)
94 changes: 94 additions & 0 deletions external/yolov5/nvdsinfer_custom_impl_Yolo/utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
#ifndef __TRT_UTILS_H_
#define __TRT_UTILS_H_

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <cudnn.h>

#ifndef CUDA_CHECK

// Evaluate a CUDA runtime call and abort the process on failure,
// reporting the numeric error code and the source location.
// The do/while(0) wrapper makes CUDA_CHECK(x); expand to a single
// statement, so the macro is safe inside an unbraced if/else
// (the original brace-only form broke there). std::endl flushes the
// diagnostic before assert() aborts.
#define CUDA_CHECK(callstr) \
    do { \
        cudaError_t error_code = callstr; \
        if (error_code != cudaSuccess) { \
            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
            assert(0); \
        } \
    } while (0)

#endif

namespace Tn
{
class Profiler : public nvinfer1::IProfiler
{
public:
void printLayerTimes(int itrationsTimes)
{
float totalTime = 0;
for (size_t i = 0; i < mProfile.size(); i++)
{
printf("%-40.40s %4.3fms\n", mProfile[i].first.c_str(), mProfile[i].second / itrationsTimes);
totalTime += mProfile[i].second;
}
printf("Time over all layers: %4.3f\n", totalTime / itrationsTimes);
}
private:
typedef std::pair<std::string, float> Record;
std::vector<Record> mProfile;

virtual void reportLayerTime(const char* layerName, float ms)
{
auto record = std::find_if(mProfile.begin(), mProfile.end(), [&](const Record& r){ return r.first == layerName; });
if (record == mProfile.end())
mProfile.push_back(std::make_pair(layerName, ms));
else
record->second += ms;
}
};

//Logger for TensorRT info/warning/errors
class Logger : public nvinfer1::ILogger
{
public:

Logger(): Logger(Severity::kWARNING) {}

Logger(Severity severity): reportableSeverity(severity) {}

void log(Severity severity, const char* msg) override
{
// suppress messages with severity enum value greater than the reportable
if (severity > reportableSeverity) return;

switch (severity)
{
case Severity::kINTERNAL_ERROR: std::cerr << "INTERNAL_ERROR: "; break;
case Severity::kERROR: std::cerr << "ERROR: "; break;
case Severity::kWARNING: std::cerr << "WARNING: "; break;
case Severity::kINFO: std::cerr << "INFO: "; break;
default: std::cerr << "UNKNOWN: "; break;
}
std::cerr << msg << std::endl;
}

Severity reportableSeverity{Severity::kWARNING};
};

// Serialize `val` into the raw byte buffer and advance the buffer
// pointer past it. std::memcpy replaces the original
// `*reinterpret_cast<T*>(buffer) = val`, which is undefined behavior
// (strict-aliasing violation and a potentially misaligned store).
template<typename T>
void write(char*& buffer, const T& val)
{
    std::memcpy(buffer, &val, sizeof(T));
    buffer += sizeof(T);
}

// Deserialize a value of type T from the raw byte buffer into `val`
// and advance the buffer pointer past it. std::memcpy replaces the
// original `val = *reinterpret_cast<const T*>(buffer)`, which is
// undefined behavior (strict-aliasing violation and a potentially
// misaligned load).
template<typename T>
void read(const char*& buffer, T& val)
{
    std::memcpy(&val, buffer, sizeof(T));
    buffer += sizeof(T);
}
}

#endif
Loading

0 comments on commit 470ed82

Please sign in to comment.