Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Backend TensorRT 7 #174

Merged
merged 2 commits into from
May 10, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions engine/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ set (CMAKE_CXX_STANDARD 17)
option(USE_PROFILING "Build with profiling" OFF)
option(USE_RL "Build with reinforcement learning support" OFF)
option(BACKEND_TENSORRT "Build with TensorRT support" ON)
option(BACKEND_TENSORRT_7 "Build with deprecated TensorRT 7 support" OFF)
option(BACKEND_MXNET "Build with MXNet backend (Blas/IntelMKL/CUDA/TensorRT) support" OFF)
option(BACKEND_TORCH "Build with Torch backend (CPU/GPU) support" OFF)
option(BACKEND_OPENVINO "Build with OpenVino backend (CPU/GPU) support" OFF)
Expand Down Expand Up @@ -413,6 +414,11 @@ if (USE_RL)
add_definitions(-DDISABLE_UCI_INFO)
endif()

if(BACKEND_TENSORRT_7)
set(BACKEND_TENSORRT ON)
add_definitions(-DTENSORRT7)
endif()

if (BACKEND_TENSORRT)
# build CrazyAra with TensorRT support, requires a working TensorRT-MXNet library package
message(STATUS "Enabled TensorRT support")
Expand All @@ -428,6 +434,13 @@ if (BACKEND_TENSORRT)
link_directories("$ENV{CUDA_PATH}/lib64")
link_directories("$ENV{CUDA_PATH}/lib/x64")
link_directories("$ENV{TENSORRT_PATH}/lib")
if(BACKEND_TENSORRT_7)
if(WIN32)
find_library(TENSORRT_LIBRARY_MYELIN myelin64_1
HINTS ${TENSORRT_PATH}
PATH_SUFFIXES lib lib64 lib/x64)
endif()
endif()
include_directories("$ENV{TENSORRT_PATH}/include")
include_directories("$ENV{TENSORRT_PATH}/samples/common/")
add_definitions(-DTENSORRT)
Expand All @@ -437,6 +450,9 @@ add_executable(${PROJECT_NAME} ${source_files})

if (BACKEND_TENSORRT)
target_link_libraries(${PROJECT_NAME} nvonnxparser nvinfer cudart ${CUDART_LIB} ${CUBLAS_LIB} ${CUDNN_LIB})
if(BACKEND_TENSORRT_7)
target_link_libraries(${PROJECT_NAME} myelin)
endif()
endif()

if (BACKEND_OPENVINO)
Expand Down
8 changes: 8 additions & 0 deletions engine/src/nn/tensorrtapi.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -228,11 +228,15 @@ ICudaEngine* TensorrtAPI::create_cuda_engine_from_onnx()
set_config_settings(config, 1_GiB, calibrator, calibrationStream);

// build an engine from the TensorRT network with a given configuration struct
#ifdef TENSORRT7
return builder->buildEngineWithConfig(*network, *config);
#else
SampleUniquePtr<IHostMemory> serializedModel{builder->buildSerializedNetwork(*network, *config)};
SampleUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};

// build an engine from the serialized model
return runtime->deserializeCudaEngine(serializedModel->data(), serializedModel->size());;
#endif
}

ICudaEngine* TensorrtAPI::get_cuda_engine() {
Expand All @@ -244,7 +248,11 @@ ICudaEngine* TensorrtAPI::get_cuda_engine() {
if (buffer) {
info_string("deserialize engine:", trtFilePath);
unique_ptr<IRuntime, samplesCommon::InferDeleter> runtime{createInferRuntime(gLogger)};
#ifdef TENSORRT7
engine = runtime->deserializeCudaEngine(buffer, bufferSize, nullptr);
#else
engine = runtime->deserializeCudaEngine(buffer, bufferSize);
#endif
}

if (!engine) {
Expand Down
9 changes: 5 additions & 4 deletions engine/src/rl/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
# Base this Dockerfile from the official NVIDIA-MXNet docker container
# see release page for all current available releases:
# https://docs.nvidia.com/deeplearning/frameworks/mxnet-release-notes/running.html
FROM nvcr.io/nvidia/mxnet:22.03-py3
FROM nvcr.io/nvidia/mxnet:20.09-py3

MAINTAINER QueensGambit

Expand All @@ -39,11 +39,11 @@ RUN cd /root \
&& make install

# Clone TensorRT repository for TensorRT backend
# checkout commit for tag 8.2.1
# checkout commit for tag 20.09 ("git checkout tags/20.09" is currently not working)
RUN cd /root \
&& git clone https://github.com/NVIDIA/TensorRT \
&& cd TensorRT \
&& git checkout 6f38570b74066ef464744bc789f8512191f1cbc0
&& git checkout f693a6d723ef2766be36deb5e7987cd50159973a
ENV TENSORRT_PATH /root/TensorRT/
ENV CUDA_PATH /usr/local/cuda/

Expand Down Expand Up @@ -98,7 +98,7 @@ RUN apt-get update -y \
RUN cd /root/CrazyAra/engine \
&& mkdir build \
&& cd build \
&& cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DUSE_RL=ON .. \
&& cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DUSE_RL=ON -DBACKEND_TENSORRT_7=ON .. \
&& make -j8

# Rename the config files
Expand All @@ -115,3 +115,4 @@ RUN apt-get update -y \
CMD cd /root/CrazyAra/engine/src/rl/ \
&& /bin/bash