TFTRT: Enable ISliceLayer for TRT version 5.1.3.1+ #26848

Merged
12 changes: 6 additions & 6 deletions tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
@@ -449,7 +449,7 @@ nvinfer1::ITensor* Converter::CreateConstantLayer(
if (!layer) return nullptr;
const nvinfer1::DataType trt_dtype = trt_weights.type;
nvinfer1::ITensor* trt_tensor = layer->getOutput(0);
-#if !IS_TRT_VERSION_GE(5, 1, 3)
+#if !IS_TRT_VERSION_GE(5, 1, 3, 0)
// TODO(laigd): there is a bug in TensorRT 5.0 library that, if we don't set
// the data type below, it will always be kFLOAT regardless what the data type
// of the weights is. Once NVIDIA fixes this bug, we should remove the data
@@ -676,13 +676,13 @@ class TRT_TensorOrWeights::SimpleITensor : public nvinfer1::ITensor {

void setLocation(nvinfer1::TensorLocation location) override {}

-#if IS_TRT_VERSION_GE(5, 0, 0)
+#if IS_TRT_VERSION_GE(5, 0, 0, 0)
bool setDynamicRange(float min, float max) override { return true; }

float getDynamicRange() const override { return 0; }
#endif

-#if IS_TRT_VERSION_GE(5, 1, 0)
+#if IS_TRT_VERSION_GE(5, 1, 0, 0)
bool dynamicRangeIsSet() const override { return true; }

void resetDynamicRange() override {}
@@ -1301,7 +1301,7 @@ void Converter::MaybeApplyQuantizationRanges() {
// Infer ranges across marked ops.
PropagateQuantizationRanges();
// Apply ranges.
-#if IS_TRT_VERSION_GE(5, 0, 0)
+#if IS_TRT_VERSION_GE(5, 0, 0, 0)
for (auto pair : quantization_ranges_) {
nvinfer1::ITensor* tensor = pair.first;
const float range = pair.second;
@@ -2318,7 +2318,7 @@ Status ConvertStridedSliceHelper(OpConverterParams* params,
}
// TRT 5.1 adds a slice layer. For older versions, we attempt to use the
// padding layer with negative padding.
-#if IS_TRT_VERSION_GE(5, 1, 0) && 0
+#if IS_TRT_VERSION_GE(5, 1, 3, 1)
// TODO(laigd): TRT 5.1 RC has a bug when ISliceLayer is used along with
// IConcatenationLayer, so disable ISliceLayer for now until it's fixed.
// Use ISliceLayer.
@@ -3239,7 +3239,7 @@ UnaryOperationMap() {
{"Sqrt", nvinfer1::UnaryOperation::kSQRT},
{"Abs", nvinfer1::UnaryOperation::kABS},
{"Reciprocal", nvinfer1::UnaryOperation::kRECIP},
#if IS_TRT_VERSION_GE(5, 1, 0)
#if IS_TRT_VERSION_GE(5, 1, 0, 0)
{"Sin", nvinfer1::UnaryOperation::kSIN},
{"Cos", nvinfer1::UnaryOperation::kCOS},
{"Tan", nvinfer1::UnaryOperation::kTAN},
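Note: the substantive change in this file is in ConvertStridedSliceHelper, where the previously hard-disabled gate IS_TRT_VERSION_GE(5, 1, 0) && 0 becomes IS_TRT_VERSION_GE(5, 1, 3, 1), so an ISliceLayer is only emitted on TensorRT builds new enough to have the ISliceLayer/IConcatenationLayer fix, and the negative-padding fallback is kept otherwise; the remaining hunks just pass the new fourth (build) argument to existing IS_TRT_VERSION_GE calls. A minimal sketch of which versions pass the new gate follows; VersionGE is an illustrative stand-in that mirrors the four-field comparison of IS_TRT_VERSION_GE as defined in convert_nodes.h below, so it compiles without the TensorRT headers.

// Minimal sketch (not part of the change): VersionGE mirrors the
// lexicographic major/minor/patch/build comparison of IS_TRT_VERSION_GE.
constexpr bool VersionGE(int maj, int mnr, int pat, int bld,
                         int rmaj, int rmnr, int rpat, int rbld) {
  return (maj > rmaj) || (maj == rmaj && mnr > rmnr) ||
         (maj == rmaj && mnr == rmnr && pat > rpat) ||
         (maj == rmaj && mnr == rmnr && pat == rpat && bld >= rbld);
}

// TensorRT 5.1.3.1 and anything newer passes, so the ISliceLayer path is used.
static_assert(VersionGE(5, 1, 3, 1, 5, 1, 3, 1), "");
static_assert(VersionGE(5, 1, 4, 0, 5, 1, 3, 1), "");
static_assert(VersionGE(6, 0, 0, 0, 5, 1, 3, 1), "");
// Anything older, e.g. 5.1.3.0 or 5.1.2.2, keeps the negative-padding fallback.
static_assert(!VersionGE(5, 1, 3, 0, 5, 1, 3, 1), "");
static_assert(!VersionGE(5, 1, 2, 2, 5, 1, 3, 1), "");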
6 changes: 4 additions & 2 deletions tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h
@@ -43,11 +43,13 @@ extern const char* const kOutputPHName;

namespace convert {

-#define IS_TRT_VERSION_GE(major, minor, patch) \
+#define IS_TRT_VERSION_GE(major, minor, patch, build) \
((NV_TENSORRT_MAJOR > major) || \
(NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR > minor) || \
(NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR == minor && \
-    NV_TENSORRT_PATCH >= patch))
+    NV_TENSORRT_PATCH > patch) || \
+   (NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR == minor && \
+    NV_TENSORRT_PATCH == patch && NV_TENSORRT_BUILD >= build))

struct EngineConnection {
// Constructs a non-control edge.
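The macro change above is what makes the 5.1.3.1 gate expressible: the old three-argument form could not tell 5.1.3.0 apart from 5.1.3.1. Below is a minimal standalone sketch of how the updated macro evaluates against a local TensorRT installation. It assumes NvInfer.h is on the include path and defines NV_TENSORRT_MAJOR/MINOR/PATCH/BUILD; the program and its messages are illustrative, not part of TensorFlow.

// Illustrative only: copy of the updated macro, checked against the version
// macros exported by the installed TensorRT headers.
#include <cstdio>
#include <NvInfer.h>

#define IS_TRT_VERSION_GE(major, minor, patch, build) \
  ((NV_TENSORRT_MAJOR > major) || \
   (NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR > minor) || \
   (NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR == minor && \
    NV_TENSORRT_PATCH > patch) || \
   (NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR == minor && \
    NV_TENSORRT_PATCH == patch && NV_TENSORRT_BUILD >= build))

int main() {
  std::printf("TensorRT %d.%d.%d.%d\n", NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR,
              NV_TENSORRT_PATCH, NV_TENSORRT_BUILD);
#if IS_TRT_VERSION_GE(5, 1, 3, 1)
  std::printf("ISliceLayer path for StridedSlice would be compiled in.\n");
#else
  std::printf("Negative-padding fallback would be compiled in.\n");
#endif
  return 0;
}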
12 changes: 6 additions & 6 deletions tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
@@ -238,7 +238,7 @@ class FakeITensor : public nvinfer1::ITensor {
location_ = location;
}

-#if IS_TRT_VERSION_GE(5, 0, 0)
+#if IS_TRT_VERSION_GE(5, 0, 0, 0)
bool setDynamicRange(float min, float max) override {
dynamic_range_ = std::max(std::abs(min), std::abs(max));
return true;
@@ -247,7 +247,7 @@ class FakeITensor : public nvinfer1::ITensor {
float getDynamicRange() const override { return dynamic_range_; }
#endif

-#if IS_TRT_VERSION_GE(5, 1, 0)
+#if IS_TRT_VERSION_GE(5, 1, 0, 0)
bool dynamicRangeIsSet() const override { return true; }

void resetDynamicRange() override {}
@@ -850,7 +850,7 @@ TEST_F(ConverterTest, MaybeApplyQuantizationRanges) {

// Input range should be inferred along the chain and applied to tensors.
int8_converter.MaybeApplyQuantizationRanges();
-#if IS_TRT_VERSION_GE(5, 0, 0)
+#if IS_TRT_VERSION_GE(5, 0, 0, 0)
EXPECT_EQ(input.getDynamicRange(), 5.0f);
EXPECT_EQ(infer_1.getDynamicRange(), 5.0f);
EXPECT_EQ(infer_2.getDynamicRange(), 5.0f);
@@ -2710,7 +2710,7 @@ TEST_F(OpConverterTest, ConvertStridedSlice) {
RunValidationAndConversion(node_def);
}
// TRT 5.1+ supports strides
-#if IS_TRT_VERSION_GE(5, 1, 0)
+#if IS_TRT_VERSION_GE(5, 1, 0, 0)
{
// Negative strides, should fail.
Reset();
@@ -2773,7 +2773,7 @@ TEST_F(OpConverterTest, ConvertStridedSlice) {
// Same input is used for all tests.
const std::vector<float> ok_input = {1, 2, 3, 4, 5, 6};

-#if IS_TRT_VERSION_GE(5, 1, 0)
+#if IS_TRT_VERSION_GE(5, 1, 0, 0)
const int kStridedSliceOKCases = 23;
#else
const int kStridedSliceOKCases = 19;
@@ -2900,7 +2900,7 @@ TEST_F(OpConverterTest, ConvertStridedSlice) {
/*end_mask=*/get_mask({1, 0, 0, 0}),
/*expected_output_dims=*/{1, 2, 3},
/*expected_output=*/{1, 2, 3, 4, 5, 6}},
-#if IS_TRT_VERSION_GE(5, 1, 0)
+#if IS_TRT_VERSION_GE(5, 1, 0, 0)
// Strides
TestParams{/*input_dims=*/{6},
/*begin=*/{0, 0}, /*end=*/{0, 5}, /*strides=*/{1, 2},