Skip to content

Commit

Permalink
fix: Update "reduceAxes" variable in GlobalPoolingConverter function …
Browse files Browse the repository at this point in the history
…and add corresponding uTests

Signed-off-by: Ruoqian Guo <ruoqiang@nvidia.com>
  • Loading branch information
ruoqianguo committed Dec 30, 2021
1 parent 7191959 commit f6f5e3e
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 1 deletion.
3 changes: 2 additions & 1 deletion core/conversion/converters/impl/pooling.cpp
Expand Up @@ -16,8 +16,9 @@ bool GlobalPoolingConverter(
nvinfer1::PoolingType pool_type) {
auto in = args[0].ITensorOrFreeze(ctx);
nvinfer1::Dims dims = in->getDimensions();
auto out_size = util::toDims(args[1].unwrapToIntList());
// Generate a bitmask of all 1s except the last 2 bits (N and C axes)
uint32_t reduceAxes = ((1 << dims.nbDims) - 1) & ~0b11;
uint32_t reduceAxes = ((1 << dims.nbDims) - 1) ^ ((1 << (dims.nbDims - out_size.nbDims)) - 1);
auto* new_layer = ctx->net->addReduce(
*in,
pool_type == nvinfer1::PoolingType::kMAX ? nvinfer1::ReduceOperation::kMAX : nvinfer1::ReduceOperation::kAVG,
Expand Down
52 changes: 52 additions & 0 deletions tests/core/conversion/converters/test_pooling.cpp
Expand Up @@ -436,6 +436,32 @@ TEST(Converters, ATenAdaptiveAvgPool2DConvertsCorrectly) {
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
}

TEST(Converters, ATenAdaptiveAvgPool2DGlobalPoolingConvertsCorrectly) {
  // An output size of {1, 1} over the full spatial extent exercises the
  // GlobalPoolingConverter path (lowered to a TensorRT reduce layer) rather
  // than a regular pooling layer.
  const auto graph = R"IR(
      graph(%0 : Tensor):
        %2 : int = prim::Constant[value=1]()
        %3 : int = prim::Constant[value=1]()
        %6 : int[] = prim::ListConstruct(%2, %3)
        %10 : Tensor = aten::adaptive_avg_pool2d(%0, %6)
        return (%10))IR";

  auto g = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, g.get());

  // PyTorch adaptive_avg_pool2d needs a 4D input or a 3D input
  auto in = at::randint(-5, 5, {64, 16, 32, 32}, at::kCUDA);

  // Run the graph through the TorchScript interpreter as the reference.
  auto jit_in = at::clone(in);
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});

  // Run the same graph through the converted TensorRT engine.
  auto trt_in = at::clone(in);
  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {trt_in});

  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
}

TEST(Converters, ATenAdaptiveAvgPool2DConvertsCorrectlyWithDynamicInput) {
const auto graph = R"IR(
graph(%0 : Tensor):
Expand Down Expand Up @@ -488,6 +514,32 @@ TEST(Converters, ATenAdaptiveAvgPool1DConvertsCorrectly) {
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 1.0));
}

TEST(Converters, ATenAdaptiveAvgPool1DGlobalPoolingConvertsCorrectly) {
  // aten::adaptive_avg_pool1d with an output size of 1 is a global average
  // pool; verify the TensorRT conversion against the TorchScript interpreter.
  const auto graph = R"IR(
      graph(%0 : Tensor):
        %2 : int = prim::Constant[value=1]()
        %6 : int[] = prim::ListConstruct(%2)
        %10 : Tensor = aten::adaptive_avg_pool1d(%0, %6)
        return (%10))IR";

  auto parsed = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, parsed.get());

  // PyTorch adaptive_avg_pool1d needs a 3D input or a 2D input; a 2D
  // (C, L) tensor is used here.
  auto input = at::randint(-5, 5, {3, 16}, at::kCUDA);
  auto jit_input = at::clone(input);
  auto trt_input = at::clone(input);

  // Reference result from the TorchScript interpreter.
  auto jit_params = torch_tensorrt::core::ir::get_static_params(parsed->inputs(), {});
  auto jit_out = torch_tensorrt::tests::util::RunGraph(parsed, jit_params, {jit_input});

  // Result from the converted TensorRT engine.
  auto trt_params = torch_tensorrt::core::ir::get_static_params(parsed->inputs(), {});
  auto trt_out = torch_tensorrt::tests::util::RunGraphEngine(parsed, trt_params, {trt_input});

  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_out[0], trt_out[0], 2e-6));
}

TEST(Converters, ATenAdaptiveMaxPool2DConvertsCorrectly) {
const auto graph = R"IR(
graph(%0 : Tensor):
Expand Down

0 comments on commit f6f5e3e

Please sign in to comment.