Commit

Merge pull request #207 from NVIDIA-AI-IOT/adaptive_max_pool2d
added support for adaptive_max_pool2d using regular pooling
jaybdub committed Dec 17, 2019
2 parents 07831d6 + 72f6ec2 commit d526b24
Showing 2 changed files with 37 additions and 0 deletions.
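
The commit description above says adaptive max pooling is implemented with regular pooling. The trick: when the input's spatial dims are evenly divisible by the requested output size, adaptive_max_pool2d is exactly a regular max pool with stride = input_size // output_size and kernel_size = stride. A minimal PyTorch sketch of that equivalence (the 224x224 input and 2x2 output are illustrative, matching the tests below):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 224, 224)
out_size = (2, 2)

# stride and kernel chosen exactly as in the converter below
stride = (x.shape[-2] // out_size[-2], x.shape[-1] // out_size[-1])  # (112, 112)

adaptive = F.adaptive_max_pool2d(x, out_size)
regular = F.max_pool2d(x, kernel_size=stride, stride=stride)

assert torch.equal(adaptive, regular)  # exact when input dims divide evenly

When the sizes do not divide evenly, adaptive pooling uses variable window boundaries and the two differ, so the converter's output is only exact for divisible shapes.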
1 change: 1 addition & 0 deletions torch2trt/converters/__init__.py
@@ -6,6 +6,7 @@

from .activation import *
from .adaptive_avg_pool2d import *
from .adaptive_max_pool2d import *
from .AdaptiveAvgPool2d import *
from .add import *
from .avg_pool2d import *
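
The one-line change above is what activates the new converter: the star import executes adaptive_max_pool2d.py, whose @tensorrt_converter decorator registers the function against the torch call it handles. A generic sketch of that decorator-registry pattern (the registry name here is illustrative, not necessarily torch2trt's internal one):

# hypothetical registry illustrating the pattern; torch2trt keeps its own
CONVERTERS = {}

def tensorrt_converter(method_name):
    def register(fn):
        CONVERTERS[method_name] = fn  # look up the converter by the torch method's name
        return fn
    return register

@tensorrt_converter('torch.nn.functional.adaptive_max_pool2d')
def convert_adaptive_max_pool2d(ctx):
    ...  # body as in the new file below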
36 changes: 36 additions & 0 deletions torch2trt/converters/adaptive_max_pool2d.py
@@ -0,0 +1,36 @@
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test


@tensorrt_converter('torch.nn.functional.adaptive_max_pool2d')
def convert_adaptive_max_pool2d(ctx):
input = ctx.method_args[0]
output = ctx.method_return

output_size = ctx.method_args[1]
if isinstance(output_size, int):
output_size = (output_size, ) * 2

stride = (input._trt.shape[-2] // output_size[-2], input._trt.shape[-1] // output_size[-1])

kernel_size = stride
layer = ctx.network.add_pooling(
input=input._trt, type=trt.PoolingType.MAX, window_size=kernel_size)
layer.stride = stride

output._trt = layer.get_output(0)


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_1x1():
return torch.nn.AdaptiveMaxPool2d((1, 1))


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_2x2():
return torch.nn.AdaptiveMaxPool2d((2, 2))


@add_module_test(torch.float32, torch.device('cuda'), [(1, 3, 224, 224)])
def test_adaptive_max_pool2d_3x3():
return torch.nn.AdaptiveMaxPool2d((3, 3))
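
With the converter registered, modules containing AdaptiveMaxPool2d convert through the normal torch2trt entry point. A usage sketch (the model and shapes are illustrative; the output size must divide the input dims evenly, as noted above):

import torch
from torch2trt import torch2trt

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
    torch.nn.AdaptiveMaxPool2d((2, 2)),  # 224 / 2 divides evenly
).eval().cuda()

x = torch.randn(1, 3, 224, 224).cuda()
model_trt = torch2trt(model, [x])  # builds a TensorRT engine using the new converter

print(torch.max(torch.abs(model(x) - model_trt(x))))  # expect ~0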
