Traceback (most recent call last):
File "test.py", line 22, in <module>
mod, params = relay.frontend.from_pytorch(trace, input_shapes)
File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/pytorch.py", line 4970, in from_pytorch
outputs = converter.convert_operators(operator_nodes, outputs, ret_name)
File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/pytorch.py", line 4256, in convert_operators
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/pytorch.py", line 4256, in <listcomp>
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/pytorch.py", line 4403, in _wrap_const
return _expr.const(c)
File "/workplace/software/tvm/tvm/python/tvm/relay/expr.py", line 677, in const
value.dtype, None
AttributeError: 'NoneType' object has no attribute 'dtype'
import torch
from tvm import relay
import tvm
import numpy as np
from torch.nn import Module
# Random float32 input of shape (batch=1, channels=2, length=2) for the repro.
para_0 = torch.randn(1, 2, 2, dtype=torch.float32)
class adaptive_max_pool1d(Module):
    """Module wrapper around ``F.adaptive_max_pool1d`` with output size 2.

    The third positional argument (``return_indices``) is ``True``, so
    ``forward`` returns a ``(values, indices)`` tuple rather than a single
    tensor.
    """

    def forward(self, *args):
        # args[0] is the input tensor; pool each channel down to length 2.
        return torch.nn.functional.adaptive_max_pool1d(args[0], 2, True)
# Build the model, run it once in PyTorch, JIT-trace it, then hand the trace
# to TVM's PyTorch frontend — the last step is where the reported crash fires.
m = adaptive_max_pool1d().float().eval()
print(m)

input_data = para_0
# (values, indices) tuple — return_indices=True in the module's forward.
torch_outputs = m(input_data)
print(torch_outputs)

trace = torch.jit.trace(m, input_data)
input_shapes = [("input0", torch.Size([1, 2, 2]))]
# Per the traceback above, this raises AttributeError inside _wrap_const
# because one of the traced outputs converts to None.
mod, params = relay.frontend.from_pytorch(trace, input_shapes)
The PyTorch model with the `adaptive_max_pool1d` layer will lead to a crash.

Actual behavior
Steps to reproduce
Triage
cc @echuraev @shingjan