Skip to content

Commit

Permalink
[quantized] Fix return values of _get_name() in quantized ConvTranspo…
Browse files Browse the repository at this point in the history
…se (#97678)

This PR fixes incorrect return values of _get_name() in the quantized `ConvTranspose1d`, `ConvTranspose2d`, and `ConvTranspose3d` modules (the names were misspelled as `ConvTranpose{1,2,3}d`).

Pull Request resolved: #97678
Approved by: https://github.com/vkuzo, https://github.com/kit1980
  • Loading branch information
kiszk authored and pytorchmergebot committed Apr 7, 2023
1 parent 88208c6 commit 482f87a
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 6 deletions.
6 changes: 3 additions & 3 deletions torch/ao/nn/quantized/dynamic/modules/conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
groups, bias, dilation, padding_mode, **factory_kwargs)

def _get_name(self):
return 'DynamicQuantizedConvTranpose1d'
return 'DynamicQuantizedConvTranspose1d'

def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
Expand Down Expand Up @@ -328,7 +328,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
groups, bias, dilation, padding_mode, **factory_kwargs)

def _get_name(self):
return 'DynamicQuantizedConvTranpose2d'
return 'DynamicQuantizedConvTranspose2d'

def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
Expand Down Expand Up @@ -389,7 +389,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
groups, bias, dilation, padding_mode, **factory_kwargs)

def _get_name(self):
return 'DynamicQuantizedConvTranpose3d'
return 'DynamicQuantizedConvTranspose3d'

def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
Expand Down
6 changes: 3 additions & 3 deletions torch/ao/nn/quantized/modules/conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -730,7 +730,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)

def _get_name(self):
return 'QuantizedConvTranpose1d'
return 'QuantizedConvTranspose1d'

def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
Expand Down Expand Up @@ -821,7 +821,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)

def _get_name(self):
return 'QuantizedConvTranpose2d'
return 'QuantizedConvTranspose2d'

def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
Expand Down Expand Up @@ -914,7 +914,7 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)

def _get_name(self):
return 'QuantizedConvTranpose3d'
return 'QuantizedConvTranspose3d'

def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose3d_prepack(
Expand Down

0 comments on commit 482f87a

Please sign in to comment.