# conv_relu.py
import torch
import torch.nn.intrinsic
import torch.nn.intrinsic.qat
import torch.nn.functional as F
import torch.nn.quantized as nnq

from torch.nn.utils import fuse_conv_bn_weights

_reverse_repeat_padding = nnq.modules.conv._reverse_repeat_padding
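# Note (added comment, not in the original file): `_reverse_repeat_padding`
# converts a conv-style padding tuple (outermost spatial dimension first) into
# the reversed, twice-repeated list that F.pad expects, e.g. (2, 1) -> [1, 1, 2, 2].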
class ConvReLU1d(nnq.Conv1d):
    r"""
    A ConvReLU1d module is a fused module of Conv1d and ReLU.

    We adopt the same interface as :class:`torch.nn.quantized.Conv1d`.

    Attributes:
        Same as torch.nn.quantized.Conv1d
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU1d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU1d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != 'zeros':
            # Padding in Conv1d is stored as (p, p), need to get (p,)
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv1d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU1d'

    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU1d:
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super(ConvReLU1d, cls).from_float(mod)
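
# Illustrative sketch (not part of the original file): the fused module can be
# called directly on a quantized tensor, assuming a build where the 'fbgemm'
# or 'qnnpack' quantized engine is available:
#
#     qconv_relu = ConvReLU1d(16, 32, kernel_size=3)
#     x = torch.quantize_per_tensor(torch.randn(1, 16, 64),
#                                   scale=1.0, zero_point=0, dtype=torch.quint8)
#     y = qconv_relu(x)  # int8 conv1d with the ReLU fused into the same kernel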
class ConvReLU2d(nnq.Conv2d):
    r"""
    A ConvReLU2d module is a fused module of Conv2d and ReLU.

    We adopt the same interface as :class:`torch.nn.quantized.Conv2d`.

    Attributes:
        Same as torch.nn.quantized.Conv2d
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU2d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        super(ConvReLU2d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv2d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU2d'

    @classmethod
    def from_float(cls, mod):
        if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU2d:
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super(ConvReLU2d, cls).from_float(mod)
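
# Illustrative sketch (not part of the original file): `from_float` is the hook
# that `torch.quantization.convert` uses to swap in this module. Given an
# observed float `torch.nn.intrinsic.ConvReLU2d` whose observers have been
# calibrated, the quantized module would be produced roughly as:
#
#     qmod = ConvReLU2d.from_float(observed_conv_relu)  # hypothetical instance
#
# For QAT graphs, the ConvBnReLU2d branch above first folds the BatchNorm
# statistics into the conv weight and bias before the weights are packed.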
class ConvReLU3d(nnq.Conv3d):
    r"""
    A ConvReLU3d module is a fused module of Conv3d and ReLU.

    We adopt the same interface as :class:`torch.nn.quantized.Conv3d`.

    Attributes:
        Same as torch.nn.quantized.Conv3d
    """
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d  # type: ignore[assignment]

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros'):
        assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
        super(ConvReLU3d, self).__init__(
            in_channels, out_channels, kernel_size, stride=stride,
            padding=padding, dilation=dilation, groups=groups, bias=bias,
            padding_mode=padding_mode)

    def forward(self, input):
        # Temporarily using len(shape) instead of ndim due to JIT issue
        # https://github.com/pytorch/pytorch/issues/23890
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != 'zeros':
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice,
                          mode=self.padding_mode)
        return torch.ops.quantized.conv3d_relu(
            input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU3d'

    @classmethod
    def from_float(cls, mod):
        # TODO: Add qat support for ConvReLU3d and ConvBnReLU3d
        return super(ConvReLU3d, cls).from_float(mod)
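
# The block below is a hedged, self-contained demo (not part of the original
# file) of the eager-mode post-training quantization flow that produces the
# modules defined above. It assumes the 'fbgemm' backend (x86 builds).
if __name__ == "__main__":
    import torch.quantization

    class _Demo(torch.nn.Module):
        def __init__(self):
            super(_Demo, self).__init__()
            self.quant = torch.quantization.QuantStub()
            self.conv = torch.nn.Conv2d(3, 8, 3)
            self.relu = torch.nn.ReLU()
            self.dequant = torch.quantization.DeQuantStub()

        def forward(self, x):
            return self.dequant(self.relu(self.conv(self.quant(x))))

    m = _Demo().eval()
    m.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    # Fusing replaces (conv, relu) with torch.nn.intrinsic.ConvReLU2d.
    torch.quantization.fuse_modules(m, [['conv', 'relu']], inplace=True)
    torch.quantization.prepare(m, inplace=True)
    m(torch.randn(1, 3, 32, 32))  # calibration pass to record activation ranges
    torch.quantization.convert(m, inplace=True)
    print(m.conv)  # now the QuantizedConvReLU2d defined above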