[Operator] Add leaky_relu and conv2d_transpose operator (#3)
yaoyaoding committed Oct 29, 2022
1 parent cfe3605 commit e2e1ed1
Showing 9 changed files with 237 additions and 9 deletions.
86 changes: 83 additions & 3 deletions python/hidet/graph/frontend/onnx.py
@@ -5,7 +5,7 @@
Please refer to https://github.com/onnx/onnx/blob/main/onnx/onnx.proto for the proto structure of the ONNX format.
"""
# pylint: disable=unused-argument
from typing import List, Union, Optional, Dict, Callable, Type
from typing import List, Union, Optional, Dict, Callable, Type, Sequence, Set
from collections import defaultdict
import warnings
import os
@@ -117,6 +117,12 @@ def run_v13(self, inputs: List[Tensor]) -> List[Tensor]:
def run_v14(self, inputs: List[Tensor]) -> List[Tensor]:
return NotImplemented

def run_v15(self, inputs: List[Tensor]) -> List[Tensor]:
return NotImplemented

def run_v16(self, inputs: List[Tensor]) -> List[Tensor]:
return NotImplemented

@staticmethod
def tensor2list(tensor: Tensor) -> Union[List, int, float]:
return tensor.cpu().numpy().tolist()
@@ -867,6 +873,58 @@ def run(self, inputs: List[Tensor]) -> List[Tensor]:
return [randn([1]) for name in self.output_names]


@register_onnx_operator
class OnnxLeakyRelu(OnnxOperator):
def run_v1(self, inputs: List[Tensor]) -> List[Tensor]:
alpha = self.attrs.get('alpha', 0.01)
return [ops.leaky_relu(inputs[0], alpha)]
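
Note: LeakyRelu computes y = x for x >= 0 and y = alpha * x otherwise; the 0.01 default matches the ONNX spec. A minimal usage sketch (illustrative only, assuming a hidet build that includes this commit):

import hidet

x = hidet.randn([2, 3])
y = hidet.ops.leaky_relu(x, alpha=0.01)  # elementwise: x if x >= 0 else 0.01 * x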


@register_onnx_operator
class OnnxConvTranspose(OnnxOperator):
def run_v1(self, inputs: List[Tensor]) -> List[Tensor]:
from hidet.graph.ops.definitions.utils import normalize_stride

data, weight = inputs[:2]
if len(data.shape) != 4:
raise ValueError('Currently, only 2D ConvTranspose is supported.')
auto_pad: str = self.attrs.get('auto_pad', 'NOTSET')
dilations: Union[int, List[int]] = self.attrs.get('dilations', 1)
group: int = self.attrs.get('group', 1)
output_padding: Union[int, List[int]] = self.attrs.get('output_padding', 0)
output_shape: Optional[List[int]] = self.attrs.get('output_shape', None)
pads: Union[int, List[int]] = self.attrs.get('pads', 0)
strides: Union[int, List[int]] = self.attrs.get('strides', 1)

if auto_pad != 'NOTSET':
raise NotImplementedError('auto_pad {} is not supported yet.'.format(auto_pad))
if output_shape is not None:
raise NotImplementedError('output_shape is not supported yet.')
if isinstance(dilations, int):
dilations = [dilations] * 2
if any(d != 1 for d in dilations):
raise NotImplementedError('dilations {} are not supported yet.'.format(dilations))

output_padding = normalize_stride(output_padding)

if isinstance(pads, list) and len(pads) == 4 and any(p < 0 for p in pads[2:]):
# some upstream frameworks export ONNX models with negative trailing pads;
# clamp them to zero here and fold the trimmed extent into output_padding
# (remove this workaround once the upstream bug is fixed)
for i, p in enumerate(pads[2:]):
if p < 0:
pads[2 + i] = 0
output_padding[i] += -p

output = ops.conv2d_transpose(
data, weight, stride=strides, padding=pads, groups=group, output_padding=output_padding
)
if len(inputs) > 2:
bias: Tensor = inputs[2] # 1D tensor added on channel axis
output = output + ops.unsqueeze(bias, [0, 2, 3])
return [output]
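
Note: a concrete walk-through of the negative-pads workaround above, using hypothetical exporter output (not values from the commit):

pads = [1, 1, -2, 0]              # (px0, py0, px1, py1) with a negative trailing pad
output_padding = [0, 0]
for i, p in enumerate(pads[2:]):
    if p < 0:
        pads[2 + i] = 0           # clamp the trailing pad to zero
        output_padding[i] += -p   # recover the trimmed extent as output_padding
assert pads == [1, 1, 0, 0] and output_padding == [2, 0]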


def dispatch(node, op_sets: List[int]) -> OnnxOperator:
op_type = node.op_type
if op_type not in dispatch_table:
@@ -877,6 +935,22 @@ def dispatch(node, op_sets: List[int]) -> OnnxOperator:
return op


def dispatch_operators(nodes: Sequence[onnx.NodeProto], op_sets: List[int]) -> List[OnnxOperator]:
dispatched: List[OnnxOperator] = []
unsupported: Set[str] = set()

for node in nodes:
op_type: str = node.op_type
if op_type not in dispatch_table:
unsupported.add(op_type)
else:
op_cls: Type[OnnxOperator] = dispatch_table[op_type]
dispatched.append(op_cls(node, op_sets))
if len(unsupported) > 0:
raise NotImplementedError("Operator(s) {} from onnx have not been supported yet.".format(list(unsupported)))
return dispatched
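
Note: unlike the per-node dispatch above, dispatch_operators collects every unknown op_type before raising, so a single error names all unsupported operators. A hypothetical sketch (the node type below is made up):

import onnx.helper

fake = onnx.helper.make_node('NotARealOp', inputs=['x'], outputs=['y'])
# dispatch_operators([fake], op_sets=[13])
# -> NotImplementedError: Operator(s) ['NotARealOp'] from the ONNX model are not supported yet.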


def run_trt(node: OnnxOperator, inputs: List[Tensor]) -> List[Tensor]:
# pylint: disable=no-member
import onnxruntime
@@ -925,8 +999,8 @@ def __init__(self, graph: onnx.GraphProto, op_sets: List[int], env_tensors: Opti
self.parameters[param.name] = from_numpy(numpy_array).cuda()
self.input_names: List[str] = [input.name for input in graph.input if input.name not in self.parameters]
self.output_names: List[str] = [output.name for output in graph.output]
self.operators: List[OnnxOperator] = []
self.operators: List[OnnxOperator] = [dispatch(node, op_sets=self.op_sets) for node in graph.node]
self.operators: List[OnnxOperator] = dispatch_operators(graph.node, op_sets)
# self.operators: List[OnnxOperator] = [dispatch(node, op_sets=self.op_sets) for node in graph.node]
self.env_tensors: Dict[str, Tensor] = env_tensors if env_tensors else {}
self.usage_count: Dict[str, int] = self.count_usage()

@@ -951,6 +1025,12 @@ def forward(self, *args):
if isinstance(operator, OnnxIf):
operator.env_tensors = name2tensor
outputs = operator.run(inputs)
if not isinstance(outputs, (tuple, list)):
raise ValueError(
'Operator "{}" should return a sequence of tensors, got {}.'.format(
operator.node.op_type, type(outputs)
)
)

check = False
if check:
3 changes: 2 additions & 1 deletion python/hidet/graph/ops/__init__.py
@@ -2,10 +2,11 @@
from . import definitions

from .definitions.conv2d import conv2d, conv2d_winograd, conv2d_gemm, conv2d_gemm_image_transform
from .definitions.conv2d_transpose import conv2d_transpose
from .definitions.matmul import batch_matmul, matmul
from .definitions.pool import max_pool2d, avg_pool2d
from .definitions.softmax import softmax
from .definitions.activation import relu, sigmoid, clip, relu6
from .definitions.activation import relu, leaky_relu, sigmoid, clip, relu6
from .definitions.norm import batch_norm_infer, instance_norm, layer_norm
from .definitions.image import resize2d
from .definitions.arithmatic import add, sub, multiply, divide, neg, sqrt, rsqrt, sin, cos, pow, erf, tanh, where
4 changes: 3 additions & 1 deletion python/hidet/graph/ops/definitions/__init__.py
@@ -3,6 +3,8 @@
from .conv2d import conv2d_gemm_image_transform, conv2d_gemm_filter_transform, conv2d_gemm_inverse_transform
from .conv2d import conv2d_winograd_image_transform, conv2d_winograd_filter_transform, conv2d_winograd_inverse_transform

from .conv2d_transpose import conv2d_transpose

from .matmul import batch_matmul, matmul
from .pool import max_pool2d, avg_pool2d
from .softmax import softmax
@@ -12,7 +14,7 @@
from .arithmatic import add, sub, multiply, divide, neg, sqrt, rsqrt, where, max, min, reciprocal, exp, log
from .compare import equal, less_than, greater_than, less_or_equal, greater_or_equal, cond_not
from .reduce import reduce_mean, reduce_min, reduce_max, reduce_sum, reduce_var, argmin, argmax
from .transform import squeeze, unsqueeze, flatten, concat, cast, take, rearrange, strided_slice, split
from .transform import squeeze, unsqueeze, flatten, concat, cast, take, rearrange, strided_slice, split, pad, conv_pad
from .cumulative import cumsum
from .special import barrier

16 changes: 15 additions & 1 deletion python/hidet/graph/ops/definitions/activation.py
@@ -1,7 +1,7 @@
from typing import Optional
import math
from hidet.ir import primitives as prim
from hidet.ir.expr import const_like
from hidet.ir.expr import const_like, if_then_else
from .utils import Tensor
from .arithmatic import UnaryElementwiseOp

@@ -11,6 +11,16 @@ def __init__(self, x):
super().__init__(x, op=lambda v: prim.max(v, const_like(0.0, v)), name='relu')


class LeakyReluOp(UnaryElementwiseOp):
def __init__(self, x, alpha):
super().__init__(
x,
op=lambda v: if_then_else(v >= 0, v, v * const_like(alpha, v)),
name='leaky_relu',
attributes={'alpha': alpha},
)


class SigmoidOp(UnaryElementwiseOp):
def __init__(self, x):
super().__init__(x, op=lambda v: const_like(1.0, v) / (const_like(1.0, v) + prim.exp(-v)), name='sigmoid')
@@ -41,6 +51,10 @@ def relu(x) -> Tensor:
return ReluOp(x).get_output(0)


def leaky_relu(x: Tensor, alpha: float) -> Tensor:
return LeakyReluOp(x, alpha).get_output(0)
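
Note: a small numpy check of the elementwise definition above (illustrative only):

import numpy as np

x = np.array([-2.0, 0.0, 3.0], dtype=np.float32)
alpha = 0.1
y = np.where(x >= 0, x, alpha * x)  # mirrors if_then_else(v >= 0, v, alpha * v)
assert np.allclose(y, [-0.2, 0.0, 3.0])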


def sigmoid(x: Tensor) -> Tensor:
return SigmoidOp(x).get_output(0)

6 changes: 3 additions & 3 deletions python/hidet/graph/ops/definitions/conv2d/conv2d.py
@@ -1,4 +1,4 @@
from typing import List, Union
from typing import List, Union, Sequence
from hidet.graph.ops.definitions.utils import Task, Operator, Tensor, TensorNode
from hidet.graph.ops.definitions.utils import compute, input_like, normalize_stride, reduce

@@ -40,7 +40,7 @@ def __init__(self, data: TensorNode, weight: TensorNode, stride: List[int], grou


class Conv2dOp(Operator):
def __init__(self, x: Tensor, w: Tensor, stride: List[int], groups: int):
def __init__(self, x: Tensor, w: Tensor, stride: Sequence[int], groups: int):
stride = normalize_stride(stride)
super().__init__(
inputs=[x, w],
@@ -49,5 +49,5 @@ def __init__(self, x: Tensor, w: Tensor, stride: List[int], groups: int):
)


def conv2d(data: Tensor, weight: Tensor, stride: Union[int, List[int]], groups: int = 1) -> Tensor:
def conv2d(data: Tensor, weight: Tensor, stride: Union[int, Sequence[int]], groups: int = 1) -> Tensor:
return Conv2dOp(data, weight, stride, groups).get_output(0)
1 change: 1 addition & 0 deletions python/hidet/graph/ops/definitions/conv2d_transpose/__init__.py
@@ -0,0 +1 @@
from .conv2d_transpose import conv2d_transpose, Conv2dTransposeOp
88 changes: 88 additions & 0 deletions python/hidet/graph/ops/definitions/conv2d_transpose/conv2d_transpose.py
@@ -0,0 +1,88 @@
from typing import Sequence, Union
from hidet.ir.expr import if_then_else, And
from hidet.graph.ops.definitions.utils import Task, Operator, Tensor, TensorNode
from hidet.graph.ops.definitions.utils import compute, input_like, normalize_stride, reduce, normalize_padding


class Conv2dTransposeTask(Task):
def __init__(
self,
data: TensorNode,
weight: TensorNode,
stride: Sequence[int], # [sx, sy]
padding: Sequence[int], # [px0, py0, px1, py1]
groups: int,
output_padding: Sequence[int], # [opx, opy]
):
n, oc, p, q = data.const_shape()
oc, wc, kx, ky = weight.const_shape()
c = wc * groups
sx, sy = stride
px0, py0, px1, py1 = padding
h = (p - 1) * sx - px0 - px1 + kx + output_padding[0]
w = (q - 1) * sy - py0 - py1 + ky + output_padding[1]

if output_padding[0] >= stride[0] or output_padding[1] >= stride[1]:
raise ValueError(
'Conv2dTranspose expects output_padding < stride, \n'
'but got output_padding, stride: {}, {}'.format(output_padding, stride)
)
if any(p < 0 for p in padding):
raise ValueError('Negative padding is not supported.')

out_group_size = oc // groups
output = compute(
name='out',
shape=[n, c, h, w],
fcompute=lambda ni, ci, hi, wi: reduce(
shape=[out_group_size, kx, ky],
fcompute=lambda ogi, kxi, kyi: if_then_else(
cond=And.join(
hi + px0 >= kxi,
hi + px0 < p * sx + kxi,
(hi + px0 - kxi) % sx == 0,
wi + py0 >= kyi,
wi + py0 < q * sy + kyi,
(wi + py0 - kyi) % sy == 0,
),
then_expr=(
data[ni, (ci // wc) * out_group_size + ogi, (hi + px0 - kxi) // sx, (wi + py0 - kyi) // sy]
* weight[(ci // wc) * out_group_size + ogi, ci % wc, kxi, kyi]
),
else_expr=0.0,
),
reduce_type='sum',
),
)
super().__init__(name='conv2d_transpose', inputs=[data, weight], outputs=[output])
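
Note: a quick sanity check of the output-shape formula above, with hypothetical numbers; it agrees with PyTorch's conv_transpose2d shape rule:

# h = (p - 1) * sx - px0 - px1 + kx + output_padding[0]
p, sx, px0, px1, kx, opx = 4, 2, 1, 1, 3, 1
h = (p - 1) * sx - px0 - px1 + kx + opx
assert h == 8  # torch: (4 - 1) * 2 - 2 * 1 + (3 - 1) + 1 + 1 == 8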


class Conv2dTransposeOp(Operator):
def __init__(
self,
x: Tensor,
w: Tensor,
stride: Sequence[int],
padding: Sequence[int],
groups: int,
output_padding: Sequence[int],
):
stride = normalize_stride(stride)
padding = normalize_padding(padding)
output_padding = normalize_stride(output_padding)  # output_padding shares the [x, y] layout of stride, so reuse normalize_stride
super().__init__(
inputs=[x, w],
task=Conv2dTransposeTask(input_like(x, 'x'), input_like(w, 'w'), stride, padding, groups, output_padding),
attributes={'stride': stride, 'groups': groups, 'output_padding': output_padding},
)


def conv2d_transpose(
data: Tensor,
weight: Tensor,
stride: Union[int, Sequence[int]],
padding: Union[int, Sequence[int]],
groups: int = 1,
output_padding: Union[int, Sequence[int]] = 0,
) -> Tensor:
return Conv2dTransposeOp(data, weight, stride, padding, groups, output_padding).get_output(0)
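
Note: a minimal end-to-end usage sketch (illustrative only, assuming this commit is installed). The weight layout is [in_channels, out_channels // groups, kx, ky]:

import hidet

x = hidet.randn([1, 8, 4, 4])
w = hidet.randn([8, 4, 3, 3])
y = hidet.ops.conv2d_transpose(x, w, stride=2, padding=1, groups=1, output_padding=1)
print(y.shape)  # expected: [1, 4, 8, 8]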
File renamed without changes.
42 changes: 42 additions & 0 deletions tests/graph/operators/test_conv2d_transpose.py
@@ -0,0 +1,42 @@
import pytest
import numpy as np
import torch
import hidet


@pytest.mark.parametrize(
'in_channels, out_channels, kernel_size, stride, pads, groups, height, width, output_padding',
[[10, 20, (5, 5), (3, 2), [2, 1], 5, 11, 10, (2, 1)]],
)
def test_conv2d_transpose(in_channels, out_channels, kernel_size, stride, pads, groups, height, width, output_padding):
torch_data = torch.ones(1, in_channels, height, width, dtype=torch.float32).cuda()
torch_weight = torch.ones(
out_channels, in_channels // groups, kernel_size[0], kernel_size[1], dtype=torch.float32
).cuda()

torch_output = torch.nn.functional.conv2d(
torch_data, torch_weight, stride=stride, padding=pads, groups=groups, bias=None, dilation=1
)
hidet_data = hidet.from_torch(torch_data)
hidet_weight = hidet.from_torch(torch_weight)
hidet_output = hidet.ops.conv_pad(hidet_data, pads)
hidet_output = hidet.ops.conv2d(hidet_output, hidet_weight, stride, groups)
np.testing.assert_allclose(hidet_output.numpy(), torch_output.cpu().numpy(), atol=1e-5)
torch_transpose_output = torch.nn.functional.conv_transpose2d(
torch_output,
torch_weight,
stride=stride,
padding=pads,
groups=groups,
bias=None,
dilation=1,
output_padding=output_padding,
)
hidet_transpose_output = hidet.ops.conv2d_transpose(
hidet_output, hidet_weight, stride, pads, groups, output_padding=output_padding
)
np.testing.assert_allclose(hidet_transpose_output.numpy(), torch_transpose_output.cpu().numpy(), atol=1e-5)


if __name__ == '__main__':
pytest.main([__file__])
