[Relay/Topi][Op] Added native DepthToSpace and SpaceToDepth Operators (apache#4566)

* Added tvm function stencil for subpixel operations to topi.

* Topi subpixel operators added and tested.

* Added subpixel attrs.

* Added depth_to_space relay attributes.

* depth_to_space fully working.

* Fixed NHWC shape bug.

* SpaceToDepth added and all tests passing.

* Lint fixes.

* Added string include.

* Fixed topi formatting.

* Added DCR/CRD mode to the depth_to_space operator.
jwfromm authored and zhiics committed Dec 31, 2019
1 parent 831796a commit a8792ba
Showing 16 changed files with 732 additions and 110 deletions.
20 changes: 20 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -821,6 +821,26 @@ struct DeformableConv2DAttrs : public tvm::AttrsNode<DeformableConv2DAttrs> {
}
};

/*! \brief Attributes used in subpixel operators */
struct SubPixelAttrs : public tvm::AttrsNode<SubPixelAttrs> {
int block_size;
std::string layout;
std::string mode;

TVM_DECLARE_ATTRS(SubPixelAttrs, "relay.attrs.SubPixelAttrs") {
TVM_ATTR_FIELD(block_size)
.describe("The size of subpixel blocks to compose or decompose.")
.set_default(1);
TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
"Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
"'N', 'C', 'H', 'W' stand for batch, channel, height, and width "
"dimensions respectively.");
TVM_ATTR_FIELD(mode).set_default("DCR").describe(
"Indicates order in which channels are accessed. Must be one of "
"DCR or CRD.");
}
}; // struct SubPixelAttrs

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
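
The mode attribute is the subtle part of this struct: DCR and CRD differ only in how the channel axis is split before the rearrangement. A minimal NumPy sketch of the NCHW semantics (an editorial illustration; the helper name is not part of the commit):

import numpy as np

def depth_to_space_ref(x, b, mode="DCR"):
    """NumPy reference for NCHW depth_to_space; b is the block size."""
    n, c, h, w = x.shape
    new_c = c // (b * b)
    if mode == "DCR":
        # 'DCR': channel is split as (b, b, new_c) -- the ONNX default.
        t = x.reshape(n, b, b, new_c, h, w).transpose(0, 3, 4, 1, 5, 2)
    else:
        # 'CRD': channel is split as (new_c, b, b).
        t = x.reshape(n, new_c, b, b, h, w).transpose(0, 1, 4, 2, 5, 3)
    return t.reshape(n, new_c, h * b, w * b)

x = np.arange(16, dtype="float32").reshape(1, 4, 2, 2)
assert depth_to_space_ref(x, 2, "DCR").shape == (1, 1, 4, 4)
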
50 changes: 2 additions & 48 deletions python/tvm/relay/frontend/onnx.py
@@ -540,34 +540,7 @@ def _impl_v11(cls, inputs, attr, params):

block_size = int(attr['blocksize'])
mode = attr.get("mode", "DCR")

# handle NCHW layout
indata = infer_value_simulated(inputs[0], params)
in_n, in_c, in_h, in_w = indata.shape

# reshape to proper output
new_c = int(in_c / (block_size * block_size))
new_h = in_h * block_size
new_w = in_w * block_size
newshape = (in_n, new_c, new_h, new_w)

if mode == "DCR":
# expand input to larger dimension.
expanded = _op.reshape(inputs[0],
newshape=(in_n, block_size, block_size, new_c, in_h, in_w))
# reorder to expand spatial blocks.
transposed = _op.transpose(expanded, axes=(0, 3, 4, 1, 5, 2))

else: # CRD mode
# expand input to larger dimension.
expanded = _op.reshape(inputs[0],
newshape=(in_n, new_c, block_size, block_size, in_h, in_w))
# reorder to expand spatial blocks.
transposed = _op.transpose(expanded, axes=(0, 1, 4, 2, 5, 3))

return AttrCvt(op_name="reshape",
extras={'newshape': newshape},
ignores=['mode', 'blocksize'])([transposed], attr)
return _op.nn.depth_to_space(inputs[0], block_size, mode=mode)
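
Because the converter now emits the native op, a single-node model imports without infer_value_simulated having to materialize the input shape. A hedged sketch of exercising this path, assuming the standard onnx.helper API and the from_onnx signature of this era (names and shapes are illustrative):

from onnx import helper, TensorProto
from tvm import relay

# A one-node DepthToSpace graph with block size 2 in DCR mode.
node = helper.make_node("DepthToSpace", ["x"], ["y"], blocksize=2, mode="DCR")
graph = helper.make_graph(
    [node], "d2s_example",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, (1, 8, 2, 3))],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, (1, 2, 4, 6))])
model = helper.make_model(graph)
mod, params = relay.frontend.from_onnx(model, shape={"x": (1, 8, 2, 3)})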


class SpaceToDepth(OnnxOpConverter):
@@ -578,26 +551,7 @@ class SpaceToDepth(OnnxOpConverter):
def _impl_v1(cls, inputs, attr, params):

block_size = int(attr['blocksize'])

# handle NCHW layout
indata = infer_value_simulated(inputs[0], params)
in_n, in_c, in_h, in_w = indata.shape

# reshape to proper output
new_c = in_c * (block_size * block_size)
new_h = int(in_h / block_size)
new_w = int(in_w / block_size)
newshape = (in_n, new_c, new_h, new_w)

# expand input to larger dimension.
expanded = _op.reshape(inputs[0],
newshape=(in_n, in_c, new_h, block_size, new_w, block_size))
# reorder to expand spatial blocks.
transposed = _op.transpose(expanded, axes=(0, 3, 5, 1, 2, 4))

return AttrCvt(op_name="reshape",
extras={'newshape': newshape},
ignores=['blocksize'])([transposed], attr)
return _op.nn.space_to_depth(inputs[0], block_size)
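
For comparison, the decomposition the old SpaceToDepth converter emitted, written as standalone NumPy with illustrative sizes:

import numpy as np

# The reshape/transpose pipeline the deleted code built, for NCHW input.
n, c, h, w, b = 1, 2, 4, 6, 2
x = np.arange(n * c * h * w, dtype="float32").reshape(n, c, h, w)
expanded = x.reshape(n, c, h // b, b, w // b, b)
transposed = expanded.transpose(0, 3, 5, 1, 2, 4)
out = transposed.reshape(n, c * b * b, h // b, w // b)
assert out.shape == (1, 8, 2, 3)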


class Concat(OnnxOpConverter):
66 changes: 4 additions & 62 deletions python/tvm/relay/frontend/tensorflow.py
@@ -728,76 +728,18 @@ def _impl(inputs, attr, params):

def _depth_to_space():
def _impl(inputs, attr, params):
# Need to handle data layouts differently.
input_shape = attr['_input_shapes'][inputs[0]]
block_size = int(attr['block_size'])
if attr['data_format'].decode("utf-8") == 'NHWC':
in_n, in_h, in_w, in_c = input_shape
new_c = int(in_c / (block_size * block_size))

# First expand input to larger dimension.
expanded = _op.reshape(
inputs[0], newshape=(in_n, in_h, in_w, block_size, block_size, new_c))
# Now reorder to expand spatial blocks.
transposed = _op.transpose(expanded, axes=(0, 1, 3, 2, 4, 5))
# Finally reshape to proper output.
new_h = in_h * block_size
new_w = in_w * block_size
newshape = (in_n, new_h, new_w, new_c)

else: # Handle NCHW layout
in_n, in_c, in_h, in_w = input_shape
new_c = int(in_c / (block_size * block_size))

expanded = _op.reshape(
inputs[0], newshape=(in_n, block_size, block_size, new_c, in_h, in_w))
transposed = _op.transpose(expanded, axes=(0, 3, 4, 1, 5, 2))
new_h = in_h * block_size
new_w = in_w * block_size
newshape = (in_n, new_c, new_h, new_w)

return AttrCvt(
op_name="reshape",
extras={'newshape': newshape},
ignores=['data_format', 'block_size'])([transposed], attr)
layout = attr['data_format'].decode("utf-8")
return _op.nn.depth_to_space(inputs[0], block_size, layout)

return _impl


def _space_to_depth():
def _impl(inputs, attr, params):
# Need to handle data layouts differently.
input_shape = attr['_input_shapes'][inputs[0]]
block_size = int(attr['block_size'])
if attr['data_format'].decode("utf-8") == 'NHWC':
in_n, in_h, in_w, in_c = input_shape
new_h = int(in_h / block_size)
new_w = int(in_w / block_size)

# First expand input to larger dimension.
expanded = _op.reshape(
inputs[0], newshape=(in_n, new_h, block_size, new_w, block_size, in_c))
# Now reorder to expand spatial blocks.
transposed = _op.transpose(expanded, axes=(0, 1, 3, 2, 4, 5))
# Finally reshape to proper output.
new_c = in_c * block_size * block_size
newshape = (in_n, new_h, new_w, new_c)

else: # Handle NCHW layout
in_n, in_c, in_h, in_w = input_shape
new_h = int(in_h / block_size)
new_w = int(in_w / block_size)

expanded = _op.reshape(
inputs[0], newshape=(in_n, in_c, new_h, block_size, new_w, block_size))
transposed = _op.transpose(expanded, axes=(0, 3, 5, 1, 2, 4))
new_c = int(in_c * block_size * block_size)
newshape = (in_n, new_c, new_h, new_w)

return AttrCvt(
op_name="reshape",
extras={'newshape': newshape},
ignores=['data_format', 'block_size'])([transposed], attr)
layout = attr['data_format'].decode("utf-8")
return _op.nn.space_to_depth(inputs[0], block_size, layout)

return _impl
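
Both TensorFlow converters now reduce to a layout-aware call. The deleted NHWC branch of _depth_to_space corresponds to the following NumPy rearrangement (an editorial illustration with made-up sizes):

import numpy as np

# NHWC depth_to_space in DCR mode, mirroring the deleted NHWC branch.
n, h, w, c, b = 1, 2, 3, 8, 2
x = np.arange(n * h * w * c, dtype="float32").reshape(n, h, w, c)
new_c = c // (b * b)
expanded = x.reshape(n, h, w, b, b, new_c)
out = expanded.transpose(0, 1, 3, 2, 4, 5).reshape(n, h * b, w * b, new_c)
assert out.shape == (1, 4, 6, 2)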

22 changes: 22 additions & 0 deletions python/tvm/relay/op/nn/_nn.py
@@ -904,6 +904,28 @@ def compute_cross_entropy_with_logits(attrs, inputs, out_dtype, target):
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]


@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype, target):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]

reg.register_schedule("nn.depth_to_space", schedule_injective)
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)


@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype, target):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]

reg.register_schedule("nn.space_to_depth", schedule_injective)
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
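
These registrations bind the Relay ops to the TOPI kernels and mark them injective so they can fuse with neighboring elementwise ops. A hedged sketch of driving the TOPI compute directly, assuming the tvm/topi API of this era:

import tvm
import topi

# Declare the computation and schedule it as an injective op, as the
# registrations above arrange for the Relay op.
data = tvm.placeholder((1, 8, 2, 3), name="data", dtype="float32")
out = topi.nn.depth_to_space(data, block_size=2, layout="NCHW", mode="DCR")
with tvm.target.create("llvm"):
    s = topi.generic.schedule_injective(out)
f = tvm.build(s, [data, out], target="llvm", name="depth_to_space")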


# shape func
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
50 changes: 50 additions & 0 deletions python/tvm/relay/op/nn/nn.py
@@ -2079,3 +2079,53 @@ def cross_entropy_with_logits(predictions, targets):
The computed result.
"""
return _make.cross_entropy_with_logits(predictions, targets)


def depth_to_space(data, block_size, layout='NCHW', mode='DCR'):
"""Convert channels into spatial blocks.

Parameters
----------
data : tvm.relay.Expr
Input data with channels divisible by block_size**2
block_size : int
Size of blocks to convert channels into.
layout : string
One of NCHW or NHWC, indicates channel axis.
mode : string
One of DCR or CRD, indicates the order in which
channels are accessed.

Returns
-------
result : tvm.relay.Expr
Tensor with shape [in_batch, in_channel / (block_size * block_size),
in_height * block_size, in_width * block_size]
"""
return _make.depth_to_space(data, block_size, layout, mode)


def space_to_depth(data, block_size, layout='NCHW'):
"""Convert spatial blocks into channels.

Parameters
----------
data : tvm.relay.Expr
Input data with spatial dimensions divisible by block_size
block_size : int
Size of blocks to decompose into channels.
layout : string
One of NCHW or NHWC, indicates channel axis.

Returns
-------
result : tvm.relay.Expr
Tensor with shape [in_batch, in_channel * block_size * block_size,
in_height / block_size, in_width / block_size]
"""
return _make.space_to_depth(data, block_size, layout)
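
A hedged usage sketch of the two wrappers with the Relay interpreter of this era; in DCR mode space_to_depth is the exact inverse of depth_to_space:

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(1, 8, 2, 3), dtype="float32")
d2s = relay.nn.depth_to_space(x, block_size=2, layout="NCHW", mode="DCR")
roundtrip = relay.nn.space_to_depth(d2s, block_size=2, layout="NCHW")
func = relay.Function([x], roundtrip)

data = np.random.uniform(size=(1, 8, 2, 3)).astype("float32")
intrp = relay.create_executor("graph", ctx=tvm.cpu(), target="llvm")
out = intrp.evaluate(func)(data).asnumpy()
# The DCR rearrangement followed by its inverse restores the input.
np.testing.assert_allclose(out, data)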
5 changes: 5 additions & 0 deletions python/tvm/relay/op/op_attrs.py
@@ -299,3 +299,8 @@ class BinaryDenseAttrs(Attrs):
@register_relay_attr_node
class Conv2DTransposeAttrs(Attrs):
"""Attributes used in Transposed Conv2D operators"""


@register_relay_attr_node
class SubPixelAttrs(Attrs):
"""Attributes used in depth to space and space to depth operators"""
118 changes: 118 additions & 0 deletions src/relay/op/nn/nn.cc
@@ -31,6 +31,7 @@
#include <topi/nn/softmax.h>
#include <topi/nn/flatten.h>
#include <vector>
#include <string>
#include "../type_relations.h"
#include "../../pass/alter_op_layout.h"
#include "../op_common.h"
@@ -959,6 +960,123 @@ Accept logits.
.set_support_level(10)
.add_type_rel("CrossEntropy", CrossEntropyRel);

// Depth to space and space to depth
TVM_REGISTER_NODE_TYPE(SubPixelAttrs);

bool DepthToSpaceRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 2);
const auto* data = types[0].as<TensorTypeNode>();
if (data == nullptr) return false;

static const Layout kNCHW("NCHW");

const SubPixelAttrs* param = attrs.as<SubPixelAttrs>();
CHECK(param != nullptr);
const int block_size = param->block_size;
const Layout in_layout(param->layout);
auto layout_converter = BijectiveLayoutNode::make(in_layout, kNCHW);
CHECK(layout_converter.defined())
<< "DepthToSpace only supports input layouts that are convertible from NCHW."
<< " But got " << in_layout;

auto oshape = layout_converter.ForwardShape(data->shape);
oshape.Set(1, indexdiv(oshape[1], (block_size * block_size)));
oshape.Set(2, oshape[2] * block_size);
oshape.Set(3, oshape[3] * block_size);

// Assign output type
reporter->Assign(types[1],
TensorTypeNode::make(layout_converter.BackwardShape(oshape), data->dtype));

return true;
}

// Positional relay function to create DepthToSpace operator
// used by frontend FFI
Expr MakeDepthToSpace(Expr data, int block_size, std::string layout, std::string mode) {
auto attrs = make_node<SubPixelAttrs>();
attrs->block_size = block_size;
attrs->layout = std::move(layout);
attrs->mode = std::move(mode);
static const Op& op = Op::Get("nn.depth_to_space");
return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.depth_to_space").set_body_typed(MakeDepthToSpace);

RELAY_REGISTER_OP("nn.depth_to_space")
.describe(R"code(Rearrange input channels into spatial pixels.
- **data**: data is a 4D array of shape
(batch, in_channels, in_height, in_width) for NCHW
- **out**: Output is a 4D array of shape
(batch, in_channels / (block_size * block_size), in_height * block_size, in_width * block_size) for NCHW.
)code" TVM_ADD_FILELINE)
.set_attrs_type<SubPixelAttrs>()
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor")
.set_support_level(5)
.add_type_rel("DepthToSpace", DepthToSpaceRel);
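
The relation converts any permutation of NCHW to canonical order, scales the axes, and converts back. In Python pseudocode for plain permutation layouts (the real BijectiveLayout also handles split layouts such as NCHW4c; the helper is hypothetical):

# Hypothetical helper illustrating DepthToSpaceRel's shape arithmetic.
def depth_to_space_out_shape(shape, layout, block_size):
    idx = {dim: i for i, dim in enumerate(layout)}       # e.g. NHWC -> {'N': 0, ...}
    n, c, h, w = (shape[idx[d]] for d in "NCHW")         # ForwardShape to NCHW
    nchw = [n, c // (block_size * block_size), h * block_size, w * block_size]
    return tuple(nchw["NCHW".index(d)] for d in layout)  # BackwardShape to layout

assert depth_to_space_out_shape((1, 2, 3, 8), "NHWC", 2) == (1, 4, 6, 2)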

bool SpaceToDepthRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(types.size(), 2);
const auto* data = types[0].as<TensorTypeNode>();
if (data == nullptr) return false;

static const Layout kNCHW("NCHW");

const SubPixelAttrs* param = attrs.as<SubPixelAttrs>();
CHECK(param != nullptr);
const int block_size = param->block_size;
const Layout in_layout(param->layout);
auto layout_converter = BijectiveLayoutNode::make(in_layout, kNCHW);
CHECK(layout_converter.defined())
<< "SpaceToDepth only supports input layouts that are convertible from NCHW."
<< " But got " << in_layout;

auto oshape = layout_converter.ForwardShape(data->shape);
oshape.Set(1, oshape[1] * (block_size * block_size));
oshape.Set(2, indexdiv(oshape[2], block_size));
oshape.Set(3, indexdiv(oshape[3], block_size));

// Assign output type
reporter->Assign(types[1],
TensorTypeNode::make(layout_converter.BackwardShape(oshape), data->dtype));

return true;
}

// Positional relay function to create SpaceToDepth operator
// used by frontend FFI
Expr MakeSpaceToDepth(Expr data, int block_size, std::string layout) {
auto attrs = make_node<SubPixelAttrs>();
attrs->block_size = block_size;
attrs->layout = std::move(layout);
static const Op& op = Op::Get("nn.space_to_depth");
return CallNode::make(op, {data}, Attrs(attrs), {});
}

TVM_REGISTER_API("relay.op.nn._make.space_to_depth").set_body_typed(MakeSpaceToDepth);

RELAY_REGISTER_OP("nn.space_to_depth")
.describe(R"code(Rearrange spatial pixels into new output channels.
- **data**: data is a 4D array of shape
(batch, in_channels, in_height, in_width) for NCHW
- **out**: Output is a 4D array of shape
(batch, in_channels * block_size * block_size, in_height / block_size, in_width / block_size) for NCHW.
)code" TVM_ADD_FILELINE)
.set_attrs_type<SubPixelAttrs>()
.set_num_inputs(1)
.add_argument("data", "Tensor", "The input tensor")
.set_support_level(5)
.add_type_rel("SpaceToDepth", SpaceToDepthRel);

} // namespace relay
} // namespace tvm
