[NNVM] Move FTVMCompute registration of cast, greater, less to C++. (#…
nishi-t authored and tqchen committed Jul 3, 2018
1 parent d27ff24 commit 035696f
Showing 3 changed files with 22 additions and 13 deletions.
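For context, the lambdas registered in the diffs below implement NNVM's FTVMCompute operator attribute: the Python-side register_compute hooks for cast, greater, and less are dropped, and equivalent lambdas are attached directly to the C++ op definitions, leaving only the pattern and schedule registrations in Python. Roughly, the attribute is a std::function of the following shape (a sketch based on nnvm/include/nnvm/compiler/op_attr_types.h of this era, quoted from memory rather than from this commit):

    // Produce the output tensors for one operator call, given its parsed
    // attributes, input placeholder tensors, and inferred output tensors.
    using FTVMCompute = std::function<
        tvm::Array<tvm::Tensor>(const nnvm::NodeAttrs& attrs,
                                const tvm::Array<tvm::Tensor>& inputs,
                                const tvm::Array<tvm::Tensor>& out_info)>;
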
13 changes: 0 additions & 13 deletions nnvm/python/nnvm/top/tensor.py
@@ -53,11 +53,6 @@ def _compute(attrs, x, _):
reg.register_schedule("copy", _fschedule_broadcast)

# cast
@reg.register_compute("cast")
def compute_cast(attrs, inputs, _):
"""Compute definition of cast"""
dtype = attrs.get_string("dtype")
return topi.cast(inputs[0], dtype)
reg.register_pattern("cast", OpPattern.ELEMWISE)
reg.register_schedule("cast", _fschedule_broadcast)

@@ -210,18 +205,10 @@ def compute_cast(attrs, inputs, _):
reg.register_schedule("ones_like", _fschedule_elemwise)

# greater
@reg.register_compute("greater")
def compute_greater(_, inputs, out_info):
"""Compute definition of greater"""
return topi.greater(inputs[0], inputs[1]).astype('float32')
reg.register_pattern("greater", OpPattern.ELEMWISE)
reg.register_schedule("greater", _fschedule_elemwise)

# less
@reg.register_compute("less")
def compute_less(_, inputs, out_info):
"""Compute definition of less"""
return topi.less(inputs[0], inputs[1]).astype('float32')
reg.register_pattern("less", OpPattern.ELEMWISE)
reg.register_schedule("less", _fschedule_elemwise)

12 changes: 12 additions & 0 deletions nnvm/src/top/tensor/elemwise.cc
@@ -781,6 +781,12 @@ with 1.0 if (left > right), otherwise 0.0 element-wise.
.add_argument("rhs", "Tensor", "Second input")
.set_num_inputs(2)
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const NodeAttrs& attrs,
const Array<Tensor>& inputs,
const Array<Tensor>& out_info) {
return Array<Tensor>{ topi::cast(topi::greater(inputs[0], inputs[1]), out_info[0]->dtype) };
})
.set_support_level(4);


@@ -793,6 +799,12 @@ with 1.0 if (left < right), otherwise 0.0 element-wise.
.add_argument("rhs", "Tensor", "Second input")
.set_num_inputs(2)
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const NodeAttrs& attrs,
const Array<Tensor>& inputs,
const Array<Tensor>& out_info) {
return Array<Tensor>{ topi::cast(topi::less(inputs[0], inputs[1]), out_info[0]->dtype) };
})
.set_support_level(4);

NNVM_REGISTER_INDICATOR_OP(_max_mask)
10 changes: 10 additions & 0 deletions nnvm/src/top/tensor/transform.cc
@@ -15,7 +15,9 @@
#include "../elemwise_op_common.h"
#include "topi/nn/flatten.h"
#include "topi/transform.h"
#include "topi/elemwise.h"
#include "topi/detail/constant_utils.h"
#include "../../compiler/compile_engine.h"

namespace nnvm {
namespace top {
@@ -413,6 +415,14 @@ NNVM_REGISTER_OP(cast)
.set_attr<FCorrectLayout>("FCorrectLayout", ElemwiseArbitraryLayout<1, 1>)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const NodeAttrs& attrs,
const Array<Tensor>& inputs,
const Array<Tensor>& out_info) {
const CastParam& param = nnvm::get<CastParam>(attrs.parsed);
Type dtype = GetTVMType(param.dtype);
return Array<Tensor>{ topi::cast(inputs[0], dtype) };
})
.set_support_level(1);


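These registrations are consumed by the NNVM compile engine when it lowers a graph: the FTVMCompute attribute is looked up per operator and invoked with the node's parsed attributes, its input placeholder tensors, and the inferred output tensors. A rough sketch of the consuming side (paraphrasing nnvm/src/compiler/compile_engine.cc from memory, not code from this commit; the names op, node, inputs, and out_info are illustrative):

    // Fetch the op -> compute-function map once; cast, greater, and less are
    // now registered in this map by the C++ code added above.
    static auto& fcompute =
        nnvm::Op::GetAttr<nnvm::compiler::FTVMCompute>("FTVMCompute");

    // For a graph node whose operator is op, build its output tensors.
    tvm::Array<tvm::Tensor> outputs =
        fcompute[op](node->attrs, inputs, out_info);

Frontend behaviour is unchanged by the move: graphs using these operators still lower through the same attribute, only the registration now lives in C++ rather than in nnvm/python/nnvm/top/tensor.py.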
