Skip to content

Commit

Permalink
[onert] Adding shape inference for Abs and its test case (Samsung#1380)
Browse files Browse the repository at this point in the history
* [onert] Adding shape inference for Abs and its test case

This adds shape inference for Abs and its test case.

Signed-off-by: Hyun Sik Yoon <hyunsik.yoon.1024@gmail.com>

* fix compilation error after rebasing
  • Loading branch information
hyunsik-yoon committed May 26, 2020
1 parent a78d9f6 commit 995abab
Show file tree
Hide file tree
Showing 10 changed files with 162 additions and 34 deletions.
16 changes: 16 additions & 0 deletions runtime/onert/core/include/util/ShapeInference.h
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ class StaticInferer : public ir::OperationVisitor
private:
// TODO Define visitors for operations. List them in alphabetic order.
// Remove TODO when any op starting from the alphabet is added
void visit(const ir::operation::Abs &op);
void visit(const ir::operation::Add &op);
void visit(const ir::operation::Concat &op);
// TODO write op starting from D
Expand All @@ -118,6 +119,13 @@ class StaticInferer : public ir::OperationVisitor
// TODO write op starting from U
// TODO write op starting from Z

private:
/**
 * @brief Performs shape inference for a unary op whose output shape is
 *        always the same as its input shape
 */
void handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx);

private:
ir::Operands &_operands;
};
Expand All @@ -143,6 +151,7 @@ class DynamicInferer : public ir::OperationVisitor
public:
// TODO Define visitors for operations. List them in alphabetic order.
// Remove TODO when any op starting from the alphabet is added
void visit(const ir::operation::Abs &op);
void visit(const ir::operation::Add &op);
// TODO write op starting from C
// TODO write op starting from D
Expand All @@ -159,6 +168,13 @@ class DynamicInferer : public ir::OperationVisitor
// TODO write op starting from U
// TODO write op starting from Z

private:
/**
 * @brief Performs shape inference and memory allocation for a unary op whose
 *        output shape is always the same as its input shape
 */
void handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx);

private:
/**
* @brief To get operand-level info, e.g., ir::Operand::isConstant()
Expand Down
35 changes: 35 additions & 0 deletions runtime/onert/core/src/util/shapeinf/Abs.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "util/ShapeInference.h"

namespace onert
{
namespace shape_inference
{

// Compile-time shape inference for Abs: delegate to the shared unary handler,
// since Abs never changes the shape of its operand.
void StaticInferer::visit(const ir::operation::Abs &op)
{
  const auto input_idx{op.getInputs().at(ir::operation::Abs::Input::INPUT)};
  handleSimpleUnaryOp(op, input_idx);
}

// Run-time shape inference for Abs: delegate to the shared unary handler.
// NOTE Use the fully-qualified `Abs::Input::INPUT` (not the bare `Abs::INPUT`)
//      to stay consistent with StaticInferer::visit above; both resolve to the
//      same unscoped-enum value.
void DynamicInferer::visit(const ir::operation::Abs &op)
{
  handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Abs::Input::INPUT));
}

} // namespace shape_inference
} // namespace onert
39 changes: 39 additions & 0 deletions runtime/onert/core/src/util/shapeinf/ShapeInference.cc
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,26 @@ ir::Shape inferMaxPoolShape(const ir::Shape &in_shape, const ir::operation::MaxP
- For visit() of each operator, find each op's C file
*/

// Shared compile-time inference for unary ops whose output shape always equals
// the input shape: either propagate the dynamic flag or copy the input shape
// onto the output operand.
void StaticInferer::handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx)
{
  const auto &input_operand = _operands.at(input_idx);
  auto &output_operand = _operands.at(op.getOutputs().at(0));

  if (input_operand.info().isDynamic())
  {
    // A dynamic input makes the output dynamic as well; its shape is
    // only known at execution time.
    output_operand.info().setDynamic();
  }
  else
  {
    // Static input: the output shape is exactly the input shape.
    output_operand.info().shape(input_operand.info().shape());
  }
}

void StaticInferer::dump()
{
auto get_shape_str = [](const ir::Shape &shape) {
Expand Down Expand Up @@ -269,5 +289,24 @@ void StaticInferer::visit(const ir::operation::Concat &op)
- For visit() of each operator, find each op's C file
*/

// Shared run-time inference for unary ops whose output shape always equals the
// input shape: copy the input tensor's current shape to the output and allocate
// the output buffer accordingly.
void DynamicInferer::handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_ind)
{
  const auto output_ind = op.getOutputs().at(0);
  auto output = _tensor_registry->getITensor(output_ind);

  // Statically-shaped outputs were already sized at compile time; nothing to do.
  if (!output->is_dynamic())
    return;

  auto input = _tensor_registry->getITensor(input_ind);
  const auto output_shape = getShape(input.get());

  // Propagate the shape, then back it with freshly allocated memory.
  setShape(output.get(), output_shape);
  _dynamic_tensor_manager->allocate(output_ind, output_shape);
  assert(output->buffer() != nullptr);
}

} // namespace shape_inference
} // namespace onert
35 changes: 2 additions & 33 deletions runtime/onert/core/src/util/shapeinf/Tanh.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,43 +23,12 @@ namespace shape_inference

// Compile-time shape inference for Tanh: output shape always equals input
// shape, so delegate to the shared unary handler.
// NOTE(review): this span of the diff capture interleaved the deleted old body
// with the added one-liner; this is the post-commit form of the function.
void StaticInferer::visit(const ir::operation::Tanh &op)
{
  handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Tanh::Input::INPUT));
}

// Run-time shape inference for Tanh: delegate to the shared unary handler.
// NOTE Use the fully-qualified `Tanh::Input::INPUT` (not the bare `Tanh::INPUT`)
//      for consistency with StaticInferer::visit above; both resolve to the
//      same unscoped-enum value.
// NOTE(review): this span of the diff capture interleaved the deleted old body
// with the added one-liner; this is the post-commit form of the function.
void DynamicInferer::visit(const ir::operation::Tanh &op)
{
  handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Tanh::Input::INPUT));
}

} // namespace shape_inference
Expand Down
1 change: 1 addition & 0 deletions tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_cl
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
GeneratedTests.abs_
GeneratedTests.abs_dynamic_nnfw
GeneratedTests.add_dynamic
GeneratedTests.cast_float16_to_float16
GeneratedTests.cast_float16_to_float32
Expand Down
1 change: 1 addition & 0 deletions tests/nnapi/nnapi_gtest.skip.aarch64-linux.acl_neon
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
GeneratedTests.abs_
GeneratedTests.abs_dynamic_nnfw
GeneratedTests.add_dynamic
GeneratedTests.cast_float16_to_float16
GeneratedTests.cast_float16_to_float32
Expand Down
1 change: 1 addition & 0 deletions tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_cl
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
GeneratedTests.abs_
GeneratedTests.abs_dynamic_nnfw
GeneratedTests.add_dynamic
GeneratedTests.cast_float16_to_float16
GeneratedTests.cast_float16_to_float32
Expand Down
2 changes: 1 addition & 1 deletion tests/nnapi/nnapi_gtest.skip.armv7l-linux.acl_neon
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
GeneratedTests.abs_
GeneratedTests.abs_dynamic
GeneratedTests.abs_dynamic_nnfw
GeneratedTests.add_dynamic
GeneratedTests.cast_float16_to_float16
GeneratedTests.cast_float16_to_float32
Expand Down
1 change: 1 addition & 0 deletions tests/nnapi/nnapi_gtest.skip.noarch.interp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ GeneratedTests.abs_1D_float_nnfw
GeneratedTests.abs_2D_float_nnfw
GeneratedTests.abs_3D_float_nnfw
GeneratedTests.abs_4D_float_nnfw
GeneratedTests.abs_dynamic_nnfw
GeneratedTests.add_broadcast_quant8
GeneratedTests.add_dynamic
GeneratedTests.add_quant8
Expand Down
65 changes: 65 additions & 0 deletions tests/nnapi/specs/V1_2/abs_dynamic_nnfw.mod.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
#
# Copyright (C) 2018 The Android Open Source Project
# Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# refer to tanh_v1_dynamic.mod.py about the structure

# This adds reshape as the first op in a model and
# returns output of reshape, which is dynamic tensor

'''
Testing Abs op when the input is dynamic.
input [1, 2, 3] shape [3] (value of shape will be [1, 2, 3])
| |
+-------------+
|
Reshape (added by DynamicInputGenerator since it generates its output to be dynamic)
|
| dynamic tensor at compilation time but the shape will be [1, 2, 3] at execution time
|
Abs
|
output (dynamic tensor, [1, 2, 3] at execution time)
'''

import dynamic_tensor

# `Model`, `Output`, `Example` are injected by the NNAPI test generator runtime,
# not imported here.
model = Model()

# Shape the input will have at execution time (compile-time shape is dynamic).
model_input_shape = [1, 2, 3]

# Prepends a Reshape op whose output is a dynamic tensor; that output feeds Abs.
dynamic_layer = dynamic_tensor.DynamicInputGenerator(model, model_input_shape)

test_node_input = dynamic_layer.getTestNodeInput()

# write ABS test. input is `test_node_input` (the dynamic output of Reshape)

# note: this output shape is only used as the expected output's shape;
# the model-level output tensor itself is dynamic until execution.
model_output = Output("output", "TENSOR_FLOAT32", "{1, 2, 3}")

model.Operation("ABS", test_node_input).To(model_output)

model_input_data = [1, -2, 3, -4, 5, -6]
model_output_data = [1, 2, 3, 4, 5, 6]  # elementwise |x| of the input data

Example({
  # use these two as input: the data tensor and the shape tensor fed to Reshape
  dynamic_layer.getModelInput(): model_input_data,
  dynamic_layer.getShapeInput() : model_input_shape,

  model_output: model_output_data,
})

0 comments on commit 995abab

Please sign in to comment.