Commit

Merge branch 'feature/20171211-file-format-converter' of ml-git.ubiq.sony.co.jp:nnabla/nnabla into feature/20171211-file-format-converter
YukioOobuchi committed Mar 19, 2018
2 parents 0ae28fa + 3120c76 commit 9cddd96
Showing 10 changed files with 127 additions and 97 deletions.
18 changes: 0 additions & 18 deletions doc/functions.rst
@@ -460,15 +460,6 @@ Global average pooling. It pools an averaged value from the whole image
- Input variable.
-

-* Argument(s)
-
-.. list-table::
-
-  * - Name
-    - Type
-    - Default
-    - Description
-
* Output(s)

.. list-table::
@@ -3309,15 +3300,6 @@ In the backward pass, the simple Straight-Through Estimator (STE) is applied,
- Input variable
-

-* Argument(s)
-
-.. list-table::
-
-  * - Name
-    - Type
-    - Default
-    - Description
-
* Output(s)

.. list-table::
31 changes: 14 additions & 17 deletions include/nbla/function/global_average_pooling.hpp
@@ -1,11 +1,11 @@
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -29,22 +29,16 @@ NBLA_REGISTER_FUNCTION_HEADER(GlobalAveragePooling);
@copydetails BasePooling
\ingroup FunctionImplGrp
*/
-template <typename T>
-class GlobalAveragePooling : public BaseFunction<> {
+template <typename T> class GlobalAveragePooling : public BaseFunction<> {
protected:
public:
GlobalAveragePooling(const Context &ctx) : BaseFunction<>(ctx) {}
virtual ~GlobalAveragePooling() {}
virtual shared_ptr<Function> copy() const {
    return create_GlobalAveragePooling(ctx_);
  }
-  virtual vector<dtypes> in_types() {
-    return vector<dtypes>{ get_dtype<T>() };
-  }
-  virtual vector<dtypes> out_types() {
-    return vector<dtypes>{ get_dtype<T>() };
-  }
+  virtual vector<dtypes> in_types() { return vector<dtypes>{get_dtype<T>()}; }
+  virtual vector<dtypes> out_types() { return vector<dtypes>{get_dtype<T>()}; }
virtual int min_inputs() { return 1; }
virtual int min_outputs() { return 1; }
virtual string name() { return "GlobalAveragePooling"; }
@@ -53,11 +47,14 @@ class GlobalAveragePooling : public BaseFunction<> {
}

protected:
-  NBLA_API virtual void setup_impl(const Variables &inputs, const Variables &outputs);
-  NBLA_API virtual void forward_impl(const Variables &inputs, const Variables &outputs);
-  NBLA_API virtual void backward_impl(const Variables &inputs, const Variables &outputs,
+  NBLA_API virtual void setup_impl(const Variables &inputs,
+                                   const Variables &outputs);
+  NBLA_API virtual void forward_impl(const Variables &inputs,
+                                     const Variables &outputs);
+  NBLA_API virtual void backward_impl(const Variables &inputs,
+                                      const Variables &outputs,
                                       const vector<bool> &propagate_down,
                                       const vector<bool> &accum);
};
}
#endif
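For reference, global average pooling reduces each feature map to a single averaged value, as the functions.rst entry above describes. A minimal numpy sketch of the forward semantics (illustrative only, not the NNabla implementation):

import numpy as np

x = np.random.randn(8, 16, 32, 32).astype(np.float32)  # N, C, H, W
y = x.mean(axis=(2, 3), keepdims=True)                  # one value per map: (8, 16, 1, 1)
print(y.shape)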
2 changes: 2 additions & 0 deletions python/pytest.ini
@@ -1,2 +1,4 @@
[pytest]
#addopts = -vv -s -k test_sliced_data_iterator
#addopts = -vv -s -k test_logical
#addopts = -vv -s -x
1 change: 1 addition & 0 deletions python/setup.py
@@ -33,6 +33,7 @@
'contextlib2',
'futures',
'h5py',
+        'onnx',
'protobuf',
'requests',
'scikit-image',
30 changes: 18 additions & 12 deletions python/src/nnabla/utils/converter/onnx/exporter.py
@@ -18,7 +18,7 @@
from .utils import *
from onnx import (ModelProto, TensorProto, TensorShapeProto)

# Dictionary used to convert NNabla function names to ONNX op_type
nnabla_function_type_to_onnx_optype = {
"ReLU": "Relu",
"Concatenate": "Concat",
@@ -30,10 +30,10 @@

def convert_to_node(func, variables):
n = onnx.helper.make_node(
        nnabla_function_type_to_onnx_optype.get(func.type, func.type),
        func.input,
        func.output,
        name=func.name)
if func.type == "Concatenate":
# ONNX requires axis setting as a parameter
# for the concat op_type.
@@ -56,7 +56,8 @@ def convert_to_node(func, variables):
elif func.type == "MaxPooling":
mpp = func.max_pooling_param
if not mpp.ignore_border:
raise ValueError("MaxPooling with ignore_border=False is not supported")
raise ValueError(
"MaxPooling with ignore_border=False is not supported")
# Copy kernel, stride, and pads values
k = onnx.helper.make_attribute("kernel_shape", mpp.kernel.dim)
s = onnx.helper.make_attribute("strides", mpp.stride.dim)
@@ -80,7 +81,8 @@ def convert_to_node(func, variables):
weight_shape = weight_var[0].shape
# The base axis for weights is the next axis from the data's base axis
weight_base = cp.base_axis + 1
k = onnx.helper.make_attribute("kernel_shape", weight_shape.dim[weight_base:])
k = onnx.helper.make_attribute(
"kernel_shape", weight_shape.dim[weight_base:])
d = onnx.helper.make_attribute("dilations", cp.dilation.dim)
s = onnx.helper.make_attribute("strides", cp.stride.dim)
p = onnx.helper.make_attribute("pads", cp.pad.dim)
@@ -92,7 +94,7 @@
# "Conv" or "Pool" contained.
# Caffe2 issue is here:
# https://github.com/caffe2/caffe2/issues/1971
    # Because a GlobalAveragePooling operator does not contain a kernel, we get an error at the
# following code if we have a specific name.
# https://github.com/caffe2/caffe2/blob/master/caffe2/operators/conv_pool_op_base.h#L167
# The above caffe2 code should be checking the node's operator name and not the node's name.
@@ -110,13 +112,16 @@

def nnp_model_to_onnx_graph(graph, nnp):
if len(nnp.network) != 1:
raise ValueError("NNP with only a single network is currently supported")
raise ValueError(
"NNP with only a single network is currently supported")
if len(nnp.executor) != 1:
raise ValueError("NNP with only a single executor is currently supported")
raise ValueError(
"NNP with only a single executor is currently supported")
net = nnp.network[0]
exe = nnp.executor[0]
if exe.network_name != net.name:
raise ValueError("Names of the included network and executor's target network do not match")
raise ValueError(
"Names of the included network and executor's target network do not match")
graph.name = net.name
# store all variable shape info to use later
var_dict = {}
Expand All @@ -130,7 +135,8 @@ def nnp_model_to_onnx_graph(graph, nnp):
init = graph.initializer.add()
init.name = param.variable_name
init.dims.extend(param.shape.dim)
-        init.data_type = TensorProto.FLOAT  # We should be only getting float data from NNabla
+        # We should be only getting float data from NNabla
+        init.data_type = TensorProto.FLOAT
init.raw_data = struct.pack("{}f".format(len(param.data)), *param.data)
# init.float_data.extend(param.data)

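The struct.pack call near the end of this file serializes each NNP parameter into the ONNX initializer's raw_data as consecutive 32-bit floats. A quick round-trip sketch of that packing (hypothetical values; native byte order, as in the code above):

import struct

data = [0.5, -1.25, 3.0]  # example parameter values, exactly representable as float32
raw = struct.pack("{}f".format(len(data)), *data)      # e.g. format "3f": three float32 values
restored = struct.unpack("{}f".format(len(data)), raw)
assert list(restored) == data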
36 changes: 24 additions & 12 deletions python/src/nnabla/utils/converter/onnx/reader.py
@@ -53,7 +53,8 @@ def onnx_value_info_proto_to_variable(info, network):
def convert_to_function(node, base_name, func_counter):
"""Convert given node to corresponding function"""
func = nnabla_pb2.Function()
-    func.type = onnx_optype_to_nnabla_function_type.get(node.op_type, node.op_type)
+    func.type = onnx_optype_to_nnabla_function_type.get(
+        node.op_type, node.op_type)
# NNabla requires each function to have a unique name.
# If the node's name already has something set,
# we are going to use it.
@@ -103,7 +104,8 @@ def convert_to_function(node, base_name, func_counter):
for attr in node.attribute:
if attr.name == "is_test":
if attr.type != AttributeProto.INT:
raise ValueError("Dropout is_test must be a single integer")
raise ValueError(
"Dropout is_test must be a single integer")
if attr.i != 0:
# is_test is True meaning we will not be applying dropout.
# We are simply going to pass through the input values
@@ -139,22 +141,26 @@ def convert_to_function(node, base_name, func_counter):
# (it will be inferred from weight input)
if attr.name == "pads":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for pads in Conv op_type")
raise ValueError(
"Only INTS are supported for pads in Conv op_type")
pads.extend(attr.ints)
dims.append(len(pads))
elif attr.name == "strides":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for strides in Conv op_type")
raise ValueError(
"Only INTS are supported for strides in Conv op_type")
strides.extend(attr.ints)
dims.append(len(strides))
elif attr.name == "dilations":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for dilations in Conv op_type")
raise ValueError(
"Only INTS are supported for dilations in Conv op_type")
dilations.extend(attr.ints)
dims.append(len(dilations))
elif attr.name == "group":
if attr.type != AttributeProto.INT:
raise ValueError("Only INT is supported for group in Conv op_type")
raise ValueError(
"Only INT is supported for group in Conv op_type")
cp.group = attr.int
# NNabla requires for the dimensions of strides, pads, dilations to match.
# We align the dimensions for all three attributes to the shortest one
@@ -178,17 +184,20 @@
for attr in node.attribute:
if attr.name == "strides":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for strides in MaxPool op_type")
raise ValueError(
"Only INTS are supported for strides in MaxPool op_type")
strides.extend(attr.ints)
dims.append(len(strides))
elif attr.name == "pads":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for pads in MaxPool op_type")
raise ValueError(
"Only INTS are supported for pads in MaxPool op_type")
pads.extend(attr.ints)
dims.append(len(pads))
elif attr.name == "kernel_shape":
if attr.type != AttributeProto.INTS:
raise ValueError("Only INTS are supported for kernel_shape in MaxPool op_type")
raise ValueError(
"Only INTS are supported for kernel_shape in MaxPool op_type")
kernel.extend(attr.ints)
dims.append(len(kernel))
# NNabla requires for the dimensions of strides, pads, kernels to match.
@@ -219,7 +228,8 @@ def onnx_graph_to_nnp_protobuf(pb, graph):
for n in graph.node:
# We do not allow any operator from an unknown domain
if not (n.domain == '' or n.domain == NNABLA_DOMAIN):
raise ValueError("Unsupported operator from domain {} was found".format(n.domain))
raise ValueError(
"Unsupported operator from domain {} was found".format(n.domain))
f = convert_to_function(n, graph.name, func_counter)
# Gather all unique names for input and output
for i in f.input:
@@ -312,9 +322,11 @@ def onnx_model_to_nnp_protobuf(model):
# ONNX opset.
# Check if we have the correct version
if opset.version < MIN_ONNX_OPSET_VERSION:
raise ValueError("Older ONNX opsets are currently not supported")
raise ValueError(
"Older ONNX opsets are currently not supported")
else:
raise ValueError("Unsupported opset from domain {}".format(opset.domain))
raise ValueError(
"Unsupported opset from domain {}".format(opset.domain))

# convert onnx model to nnabla protobuf
# logger.log(99, "Converting ONNX made by {}.".format(model.producer_name))
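The converter above repeatedly checks attr.type against AttributeProto before reading the matching typed field (ints for INTS, i for INT). A small self-contained sketch of that pattern, assuming the onnx package is installed:

from onnx import AttributeProto, helper

pads = helper.make_attribute("pads", [1, 1, 1, 1])
assert pads.type == AttributeProto.INTS
print(list(pads.ints))  # [1, 1, 1, 1]

group = helper.make_attribute("group", 2)
assert group.type == AttributeProto.INT
print(group.i)          # 2; integer attributes are read from attr.i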
2 changes: 0 additions & 2 deletions python/src/nnabla/utils/converter/onnx/utils.py
@@ -7,5 +7,3 @@

SOFTMAX_WARNING = """Softmax on NNabla will calculate on the specified axis ONLY. If the incoming tensor is two dimensional (for example N*C*1*1),
NNabla's Softmax and ONNX's Softmax should match. If the incoming tensor has more than two dimensions, the Softmax results may differ."""
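A small numpy illustration of this caveat: softmax along a single axis normalizes each fiber of that axis independently, so for tensors with more than two effective dimensions the result differs from a flatten-then-softmax. A sketch (not NNabla's implementation):

import numpy as np

def softmax(x, axis):
    e = np.exp(x - x.max(axis=axis, keepdims=True))  # numerically stable
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.randn(2, 3, 4, 5).astype(np.float32)
y = softmax(x, axis=1)
print(np.allclose(y.sum(axis=1), 1.0))  # True: sums to one along the chosen axis only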


6 changes: 4 additions & 2 deletions python/src/nnabla/utils/converter/supported_info.py
@@ -15,5 +15,7 @@
import collections

_SupportedInfo = collections.namedtuple('_SupportedInfo', 'read export')
-extensions = _SupportedInfo(read=['.nnp', '.onnx'], export=['.nnp', '.nnb', '.onnx'])
-formats = _SupportedInfo(read=['NNP', 'ONNX'], export=['NNP', 'NNB', 'CSRC', 'ONNX'])
+extensions = _SupportedInfo(read=['.nnp', '.onnx'], export=[
+    '.nnp', '.nnb', '.onnx'])
+formats = _SupportedInfo(read=['NNP', 'ONNX'], export=[
+    'NNP', 'NNB', 'CSRC', 'ONNX'])
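A hypothetical lookup against these tuples (the can_read helper below is illustrative, not part of the converter):

import collections
import os

_SupportedInfo = collections.namedtuple('_SupportedInfo', 'read export')
extensions = _SupportedInfo(read=['.nnp', '.onnx'],
                            export=['.nnp', '.nnb', '.onnx'])

def can_read(path):
    # membership test on the file extension
    return os.path.splitext(path)[1] in extensions.read

print(can_read("model.onnx"))  # True
print(can_read("model.pb"))    # False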
