diff --git a/build-tools/make/build-with-docker.mk b/build-tools/make/build-with-docker.mk index 746edc352..c263cfb36 100644 --- a/build-tools/make/build-with-docker.mk +++ b/build-tools/make/build-with-docker.mk @@ -31,6 +31,7 @@ DOCKER_IMAGE_DOC ?= $(DOCKER_IMAGE_NAME_BASE)-doc:$(shell md5sum $(NNABLA_DIRECT DOCKER_IMAGE_BUILD ?= $(DOCKER_IMAGE_NAME_BASE)-build$(ARCH_SUFFIX):$(shell md5sum $(NNABLA_DIRECTORY)/docker/development/Dockerfile.build$(ARCH_SUFFIX) |cut -d \ -f 1) DOCKER_IMAGE_NNABLA ?= $(DOCKER_IMAGE_NAME_BASE)-nnabla:$(shell md5sum $(NNABLA_DIRECTORY)/docker/development/Dockerfile.build |cut -d \ -f 1) DOCKER_IMAGE_ONNX_TEST ?= $(DOCKER_IMAGE_NAME_BASE)-onnx-test$(ARCH_SUFFIX):$(shell md5sum $(NNABLA_DIRECTORY)/docker/development/Dockerfile.onnx-test$(ARCH_SUFFIX) |cut -d \ -f 1) +DOCKER_IMAGE_TF_TEST ?= $(DOCKER_IMAGE_NAME_BASE)-tf-test$(ARCH_SUFFIX):$(shell md5sum $(NNABLA_DIRECTORY)/docker/development/Dockerfile.tf-test |cut -d \ -f 1) ######################################################################################################################## # Docker images @@ -63,6 +64,12 @@ docker_image_onnx_test$(DOCKER_IMAGE_TARGET_SUFFIX): (cd $(NNABLA_DIRECTORY) && docker build $(DOCKER_BUILD_ARGS) -t $(DOCKER_IMAGE_ONNX_TEST) -f docker/development/Dockerfile.onnx-test$(ARCH_SUFFIX) .) \ fi +.PHONY: docker_image_tf_test$(DOCKER_IMAGE_TARGET_SUFFIX) +docker_image_tf_test$(DOCKER_IMAGE_TARGET_SUFFIX): + if ! docker image inspect $(DOCKER_IMAGE_TF_TEST) >/dev/null 2>/dev/null; then \ + docker pull $(shell cat $(NNABLA_DIRECTORY)/docker/development/Dockerfile.tf-test |grep ^FROM |awk '{print $$2}') && \ + (cd $(NNABLA_DIRECTORY) && docker build $(DOCKER_BUILD_ARGS) -t $(DOCKER_IMAGE_TF_TEST) -f docker/development/Dockerfile.tf-test .) 
\ + fi # for Android diff --git a/docker/development/Dockerfile.tf-test b/docker/development/Dockerfile.tf-test new file mode 100644 index 000000000..39ad577ba --- /dev/null +++ b/docker/development/Dockerfile.tf-test @@ -0,0 +1,133 @@ +# Copyright (c) 2017 Sony Corporation. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:16.04 + +ENV LC_ALL C +ENV LANG C +ENV LANGUAGE C + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + build-essential \ + bzip2 \ + ca-certificates \ + ccache \ + clang-format-3.8 \ + cmake \ + curl \ + g++ \ + git \ + libarchive-dev \ + libgoogle-glog-dev \ + libgtest-dev \ + libhdf5-dev \ + libiomp-dev \ + libleveldb-dev \ + liblmdb-dev \ + libopencv-dev \ + libopenmpi-dev \ + libprotobuf-dev \ + libsnappy-dev \ + libssl-dev \ + make \ + openmpi-bin \ + openmpi-doc \ + openssl \ + unzip \ + wget \ + zip \ + graphviz \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN cd /tmp \ + && curl -L https://www.libarchive.org/downloads/libarchive-3.3.2.tar.gz -o libarchive-3.3.2.tar.gz \ + && tar xfa libarchive-3.3.2.tar.gz \ + && mkdir libarchive-build \ + && cd libarchive-build \ + && cmake \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DENABLE_NETTLE=FALSE \ + -DENABLE_OPENSSL=FALSE \ + -DENABLE_LZO=FALSE \ + -DENABLE_LZMA=FALSE \ + -DENABLE_BZip2=FALSE \ + -DENABLE_LIBXML2=FALSE \ + -DENABLE_EXPAT=FALSE \ + -DENABLE_PCREPOSIX=FALSE \ + -DENABLE_LibGCC=FALSE \ + -DENABLE_CNG=FALSE \ + 
-DENABLE_TAR=FALSE \ + -DENABLE_TAR_SHARED=FALSE \ + -DENABLE_CPIO=FALSE \ + -DENABLE_CPIO_SHARED=FALSE \ + -DENABLE_CAT=FALSE \ + -DENABLE_CAT_SHARED=FALSE \ + -DENABLE_XATTR=FALSE \ + -DENABLE_ACL=FALSE \ + -DENABLE_ICONV=FALSE \ + -DENABLE_TEST=FALSE \ + ../libarchive-3.3.2 \ + && make \ + && make install \ + && cd / \ + && rm -rf /tmp/* + +ARG PYTHON_VERSION_MAJOR +ARG PYTHON_VERSION_MINOR +ENV PYVERNAME=${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR} + +ENV PATH /opt/miniconda3/bin:$PATH +ENV LD_LIBRARY_PATH /opt/miniconda3/lib:$LD_LIBRARY_PATH + +RUN set -xe \ + && umask 0 \ + && wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + && bash Miniconda3-latest-Linux-x86_64.sh -b -p /opt/miniconda3 \ + && rm -rf Miniconda3-latest-Linux-x86_64.sh \ + && if test ${PYTHON_VERSION_MAJOR} -eq 3 -a ${PYTHON_VERSION_MINOR} -eq 5; \ + then \ + conda install -y conda=4.5.11 python=${PYVERNAME}; \ + else \ + conda install -y python=${PYVERNAME}; \ + conda update -y --all; \ + fi \ + && conda install -y \ + boto3 \ + future \ + h5py \ + ipython \ + mako \ + 'numpy<1.16' \ + opencv \ + pip \ + protobuf \ + pytest \ + scikit-image \ + scipy \ + tqdm \ + wheel \ + virtualenv \ + && pip install pyyaml onnx==1.3.0 future Cython autopep8 requests \ + graphviz tensorflow onnx_tf \ + && pip install cntk || true \ + && conda install -y -c pytorch pytorch-nightly-cpu \ + && rm -rf /opt/miniconda3/pkgs + +RUN cd /tmp \ + && git clone https://github.com/onnx/tensorflow-onnx.git \ + && cd tensorflow-onnx \ + && python setup.py install \ + && rm -rf /tmp/tensorflow-onnx diff --git a/python/setup.py b/python/setup.py index f8d9be541..c2e06cdcf 100644 --- a/python/setup.py +++ b/python/setup.py @@ -247,6 +247,7 @@ def extopts(library_name, library_dir): 'nnabla.utils.converter.nnabla', 'nnabla.utils.converter.nnablart', 'nnabla.utils.converter.onnx', + 'nnabla.utils.converter.tensorflow', 'nnabla.utils.factorization', 'nnabla.utils.image_utils', 'nnabla_ext', diff 
--git a/python/src/nnabla/utils/converter/commands.py b/python/src/nnabla/utils/converter/commands.py index 589ae43d1..a6e782dd7 100644 --- a/python/src/nnabla/utils/converter/commands.py +++ b/python/src/nnabla/utils/converter/commands.py @@ -26,19 +26,32 @@ def _import_file(args, ifiles): - if len(ifiles) == 1 and os.path.splitext(ifiles[0])[1] == '.nnp': - args.import_format = 'NNP' - if len(ifiles) == 1 and os.path.splitext(ifiles[0])[1] == '.onnx': - args.import_format = 'ONNX' + if len(ifiles) == 1: + ext = os.path.splitext(ifiles[0])[1] + if ext == '.nnp': + args.import_format = 'NNP' + elif ext == '.onnx': + args.import_format = 'ONNX' + elif ext == '.pb': + args.import_format = "TF_PB" + elif ext == '.ckpt': + args.import_format = "TF_CKPT" + if args.import_format == 'NNP': # Input file that has unsupported extension store into output nnp # archive or directory. return NnpImporter(*ifiles, expand_network=not args.nnp_no_expand_network, executor_index=args.nnp_import_executor_index).execute() + elif args.import_format == 'ONNX': from .onnx import OnnxImporter return OnnxImporter(*ifiles).execute() + + elif args.import_format == 'TF_PB' or \ + args.import_format == 'TF_CKPT': + from .tensorflow import TensorflowImporter + return TensorflowImporter(*ifiles, tf_format=args.import_format).execute() return None @@ -152,6 +165,9 @@ def _export_from_nnp(args, nnp, output, output_ext): OnnxExporter(nnp, args.batch_size, opset=opset).execute(output) else: OnnxExporter(nnp, args.batch_size).execute(output) + elif output_ext == '.pb': + from .tensorflow import TensorflowExporter + TensorflowExporter(nnp, args.batch_size).execute(output) else: print('Output file ({})'.format(output_ext) + ' is not supported or output directory does not exist.') @@ -284,7 +300,7 @@ def convert_files(args, ifiles, output): else: return _export_from_nnp(args, nnp, output, output_ext) else: - print('Import from [{}] failed.'.format(ifiles)) + print('Import from {} failed.'.format(ifiles)) 
return False diff --git a/python/src/nnabla/utils/converter/onnx/exporter.py b/python/src/nnabla/utils/converter/onnx/exporter.py index 5f3985bae..e347e287f 100644 --- a/python/src/nnabla/utils/converter/onnx/exporter.py +++ b/python/src/nnabla/utils/converter/onnx/exporter.py @@ -1541,6 +1541,11 @@ def dump_graph(self): print("{} : {}".format(i, in_d[i])) print(node) + def export_model_proto(self): + self.create_model() + self.create_graph() + return self._model_proto + def execute(self, file_path): # if debug, please uncomment it. # self.dump_nnp(file_path) diff --git a/python/src/nnabla/utils/converter/onnx/importer.py b/python/src/nnabla/utils/converter/onnx/importer.py index 40a12aa4a..a71c56d18 100644 --- a/python/src/nnabla/utils/converter/onnx/importer.py +++ b/python/src/nnabla/utils/converter/onnx/importer.py @@ -440,7 +440,7 @@ def add_tensor_as_parameter(pb, tensor): class OnnxImporter: - def __init__(self, file_path): + def __init__(self, file_path=''): self._file_path = file_path # We use an OrderedDict and not a set @@ -1722,6 +1722,12 @@ class nnp: nnp.other_files = [] return nnp + def import_from_onnx_model(self, onnx_model): + self._ir_version = onnx_model.ir_version + self._graph = onnx_model.graph + self._opset_import = onnx_model.opset_import + def execute(self): - self.get_onnx_graph_info() + if self._file_path != '': + self.get_onnx_graph_info() return self.onnx_model_to_nnp_protobuf() diff --git a/python/src/nnabla/utils/converter/supported_info.py b/python/src/nnabla/utils/converter/supported_info.py index e72a0ab4e..8b37c555e 100644 --- a/python/src/nnabla/utils/converter/supported_info.py +++ b/python/src/nnabla/utils/converter/supported_info.py @@ -16,7 +16,7 @@ _SupportedInfo = collections.namedtuple( '_SupportedInfo', 'import_name export_name') -extensions = _SupportedInfo(import_name=['.nnp', '.onnx'], export_name=[ - '.nnp', '.nnb', '.onnx']) -formats = _SupportedInfo(import_name=['NNP', 'ONNX'], export_name=[ - 'NNP', 'NNB', 'CSRC', 
'ONNX']) +extensions = _SupportedInfo(import_name=['.nnp', '.onnx', '.ckpt', '.pb'], export_name=[ + '.nnp', '.nnb', '.onnx', '.ckpt', '.pb']) +formats = _SupportedInfo(import_name=['NNP', 'ONNX', 'TF_CKPT', 'TF_PB'], export_name=[ + 'NNP', 'NNB', 'CSRC', 'ONNX', 'TF_CKPT', 'TF_PB']) diff --git a/python/src/nnabla/utils/converter/tensorflow/__init__.py b/python/src/nnabla/utils/converter/tensorflow/__init__.py new file mode 100644 index 000000000..061da5aaa --- /dev/null +++ b/python/src/nnabla/utils/converter/tensorflow/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2017 Sony Corporation. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .importer import TensorflowImporter +from .exporter import TensorflowExporter diff --git a/python/src/nnabla/utils/converter/tensorflow/exporter.py b/python/src/nnabla/utils/converter/tensorflow/exporter.py new file mode 100644 index 000000000..498adcbe1 --- /dev/null +++ b/python/src/nnabla/utils/converter/tensorflow/exporter.py @@ -0,0 +1,28 @@ +# Copyright (c) 2019 Sony Corporation. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ..onnx import OnnxExporter +from onnx_tf.backend import prepare + + +class TensorflowExporter: + def __init__(self, nnp, batch_size): + self._nnp = nnp + self._batch_size = batch_size + + def execute(self, output): + onnx_model = OnnxExporter( + self._nnp, self._batch_size).export_model_proto() + tf_rep = prepare(onnx_model) + tf_rep.export_graph(output) diff --git a/python/src/nnabla/utils/converter/tensorflow/importer.py b/python/src/nnabla/utils/converter/tensorflow/importer.py new file mode 100644 index 000000000..e779b7678 --- /dev/null +++ b/python/src/nnabla/utils/converter/tensorflow/importer.py @@ -0,0 +1,135 @@ +# Copyright (c) 2019 Sony Corporation. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from ..onnx import OnnxImporter +import tensorflow as tf +import tf2onnx +from tf2onnx.graph import GraphUtil +from tensorflow.core.framework import graph_pb2 +from tensorflow.python.tools import freeze_graph +# import pdb + + +def _strip_node_name(name): + if name.startswith("^"): + return name[1:] + else: + return name.split(":")[0] + + +def _find_out_terminal_node(graph_def, **kwargs): + def add_postfix(names): + return ["{}:0".format(n) for n in names] + + unlike_output_types = ["Const", "Assign", "Noop", "Placeholder"] + terminal_inputs = [] + inputs = set() + outputs = set() + need_add_postfix = kwargs.get("postfix", False) + for node in graph_def.node: + strip_name = _strip_node_name(node.name) + if node.op == 'Placeholder': + terminal_inputs.append(_strip_node_name(node.name)) + outputs.add(strip_name) + inputs.update(set(node.input)) + terminal_outputs = list(filter(lambda x: x not in unlike_output_types, + outputs - inputs)) + if need_add_postfix: + terminal_inputs = add_postfix(terminal_inputs) + terminal_outputs = add_postfix(terminal_outputs) + + return terminal_inputs, terminal_outputs + + +class TensorflowImporter: + """ Import tensorflow model to nnp model. 
+ """ + + def __init__(self, *args, **kwargs): + self._tf_file = args[0] + self._tf_format = kwargs.get("tf_format", "TF_PB") + + def _import_from_tf_pb(self, graph_def): + inputs, outputs = _find_out_terminal_node(graph_def, postfix=True) + print("inputs:{}".format(inputs)) + print("outputs:{}".format(outputs)) + + # FIXME: folding const = False + graph_def = tf2onnx.tfonnx.tf_optimize( + inputs, outputs, graph_def, False) + with tf.Graph().as_default() as tf_graph: + tf.import_graph_def(graph_def, name='') + with tf.Session(graph=tf_graph): + onnx_graph = tf2onnx.tfonnx.process_tf_graph(tf_graph, + continue_on_error=False, + verbose=False, + target=",".join( + tf2onnx.tfonnx.DEFAULT_TARGET), + opset=6, + input_names=inputs, + output_names=outputs, + inputs_as_nchw=None) + model_proto = onnx_graph.make_model("tf_model") + new_model_proto = GraphUtil.opt_transposes_with_graph(onnx_graph, + 'tf_model', + optimize=True) + if new_model_proto: + model_proto = new_model_proto + return model_proto + + def import_from_tf_pb(self): + graph_def = graph_pb2.GraphDef() + with tf.gfile.GFile(self._tf_file, 'rb') as f: + graph_def.ParseFromString(f.read()) + return self._import_from_tf_pb(graph_def) + + def import_from_tf_ckpt(self): + ckpt_path = os.path.dirname(self._tf_file) + if not ckpt_path: + raise ValueError( + "check point file should be in a special directory.") + latest_ckpt = tf.train.latest_checkpoint(ckpt_path) + saver = tf.train.import_meta_graph(latest_ckpt + ".meta") + with tf.Session() as session: + session.run( + [ + tf.global_variables_initializer(), + tf.local_variables_initializer() + ] + ) + saver.restore(session, latest_ckpt) + graph_def = session.graph.as_graph_def(add_shapes=True) + frozen_graph = freeze_graph.freeze_graph_with_def_protos( + input_graph_def=graph_def, + input_saver_def=None, + input_checkpoint=latest_ckpt, + output_node_names="biases", + restore_op_name="", + filename_tensor_name="", + output_graph=None, + clear_devices=True, + 
initializer_nodes="" + ) + onnx_model = self._import_from_tf_pb(frozen_graph) + return onnx_model + + def execute(self): + if self._tf_format == 'TF_PB': + onnx_model = self.import_from_tf_pb() + elif self._tf_format == 'TF_CKPT': + onnx_model = self.import_from_tf_ckpt() + onnx_importer = OnnxImporter() + onnx_importer.import_from_onnx_model(onnx_model) + return onnx_importer.execute()