diff --git a/CMakeLists.txt b/CMakeLists.txt
index 121d96f08..b9356a902 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -234,7 +234,7 @@ if (NOT USE_PRE_BUILT_NGRAPH)
     ExternalProject_Add(
         ext_ngraph
         GIT_REPOSITORY https://github.com/NervanaSystems/ngraph
-        GIT_TAG v0.25.0-rc.2
+        GIT_TAG v0.25.0-rc.3
         CMAKE_ARGS
             -DNGRAPH_DISTRIBUTED_ENABLE=${NGRAPH_DISTRIBUTED_ENABLE}
             -DNGRAPH_INSTALL_PREFIX=${NGRAPH_ARTIFACTS_DIR}
diff --git a/README.md b/README.md
index 4bc0ca978..e69ba8ed5 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ Once TensorFlow's dependencies are installed, clone the `ngraph-bridge` repo:
 
         git clone https://github.com/tensorflow/ngraph-bridge.git
         cd ngraph-bridge
-        git checkout v0.18.0-rc1
+        git checkout v0.18.0-rc2
 
 Run the following Python script to build TensorFlow, nGraph, and the bridge. Use Python 3.5:
 
diff --git a/bazel/WORKSPACE b/bazel/WORKSPACE
index 75ae8df76..a242b3e27 100644
--- a/bazel/WORKSPACE
+++ b/bazel/WORKSPACE
@@ -25,11 +25,11 @@ tf_configure(
 
 http_archive(
     name = "ngraph",
     build_file = "//:bazel/ngraph.BUILD",
-    sha256 = "54bffb90bb6ed8081d549958368b4eb95b8544ff59bc38a0783e6e8b3e623d48",
-    strip_prefix = "ngraph-0.25.0-rc.2",
+    sha256 = "0b0cbd617653552d219c05bf975acfbcac513061a7b04465a71db324a9d9d7e3",
+    strip_prefix = "ngraph-0.25.0-rc.3",
     urls = [
-        "https://mirror.bazel.build/github.com/NervanaSystems/ngraph/archive/v0.25.0-rc.2.tar.gz",
-        "https://github.com/NervanaSystems/ngraph/archive/v0.25.0-rc.2.tar.gz"
+        "https://mirror.bazel.build/github.com/NervanaSystems/ngraph/archive/v0.25.0-rc.3.tar.gz",
+        "https://github.com/NervanaSystems/ngraph/archive/v0.25.0-rc.3.tar.gz"
     ],
 )
diff --git a/build_ngtf.py b/build_ngtf.py
index 3688f0195..4b5a0d4ae 100755
--- a/build_ngtf.py
+++ b/build_ngtf.py
@@ -53,7 +53,7 @@ def main():
     '''
 
     # Component versions
-    ngraph_version = "v0.25.0-rc.2"
+    ngraph_version = "v0.25.0-rc.3"
     tf_version = "v1.14.0"
 
     # Command line parser options
diff --git a/ngraph_bridge/ngraph_builder.cc b/ngraph_bridge/ngraph_builder.cc
index b41a4de3c..5f9d67b39 100644
--- a/ngraph_bridge/ngraph_builder.cc
+++ b/ngraph_bridge/ngraph_builder.cc
@@ -2513,6 +2513,21 @@ static Status TranslateLogSoftmaxOp(
   return Status::OK();
 }
 
+static Status TranslateSoftplusOp(
+    const Node* op, const std::vector<const Tensor*>& static_input_map,
+    Builder::OpMap& ng_op_map) {
+  shared_ptr<ng::Node> ng_inp;
+  TF_RETURN_IF_ERROR(GetInputNodes(ng_op_map, op, &ng_inp));
+  auto ng_exp = ConstructNgNode<ng::op::Exp>(op->name(), ng_inp);
+  auto constant_1 = ConstructNgNode<ng::op::Constant>(
+      op->name(), ng_inp->get_element_type(), ng_inp->get_shape(),
+      std::vector<std::string>(ng::shape_size(ng_inp->get_shape()), "1"));
+  auto ng_output = ConstructNgNode<ng::op::Log>(
+      op->name(), ConstructNgNode<ng::op::Add>(op->name(), ng_exp, constant_1));
+  SaveNgOp(ng_op_map, op->name(), ng_output);
+  return Status::OK();
+}
+
 static Status TranslateMatMulOp(
     const Node* op, const std::vector<const Tensor*>& static_input_map,
     Builder::OpMap& ng_op_map) {
@@ -4870,7 +4885,7 @@ const static std::map<
       {"Sigmoid", TranslateSigmoidOp}, {"SigmoidGrad", TranslateSigmoidGradOp},
       {"Size", TranslateSizeOp}, {"Sign", TranslateUnaryOp<ng::op::Sign>},
      {"Slice", TranslateSliceOp}, {"Snapshot", TranslateIdentityOp},
-      {"Softmax", TranslateSoftmaxOp},
+      {"Softmax", TranslateSoftmaxOp}, {"Softplus", TranslateSoftplusOp},
       {"SpaceToDepth", TranslateSpaceToDepthOp},
       {"SparseSoftmaxCrossEntropyWithLogits",
        TranslateSparseSoftmaxCrossEntropyWithLogitsOp},
diff --git a/ngraph_bridge/ngraph_mark_for_clustering.cc b/ngraph_bridge/ngraph_mark_for_clustering.cc
index 6aa4fea2f..28c03ef1e 100644
--- a/ngraph_bridge/ngraph_mark_for_clustering.cc
+++ b/ngraph_bridge/ngraph_mark_for_clustering.cc
@@ -375,6 +375,7 @@ Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
     confirmation_function_map["Slice"] = SimpleConfirmationFunction();
     confirmation_function_map["Snapshot"] = SimpleConfirmationFunction();
     confirmation_function_map["Softmax"] = SimpleConfirmationFunction();
+    confirmation_function_map["Softplus"] = SimpleConfirmationFunction();
     confirmation_function_map["SpaceToDepth"] =
         confirmation_function_map["DepthToSpace"];
     confirmation_function_map["SparseSoftmaxCrossEntropyWithLogits"] =
@@ -569,6 +570,7 @@ Status MarkForClustering(Graph* graph, const std::set<string> skip_these_nodes,
     type_constraint_map["Slice"]["Index"] = NGraphIndexDTypes();
     type_constraint_map["Snapshot"]["T"] = NGraphDTypes();
     type_constraint_map["Softmax"]["T"] = NGraphNumericDTypes();
+    type_constraint_map["Softplus"]["T"] = NGraphRealDTypes();
     type_constraint_map["SpaceToDepth"]["T"] = NGraphDTypes();
     type_constraint_map["SparseSoftmaxCrossEntropyWithLogits"]["T"] =
         NGraphNumericDTypes();
diff --git a/ngraph_bridge/version.cc b/ngraph_bridge/version.cc
index 84b3b41fc..150364056 100644
--- a/ngraph_bridge/version.cc
+++ b/ngraph_bridge/version.cc
@@ -32,7 +32,7 @@
 // candidate such as v0.7.0-rc0
 // The code in master will always have the last released version number
 // with a suffix of '-master'
-#define NG_TF_VERSION_SUFFIX "-rc1"
+#define NG_TF_VERSION_SUFFIX "-rc2"
 
 #define VERSION_STR_HELPER(x) #x
 #define VERSION_STR(x) VERSION_STR_HELPER(x)
diff --git a/python/setup.in.py b/python/setup.in.py
index acfc7dfae..c51716246 100644
--- a/python/setup.in.py
+++ b/python/setup.in.py
@@ -59,7 +59,7 @@ def get_tag(self):
 
 setup(
     name='ngraph_tensorflow_bridge',
-    version='0.18.0rc1',
+    version='0.18.0rc2',
     description='Intel nGraph compiler and runtime for TensorFlow',
     long_description=long_description,
     long_description_content_type="text/markdown",
diff --git a/test/python/tensorflow/python_tests_list.txt b/test/python/tensorflow/python_tests_list.txt
index 11444d4de..4432ba54f 100644
--- a/test/python/tensorflow/python_tests_list.txt
+++ b/test/python/tensorflow/python_tests_list.txt
@@ -344,7 +344,7 @@ reduction_ops_test.MeanReductionTest.testEmptyGradients
 reduction_ops_test.MeanReductionTest.testFloat32
 reduction_ops_test.MeanReductionTest.testFloat64
 reduction_ops_test.MeanReductionTest.testGradient
-#reduction_ops_test.MeanReductionTest.testInfinity
+reduction_ops_test.MeanReductionTest.testInfinity
 reduction_ops_test.MeanReductionTest.testInt32
 
 reduction_ops_test.MinReductionTest.testAxesType
@@ -368,7 +368,7 @@ reduction_ops_test.SumReductionTest.testFloat32
 reduction_ops_test.SumReductionTest.testFloat64
 reduction_ops_test.SumReductionTest.testGradient
 reduction_ops_test.SumReductionTest.testHighRank
-#reduction_ops_test.SumReductionTest.testInfinity
+reduction_ops_test.SumReductionTest.testInfinity
 reduction_ops_test.SumReductionTest.testInt32
 
 reduction_ops_test.SumReductionTest.testPartialShapes
diff --git a/test/python/tensorflow/python_tests_list_gpu.txt b/test/python/tensorflow/python_tests_list_gpu.txt
index e3310b33b..f833ac398 100644
--- a/test/python/tensorflow/python_tests_list_gpu.txt
+++ b/test/python/tensorflow/python_tests_list_gpu.txt
@@ -334,7 +334,7 @@ reduction_ops_test.MeanReductionTest.testEmptyGradients
 reduction_ops_test.MeanReductionTest.testFloat32
 reduction_ops_test.MeanReductionTest.testFloat64
 reduction_ops_test.MeanReductionTest.testGradient
-#reduction_ops_test.MeanReductionTest.testInfinity
+reduction_ops_test.MeanReductionTest.testInfinity
 reduction_ops_test.MeanReductionTest.testInt32
 
 reduction_ops_test.MinReductionTest.testAxesType
@@ -358,7 +358,7 @@ reduction_ops_test.SumReductionTest.testFloat32
 reduction_ops_test.SumReductionTest.testFloat64
 reduction_ops_test.SumReductionTest.testGradient
 #reduction_ops_test.SumReductionTest.testHighRank
-#reduction_ops_test.SumReductionTest.testInfinity
+reduction_ops_test.SumReductionTest.testInfinity
 reduction_ops_test.SumReductionTest.testInt32
 
 reduction_ops_test.SumReductionTest.testPartialShapes
diff --git a/test/python/tensorflow/python_tests_list_mac.txt b/test/python/tensorflow/python_tests_list_mac.txt
index 9a4abd6eb..5873d869a 100644
--- a/test/python/tensorflow/python_tests_list_mac.txt
+++ b/test/python/tensorflow/python_tests_list_mac.txt
@@ -344,7 +344,7 @@ reduction_ops_test.MeanReductionTest.testEmptyGradients
 reduction_ops_test.MeanReductionTest.testFloat32
 reduction_ops_test.MeanReductionTest.testFloat64
 reduction_ops_test.MeanReductionTest.testGradient
-#reduction_ops_test.MeanReductionTest.testInfinity
+reduction_ops_test.MeanReductionTest.testInfinity
 reduction_ops_test.MeanReductionTest.testInt32
 
 reduction_ops_test.MinReductionTest.testAxesType
@@ -368,7 +368,7 @@ reduction_ops_test.SumReductionTest.testFloat32
 reduction_ops_test.SumReductionTest.testFloat64
 reduction_ops_test.SumReductionTest.testGradient
 reduction_ops_test.SumReductionTest.testHighRank
-#reduction_ops_test.SumReductionTest.testInfinity
+reduction_ops_test.SumReductionTest.testInfinity
 reduction_ops_test.SumReductionTest.testInt32
 
 reduction_ops_test.SumReductionTest.testPartialShapes
diff --git a/test/python/test_softplus.py b/test/python/test_softplus.py
new file mode 100644
index 000000000..25b7ff684
--- /dev/null
+++ b/test/python/test_softplus.py
@@ -0,0 +1,55 @@
+# ==============================================================================
+# Copyright 2018-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""nGraph TensorFlow softplus test
+
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import pytest
+
+import numpy as np
+import tensorflow as tf
+
+from common import NgraphTest
+
+
+class TestSoftplus(NgraphTest):
+
+    def test_softplus(self):
+        x = tf.placeholder(tf.float32, shape=(2, 3))
+        y = tf.placeholder(tf.float32, shape=(2, 3))
+        z = tf.placeholder(tf.float32, shape=(2, 3))
+
+        a = x + y + z
+        b = x + y + z
+        c = a * b
+        d = tf.nn.softplus(c)
+
+        # input value and expected value
+        x_np = np.full((2, 3), 1.0)
+        y_np = np.full((2, 3), 1.0)
+        z_np = np.full((2, 3), 1.0)
+
+        sess_fn = lambda sess: sess.run((a, c, d),
+                                        feed_dict={
+                                            x: x_np,
+                                            y: y_np,
+                                            z: z_np
+                                        })
+        assert np.allclose(
+            self.with_ngraph(sess_fn), self.without_ngraph(sess_fn))
diff --git a/test/test_nn_ops.cpp b/test/test_nn_ops.cpp
index f898acc39..63c14ae65 100644
--- a/test/test_nn_ops.cpp
+++ b/test/test_nn_ops.cpp
@@ -1575,6 +1575,30 @@ TEST(NNOps, SoftmaxZeroDimTest2) {
   opexecuter.RunTest();
 }
 
+// Test Op :"Softplus"
+TEST(NNOps, Softplus) {
+  std::vector<std::vector<int64>> input_sizes = {
+      {3}, {3, 2}, {5, 6}, {3, 4, 5}, {2, 3, 4, 5}};
+
+  vector<int> static_input_indexes = {};
+
+  for (auto const& input_size : input_sizes) {
+    Scope root = Scope::NewRootScope();
+
+    Tensor input_data(DT_FLOAT, TensorShape(input_size));
+    AssignInputValuesRandom<float>(input_data, -2, 2);
+
+    auto R = ops::Softplus(root, input_data);
+    vector<DataType> output_datatypes = {DT_FLOAT};
+    std::vector<Output> sess_run_fetchoutputs = {R};
+
+    OpExecuter opexecuter(root, "Softplus", static_input_indexes,
+                          output_datatypes, sess_run_fetchoutputs);
+
+    opexecuter.RunTest();
+  }
+}
+
 // Computes softmax cross entropy cost and gradients to backpropagate.
 TEST(NNOps, SparseSoftmaxCrossEntropyWithLogits) {
   Scope root = Scope::NewRootScope();
diff --git a/tools/test_utils.py b/tools/test_utils.py
index b9d750ce9..f94458128 100755
--- a/tools/test_utils.py
+++ b/tools/test_utils.py
@@ -124,7 +124,9 @@ def run_ngtf_pytests(venv_dir, build_dir):
     command_executor(["pip", "install", "-U", "pytest"])
     command_executor(["pip", "install", "-U", "psutil"])
 
-    cmd = 'python -m pytest ' + ('--junitxml=%s/xunit_pytest.xml' % build_dir)
+    cmd = 'python -m pytest ' + (
+        '--junitxml=%s/xunit_pytest.xml' % build_dir
+    ) + " --ignore=" + build_dir + "/test/python/bfloat16"
 
     env = os.environ.copy()
     new_paths = venv_dir + '/bin/python3:' + os.path.abspath(
         build_dir) + ":" + os.path.abspath(mnist_dir)
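
The new `TranslateSoftplusOp` above builds softplus out of existing nGraph primitives as `log(exp(x) + 1)`: an `Exp` node, an `Add` with a constant tensor of ones, then a `Log`. As a quick sanity check of that formulation (not part of the patch; the NumPy snippet below is purely illustrative), the sketch compares it against `np.logaddexp(0, x)`, which evaluates the same function in a numerically stable way:

```python
import numpy as np

# Illustrative check only: the nGraph graph built by TranslateSoftplusOp
# computes softplus(x) as log(exp(x) + 1).
x = np.linspace(-5.0, 5.0, 11).astype(np.float32)

naive = np.log(np.exp(x) + 1.0)  # Exp -> Add(constant 1) -> Log, as in the bridge
stable = np.logaddexp(0.0, x)    # numerically stable softplus reference

assert np.allclose(naive, stable)
```

Note that the naive form overflows once `exp(x)` saturates float32 (roughly x > 88); the new tests stay clear of that regime by drawing inputs from small ranges such as [-2, 2].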
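
To exercise only the new Python test locally, one option (a sketch under assumptions: the `ngraph_tensorflow_bridge` wheel and TensorFlow are installed in the active environment, and the working directory is `test/python` of the checkout) is to drive pytest programmatically:

```python
import sys

import pytest

# Assumes the current directory is test/python of an ngraph-bridge checkout
# with the bridge and TensorFlow installed; -v prints each test case.
sys.exit(pytest.main(["-v", "test_softplus.py"]))
```

The C++ `NNOps.Softplus` case is picked up by the existing gtest suite in `test/test_nn_ops.cpp`; the patch adds no new test infrastructure.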