From 2d26ee04901a47726e010c3c59787816d8fbe658 Mon Sep 17 00:00:00 2001
From: daquexian
Date: Sun, 10 Nov 2019 23:42:35 +0800
Subject: [PATCH 1/5] Ignore batch size and improve error msg in onnx2bnn

---
 tools/onnx2bnn/OnnxConverter.cpp | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/tools/onnx2bnn/OnnxConverter.cpp b/tools/onnx2bnn/OnnxConverter.cpp
index 5da62e3..25433b4 100644
--- a/tools/onnx2bnn/OnnxConverter.cpp
+++ b/tools/onnx2bnn/OnnxConverter.cpp
@@ -191,10 +191,10 @@ std::vector<std::string> OnnxConverter::Convert(
     // Please check out "dabnn_*" pases in
     // https://github.com/daquexian/onnx/blob/optimizer_for_bnn/onnx/optimizer/passes
     // for details.
-    vector<string> optimizers{"eliminate_nop_pad",
-                              "extract_constant_to_initializer",
-                              "dabnn_convert_gemm_with_reshape_or_flatten_to_conv_and_reshape",
-                              "dabnn_bconv_strict"};
+    vector<string> optimizers{
+        "eliminate_nop_pad", "extract_constant_to_initializer",
+        "dabnn_convert_gemm_with_reshape_or_flatten_to_conv_and_reshape",
+        "dabnn_bconv_strict"};
     if (level == Level::kModerate || level == Level::kAggressive) {
         optimizers.push_back("dabnn_bconv_moderate");
     }
@@ -231,13 +231,23 @@ std::vector<std::string> OnnxConverter::Convert(
         }
 
         Shape shape;
-        for (const auto &dim : input.type().tensor_type().shape().dim()) {
+        const auto &dims = input.type().tensor_type().shape().dim();
+        FORZ(i, dims.size()) {
+            if (i == 0) {
+                // We ignore the value of batch dimension since dabnn doesn't
+                // support batch input
+                shape.push_back(1);
+                continue;
+            }
+            const auto &dim = dims[i];
             if (dim.value_case() ==
                 ONNX_NAMESPACE::TensorShapeProto_Dimension::kDimValue) {
                 shape.push_back(static_cast<uint32_t>(dim.dim_value()));
             } else {
                 throw std::invalid_argument(
-                    "The input of graph doesn't have dim_value");
+                    "Dim " + std::to_string(i) + " of input \"" + input.name() +
+                    "\" is not static, please re-export your ONNX model with "
+                    "static input shape");
             }
         }
         Shape nhwc_shape{shape[0], shape[2], shape[3], shape[1]};
@@ -248,7 +258,6 @@ std::vector<std::string> OnnxConverter::Convert(
     }
 
     vector<string> binary_conv_outputs;
-    vector<string> skipped_act;
     bool has_reshape = false;
     for (const auto &node : model_proto_.graph().node()) {
         if (has_reshape) {

From c7c41bed9e6c5e1d544fc14eaf2ee4ef396800fb Mon Sep 17 00:00:00 2001
From: daquexian
Date: Sun, 10 Nov 2019 23:47:17 +0800
Subject: [PATCH 2/5] Support reshape before gemm

---
 tools/onnx2bnn/OnnxConverter.cpp | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tools/onnx2bnn/OnnxConverter.cpp b/tools/onnx2bnn/OnnxConverter.cpp
index 25433b4..6f0da4f 100644
--- a/tools/onnx2bnn/OnnxConverter.cpp
+++ b/tools/onnx2bnn/OnnxConverter.cpp
@@ -260,12 +260,14 @@ std::vector<std::string> OnnxConverter::Convert(
     vector<string> binary_conv_outputs;
     bool has_reshape = false;
     for (const auto &node : model_proto_.graph().node()) {
-        if (has_reshape) {
-            throw std::invalid_argument(
-                "Reshape can only be the last layer for now");
-        }
         NodeAttrHelper helper(node);
         const auto &op = node.op_type();
+        if (has_reshape && op != "Gemm") {
+            throw std::invalid_argument(
+                "Reshape can only be the last layer or precede a gemm layer "
+                "for now");
+        }
+        has_reshape = false;
         VLOG(5) << "Node " << node.name();
         if (op == "Conv") {
             VLOG(5) << "Start converting Conv";
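Patch 2 relaxes patch 1's rule that Reshape must be the last node: it sets a
has_reshape flag and lets the very next node consume it, provided that node is
a Gemm. Distilled out of the converter, the control flow amounts to the
standalone sketch below; the check_reshape_placement helper and its ops
argument are illustrative, not part of dabnn:

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Sketch of patch 2's bookkeeping: a Reshape may only be the last node
    // or be immediately followed by a Gemm.
    void check_reshape_placement(const std::vector<std::string> &ops) {
        bool has_reshape = false;
        for (const auto &op : ops) {
            if (has_reshape && op != "Gemm") {
                throw std::invalid_argument(
                    "Reshape can only be the last layer or precede a gemm "
                    "layer for now");
            }
            // The converter clears the flag here and re-sets it in its
            // Reshape branch; the two steps fold into one assignment.
            has_reshape = (op == "Reshape");
        }
    }

For example, {"Conv", "Reshape", "Gemm"} passes, while
{"Conv", "Reshape", "Relu"} throws.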
From 9e375c9bcd54183d03c06ab5b37d3287e3591d57 Mon Sep 17 00:00:00 2001
From: daquexian
Date: Mon, 11 Nov 2019 00:18:08 +0800
Subject: [PATCH 3/5] Fix build error in Linux

---
 tools/onnx2bnn/OnnxConverter.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/onnx2bnn/OnnxConverter.cpp b/tools/onnx2bnn/OnnxConverter.cpp
index 6f0da4f..e5de8cf 100644
--- a/tools/onnx2bnn/OnnxConverter.cpp
+++ b/tools/onnx2bnn/OnnxConverter.cpp
@@ -239,7 +239,7 @@ std::vector<std::string> OnnxConverter::Convert(
                 shape.push_back(1);
                 continue;
             }
-            const auto &dim = dims[i];
+            const auto &dim = dims.Get(i);
             if (dim.value_case() ==
                 ONNX_NAMESPACE::TensorShapeProto_Dimension::kDimValue) {
                 shape.push_back(static_cast<uint32_t>(dim.dim_value()));

From c766981267e9a1537e46f8d8dd5d2fb5732eafea Mon Sep 17 00:00:00 2001
From: daquexian
Date: Mon, 11 Nov 2019 00:18:26 +0800
Subject: [PATCH 4/5] Enhance the support for reshape

---
 tools/onnx2bnn/OnnxConverter.cpp | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/tools/onnx2bnn/OnnxConverter.cpp b/tools/onnx2bnn/OnnxConverter.cpp
index e5de8cf..71219b4 100644
--- a/tools/onnx2bnn/OnnxConverter.cpp
+++ b/tools/onnx2bnn/OnnxConverter.cpp
@@ -262,14 +262,12 @@ std::vector<std::string> OnnxConverter::Convert(
     for (const auto &node : model_proto_.graph().node()) {
         NodeAttrHelper helper(node);
         const auto &op = node.op_type();
-        if (has_reshape && op != "Gemm") {
-            throw std::invalid_argument(
-                "Reshape can only be the last layer or precede a gemm layer "
-                "for now");
-        }
-        has_reshape = false;
         VLOG(5) << "Node " << node.name();
         if (op == "Conv") {
+            if (has_reshape) {
+                throw std::invalid_argument("Reshape before " + op +
+                                            " is not supported");
+            }
             VLOG(5) << "Start converting Conv";
             auto strides = helper.get("strides", vector<int>{1, 1});
             auto pads = helper.get("pads", vector<int>{0, 0, 0, 0});
@@ -319,6 +317,10 @@ std::vector<std::string> OnnxConverter::Convert(
             VLOG(5) << "Converting Conv completed";
         } else if (op == "AveragePool" || op == "MaxPool" ||
                    op == "GlobalAveragePool" || op == "GlobalMaxPool") {
+            if (has_reshape) {
+                throw std::invalid_argument("Reshape before " + op +
+                                            " is not supported");
+            }
             VLOG(5) << "Start converting Pool";
             auto input_name = m(node.input(0));
             auto output_name = m(node.output(0));
@@ -407,6 +409,10 @@ std::vector<std::string> OnnxConverter::Convert(
             layers_.push_back(layer);
             VLOG(5) << "Converting Relu completed";
         } else if (op == "Add") {
+            if (has_reshape) {
+                throw std::invalid_argument("Reshape before " + op +
+                                            " is not supported");
+            }
             VLOG(5) << "Start converting Add";
             auto input1_name = m(node.input(0));
             auto input2_name = m(node.input(1));
@@ -420,6 +426,9 @@ std::vector<std::string> OnnxConverter::Convert(
             layers_.push_back(layer);
             VLOG(5) << "Converting Add completed";
         } else if (op == "Gemm") {
+            if (has_reshape) {
+                has_reshape = false;
+            }
             VLOG(5) << "Start converting Gemm";
             auto transA = helper.get("transA", 0);
             auto transB = helper.get("transB", 0);
@@ -478,6 +487,10 @@ std::vector<std::string> OnnxConverter::Convert(
             layers_.push_back(layer);
             VLOG(5) << "Converting Softmax completed";
         } else if (op == "Concat") {
+            if (has_reshape) {
+                throw std::invalid_argument("Reshape before " + op +
+                                            " is not supported");
+            }
             VLOG(5) << "Start converting Concat";
             vector<string> concat_inputs_str;
             for (const auto &onnx_input : node.input()) {
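Patch 3's one-liner is a portability fix: input.type().tensor_type().shape().dim()
returns a protobuf RepeatedPtrField, and the Linux build rejected indexing it
with dims[i], presumably because of the installed protobuf version or the index
type; Get(i) is the accessor that repeated fields are always guaranteed to
provide. A minimal sketch, using RepeatedPtrField<std::string> as a stand-in
for the generated dimension type (assumes only that protobuf is installed):

    #include <google/protobuf/repeated_field.h>

    #include <iostream>
    #include <string>

    int main() {
        google::protobuf::RepeatedPtrField<std::string> dims;
        *dims.Add() = "1";
        *dims.Add() = "224";
        for (int i = 0; i < dims.size(); ++i) {
            // Get(i) is portable across protobuf versions; dims[i] is
            // equivalent where the overload exists.
            std::cout << dims.Get(i) << std::endl;
        }
        return 0;
    }

Patch 4 then pushes the reshape check down into the individual op branches, so
ops that genuinely cannot follow a Reshape (Conv, the pools, Add, Concat) fail
with a targeted message, while the Gemm branch simply clears the flag.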
From 7460af8369e140f821e761d52a9b50be1f4d6bef Mon Sep 17 00:00:00 2001
From: daquexian
Date: Mon, 11 Nov 2019 00:18:46 +0800
Subject: [PATCH 5/5] Eliminate dropout by custom onnx optimizer

---
 third_party/onnx                 | 2 +-
 tools/onnx2bnn/OnnxConverter.cpp | 6 +-----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/third_party/onnx b/third_party/onnx
index 20835f8..9f70c11 160000
--- a/third_party/onnx
+++ b/third_party/onnx
@@ -1 +1 @@
-Subproject commit 20835f8f238a232de2f1540fc109de62759e2495
+Subproject commit 9f70c118910f7174f65dac84c90d46d608d9cfd2
diff --git a/tools/onnx2bnn/OnnxConverter.cpp b/tools/onnx2bnn/OnnxConverter.cpp
index 71219b4..9794f43 100644
--- a/tools/onnx2bnn/OnnxConverter.cpp
+++ b/tools/onnx2bnn/OnnxConverter.cpp
@@ -193,6 +193,7 @@ std::vector<std::string> OnnxConverter::Convert(
     // for details.
     vector<string> optimizers{
         "eliminate_nop_pad", "extract_constant_to_initializer",
+        "dabnn_eliminate_dropout",
         "dabnn_convert_gemm_with_reshape_or_flatten_to_conv_and_reshape",
         "dabnn_bconv_strict"};
     if (level == Level::kModerate || level == Level::kAggressive) {
         optimizers.push_back("dabnn_bconv_moderate");
     }
@@ -510,11 +511,6 @@ std::vector<std::string> OnnxConverter::Convert(
                               0, 0, 0, 0, 0, 0, param);
         layers_.push_back(layer);
         VLOG(5) << "Converting Concat completed";
-    } else if (op == "Dropout") {
-        VLOG(5) << "Start converting Dropout";
-        // Dropout does nothing, so the output is the same as the input
-        name_map_[node.output(0)] = m(node.input(0));
-        VLOG(5) << "Converting Dropout completed";
     } else if (op == "Reshape") {
        VLOG(5) << "Start converting Reshape";
        has_reshape = true;
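With patch 5 the converter no longer needs a Dropout branch at all: the pinned
ONNX fork (the optimizer_for_bnn branch) removes the op before conversion via
the dabnn_eliminate_dropout pass. The fork's source is not reproduced here, but
in the 2019-era onnx/optimizer framework such a pass would look roughly like
the sketch below; the header path and Symbol lookup follow that framework,
while the mask-output guard is my assumption about how the pass stays safe:

    #include "onnx/optimizer/pass.h"

    namespace ONNX_NAMESPACE {
    namespace optimization {

    struct EliminateDropout final : public PredicateBasedPass {
      explicit EliminateDropout()
          : PredicateBasedPass(PassType::Nop, PassEfficiency::Complete,
                               PassOptimizationType::Compute) {}

      std::string getPassName() const override {
        return "dabnn_eliminate_dropout";
      }

      bool patternMatchPredicate(Node *node) override {
        return node->kind() == Symbol("Dropout");
      }

      bool runTransform(Node *node, Graph &,
                        NodeDestroyType &destroy_current) override {
        // Bail out if anything consumes the optional mask output.
        if (node->outputs().size() > 1 &&
            !node->outputs()[1]->uses().empty()) {
          return false;
        }
        // At inference time dropout is an identity: rewire consumers of
        // the data output to the dropout's input, then delete the node.
        node->outputs()[0]->replaceAllUsesWith(node->input());
        destroy_current = NodeDestroyType::DestroyOne;
        return true;
      }
    };

    }  // namespace optimization
    }  // namespace ONNX_NAMESPACE

Doing this as a graph-level pass keeps the converter loop free of no-op
special cases, which is why the Dropout branch above can be deleted outright.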