Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix pool shaper bug, fix fused add/mul bug, support batchnorm, suppor… #38

Merged
merged 1 commit into from
Feb 18, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions common/Shaper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,28 @@ void Shaper::Pool(const std::string &input_name, int32_t strideX,
shape_map_[output_name] = outputDimen;
}

void Shaper::PoolNew(const std::string &input_name, int32_t padding_left,
int32_t padding_right, int32_t padding_top,
int32_t padding_bottom, int32_t stride_x, int32_t stride_y,
int32_t width, int32_t height,
const std::string &output_name) {
auto inputDimen = shape_map_.at(input_name);

Shape outputDimen;
if (height == -1 && width == -1) {
outputDimen = {inputDimen[0], 1, 1, inputDimen[3]};
} else {
outputDimen = {
inputDimen[0],
(inputDimen[1] - height + padding_top + padding_bottom) / stride_y +
1,
(inputDimen[2] - width + padding_left + padding_right) / stride_x +
1,
inputDimen[3]};
}
shape_map_[output_name] = outputDimen;
}

void Shaper::Softmax(const std::string &input_name,
const std::string &output_name) {
shape_map_[output_name] = shape_map_.at(input_name);
Expand Down
4 changes: 4 additions & 0 deletions common/Shaper.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,10 @@ class Shaper {
int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
int32_t paddingBottom, int32_t height, int32_t width,
const std::string &output_name);
// Shape inference for pooling ops with explicit per-side paddings; records
// the output shape under `output_name`. A kernel of -1 x -1 means global
// pooling (spatial dims collapse to 1x1).
// NOTE(review): the declaration's parameter names previously listed strides
// before paddings and height before width, contradicting the definition in
// Shaper.cpp and every call site (paddings, then strides, then kernel
// width/height); renamed here to match so the header no longer misleads.
void PoolNew(const std::string &input_name, int32_t paddingLeft,
             int32_t paddingRight, int32_t paddingTop, int32_t paddingBottom,
             int32_t strideX, int32_t strideY, int32_t width, int32_t height,
             const std::string &output_name);
void Softmax(const std::string &input_name, const std::string &output_name);
void Relu(const std::string &input_name, const std::string &output_name);
void Concat(const std::vector<std::string> &input_names, uint32_t axis,
Expand Down
4 changes: 4 additions & 0 deletions dnnlibrary/include/flatbuffers_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,10 @@ inline uint32_t unpack_fbs(const DNN::FuseCode fbs) {
throw std::invalid_argument("Invalid fuse_code");
}

// Identity overload for float attributes: the flatbuffer stores them as
// plain floats, so no conversion is required. Presumably exists so call
// sites (e.g. macro/generated layer-adding code) can apply unpack_fbs(...)
// uniformly to every attribute type — mirrors the uint32_t overload below.
inline float unpack_fbs(const float fbs) {
    return fbs;
}

// Identity overload for uint32_t attributes: already in the target
// representation, so it is returned unchanged. Keeps unpack_fbs(...)
// callable uniformly alongside the converting overloads (e.g. FuseCode).
inline uint32_t unpack_fbs(const uint32_t fbs) {
    return fbs;
}
Expand Down
8 changes: 4 additions & 4 deletions dnnlibrary/src/DaqReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -219,19 +219,19 @@ void AddLayers(const DNN::Model &model, ModelBuilder &builder) {
break;
}
case DNN::LayerType::Add: {
ADD_LAYER_QUANT(add, OperationAdd, input1, input2, output);
ADD_LAYER_QUANT(add, Add, input1, input2, fuse, output);
break;
}
case DNN::LayerType::AddScalar: {
ADD_LAYER(add, OperationAdd, input1, input2, output);
ADD_LAYER(add_scalar, Add, input1, input2, fuse, output);
break;
}
case DNN::LayerType::Mul: {
ADD_LAYER_QUANT(mul, Mul, input1, input2, output);
ADD_LAYER_QUANT(mul, Mul, input1, input2, fuse, output);
break;
}
case DNN::LayerType::MulScalar: {
ADD_LAYER(mul, Mul, input1, input2, output);
ADD_LAYER(mul_scalar, Mul, input1, input2, fuse, output);
break;
}
case DNN::LayerType::FC: {
Expand Down
12 changes: 6 additions & 6 deletions dnnlibrary/src/ModelBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,9 @@ ModelBuilder::Index ModelBuilder::AddAvePool(
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse_code);
shaper_.Pool(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
shaper_.PoolNew(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
const OperandType operand_type = GetOperandType(
operand_types_.at(input).type, shaper_[output], output_quant_info);
const auto output_idx = AddOperation(ANEURALNETWORKS_AVERAGE_POOL_2D,
Expand All @@ -156,9 +156,9 @@ ModelBuilder::Index ModelBuilder::AddMaxPool(
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse_code);
shaper_.Pool(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
shaper_.PoolNew(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
const OperandType operand_type = GetOperandType(
operand_types_.at(input).type, shaper_[output], output_quant_info);
const auto output_idx = AddOperation(ANEURALNETWORKS_MAX_POOL_2D,
Expand Down
4 changes: 2 additions & 2 deletions ops.yml
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@
cpp_type: int32_t
needed_by_shaper: true
nnapi: AVERAGE_POOL_2D
shaper: Pool
shaper: PoolNew
fused: true
support_quant_asymm: true
api: 27
Expand Down Expand Up @@ -135,7 +135,7 @@
cpp_type: int32_t
needed_by_shaper: true
nnapi: MAX_POOL_2D
shaper: Pool
shaper: PoolNew
fused: true
support_quant_asymm: true
api: 27
Expand Down
67 changes: 60 additions & 7 deletions tools/onnx2daq/OnnxConverter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,7 @@ void OnnxConverter::AddLayerDequantize(css &input_name, css &output_name) {
layers_.push_back(layer);
}

void OnnxConverter::AddLayerDropout(css &input_name, css &output_name) {
void OnnxConverter::SetIdentity(css &input_name, css &output_name) {
    // Identity mapping: the output reuses the input's shape and buffer name.
    // Used for layers treated as no-ops here (Dropout, and Reshape below).
shaper_.Eltwise(input_name, output_name);
name_map_[output_name] = m(input_name);
Expand Down Expand Up @@ -430,8 +430,12 @@ void OnnxConverter::HandleInitializer() {
vector<char>(ptr, ptr + Product(shape) * sizeof(int32_t));
onnx_tensors_[name] = {name, data_vec, shape,
Tensor::DataType::INT32};
} else if (tensor.data_type() ==
ONNX_NAMESPACE::TensorProto_DataType_INT64) {
// TODO: shape of reshape layer
} else {
LOG(INFO) << "invalid " << tensor.data_type();
PNT(tensor.name(), tensor.data_type());
DNN_ASSERT(false, "");
}
operands_.push_back(name);
}
Expand Down Expand Up @@ -560,10 +564,6 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,

bool has_reshape = false;
for (const auto &node : model_proto_.graph().node()) {
if (has_reshape) {
throw std::invalid_argument(
"Reshape can only be the last layer for now");
}
NodeAttrHelper helper(node);
const auto &op = node.op_type();
LOG(INFO) << "Node " << node.name();
Expand All @@ -572,6 +572,11 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
LOG(INFO) << "Skip layer " << node.name();
continue;
}
if (has_reshape && op != "Gemm") {
throw std::invalid_argument(
"Reshape can only be the last layer or precede a gemm layer "
"for now");
}
if (op == "Conv") {
LOG(INFO) << "Start converting Conv";
const auto strides = helper.get("strides", vector<int>{1, 1});
Expand Down Expand Up @@ -713,6 +718,7 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto beta = helper.get("beta", 1.0f);
AddLayerGemm(input_name, weight_name, bias_name, transA, transB,
alpha, beta, output_name);
has_reshape = false;
LOG(INFO) << "Converting Gemm completed";
} else if (op == "Softmax") {
LOG(INFO) << "Start converting Softmax";
Expand All @@ -732,11 +738,58 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
LOG(INFO) << "Converting Concat completed";
} else if (op == "Dropout") {
LOG(INFO) << "Start converting Dropout";
AddLayerDropout(node.input(0), node.output(0));
SetIdentity(node.input(0), node.output(0));
LOG(INFO) << "Converting Dropout completed";

} else if (op == "BatchNormalization") {
LOG(INFO) << "Start converting BatchNormalization";
DNN_ASSERT(node.output_size() == 1,
"Your onnx model may be in training mode, please export "
"it in test mode.")
const auto input_name = m(node.input(0));

const auto scale_name = m(node.input(1));
const auto bias_name = m(node.input(2));
const auto mean_name = m(node.input(3));
const auto var_name = m(node.input(4));

const auto scale_tensor = onnx_tensors_.at(scale_name);
const auto bias_tensor = onnx_tensors_.at(bias_name);
const auto mean_tensor = onnx_tensors_.at(mean_name);
const auto var_tensor = onnx_tensors_.at(var_name);

const auto eps = helper.get("epsilon", 1e-5f);

const auto output_name = m(node.output(0));

vector<float> a, b;
FORZ(i, scale_tensor.shape[0]) {
a.push_back(scale_tensor.float_data()[i] /
sqrt(var_tensor.float_data()[i] + eps));
b.push_back((scale_tensor.float_data()[i] *
-mean_tensor.float_data()[i]) /
sqrt(var_tensor.float_data()[i] + eps) +
bias_tensor.float_data()[i]);
}

const auto flat_tensor_a = DNN::CreateTensorDirect(
builder_, DNN::DataType::Float32, nullptr, &a,
&scale_tensor.shape, (output_name + "_imm_a").c_str());
const auto flat_tensor_b = DNN::CreateTensorDirect(
builder_, DNN::DataType::Float32, nullptr, &b,
&scale_tensor.shape, (output_name + "_imm_b").c_str());
tensors_.push_back(flat_tensor_a);
tensors_.push_back(flat_tensor_b);
AddLayerMul(input_name, output_name + "_imm_a",
output_name + "_imm_mul");
AddLayerAdd(output_name + "_imm_mul", output_name + "_imm_b",
output_name);

LOG(INFO) << "Converting BatchNormalization completed";
} else if (op == "Reshape") {
LOG(INFO) << "Start converting Reshape";
has_reshape = true;
SetIdentity(node.input(0), node.output(0));
LOG(INFO) << "Converting Reshape completed";
} else {
throw std::invalid_argument("Unsupported operator " + op);
Expand Down
2 changes: 1 addition & 1 deletion tools/onnx2daq/OnnxConverter.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ class OnnxConverter {
void AddLayerConcat(const std::vector<std::string> &inputs,
css &output_name, const int axis);
void AddLayerDequantize(css &input_name, css &output_name);
void AddLayerDropout(css &input_name, css &output_name);
void SetIdentity(css &input_name, css &output_name);

/**
* onnx: [filter_out_channel, filter_in_channel / group, height, width]
Expand Down