[APU][RKNPU] Adapt to flatbuffer desc view (#4477)
hong19860320 committed Sep 29, 2020
1 parent adfa270 commit 7bc2446
Showing 14 changed files with 112 additions and 77 deletions.
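
Every converter in this commit applies the same mechanical change: quantization scales are no longer looked up by the tensor's variable name but by a scale attribute of the form "<ArgumentName><index>_scale" (e.g. "X0_scale", "Filter0_scale", "Out0_scale"), with a trailing true argument that appears to tell OpInfo to treat the key as an attribute name from the flatbuffer desc view rather than as a variable name. Below is a minimal sketch of the before/after pattern; the helper function and the meaning of the flag are inferred from this diff, not taken from the OpInfo declaration.

#include <string>

// Hypothetical helper mirroring the naming convention used throughout this
// commit: the scale attribute for the i-th tensor bound to argument `arg`
// is "<arg><i>_scale", e.g. ScaleAttrName("X", 0) == "X0_scale".
std::string ScaleAttrName(const std::string& arg, int i) {
  return arg + std::to_string(i) + "_scale";
}

// Before: scales keyed by the variable name of the tensor.
//   CHECK(op_info->HasInputScale(x_name));
//   auto x_scale = op_info->GetInputScale(x_name)[0];
//
// After: scales keyed by the scale-attribute name; the second argument
// (assumed meaning) marks the key as an attribute name.
//   CHECK(op_info->HasInputScale(ScaleAttrName("X", 0), true));
//   auto x_scale = op_info->GetInputScale(ScaleAttrName("X", 0), true)[0];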
13 changes: 8 additions & 5 deletions lite/kernels/apu/bridges/concat_op.cc
@@ -36,6 +36,7 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   // Get input and output vars and op attributes
   auto x_names = op_info->Input("X");
   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";
   auto axis = op_info->GetAttr<int>("axis");
   auto num = x_names.size();

@@ -50,19 +51,21 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   // Limitation:
   // All input tensors of NEURON_TENSOR_QUANT8_ASYMM must
   // have the same scale and zeroPoint as the output tensor
-  CHECK(op_info->HasOutputScale(out_name));
-  auto output_scale = op_info->GetOutputScale(out_name)[0];
+  CHECK(op_info->HasOutputScale(out_scale_name, true));
+  auto output_scale = op_info->GetOutputScale(out_scale_name, true)[0];

   // Traverse all of input nodes
   std::vector<std::shared_ptr<Node>> input_nodes;
   NeuronOperandType xType;
-  for (auto& x_name : x_names) {
+  for (int i = 0; i < num; i++) {
+    auto x_name = x_names[i];
+    auto x_scale_name = "X" + paddle::lite::to_string(i) + "_scale";
     auto x = scope->FindMutableTensor(x_name);
     auto x_dims = x->dims();
     std::shared_ptr<Node> x_node = nullptr;

-    CHECK(op_info->HasInputScale(x_name));
-    auto input_scale = op_info->GetInputScale(x_name)[0];
+    CHECK(op_info->HasInputScale(x_scale_name, true));
+    auto input_scale = op_info->GetInputScale(x_scale_name, true)[0];

     // Add x tensor type
     xType.type = NEURON_TENSOR_QUANT8_ASYMM;
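
Note the loop rewrite above: for single-tensor arguments the index is always 0, but concat takes a variable number of inputs, so each input needs its own "X<i>_scale" key. A standalone illustration of the mapping follows; the variable names are made up, and the real code uses paddle::lite::to_string where this sketch uses std::to_string.

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Hypothetical input variable names for a 3-way concat.
  std::vector<std::string> x_names = {"a.tmp_0", "b.tmp_1", "c.tmp_2"};
  for (size_t i = 0; i < x_names.size(); i++) {
    // Each input tensor gets its own per-index scale attribute key.
    std::string x_scale_name = "X" + std::to_string(i) + "_scale";
    std::cout << x_names[i] << " -> " << x_scale_name << "\n";
  }
  // Prints:
  //   a.tmp_0 -> X0_scale
  //   b.tmp_1 -> X1_scale
  //   c.tmp_2 -> X2_scale
  return 0;
}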
17 changes: 10 additions & 7 deletions lite/kernels/apu/bridges/conv_op.cc
@@ -40,14 +40,17 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto input_name = op_info->Input("Input").front();
+  auto input_scale_name = "Input0_scale";
   auto input = scope->FindMutableTensor(input_name);
   auto input_dims = input->dims();

   auto filter_name = op_info->Input("Filter").front();
+  auto filter_scale_name = "Filter0_scale";
   auto filter = scope->FindMutableTensor(filter_name);
   auto filter_dims = filter->dims();

   auto output_name = op_info->Output("Output").front();
+  auto output_scale_name = "Output0_scale";
   auto output = scope->FindMutableTensor(output_name);
   auto output_dims = output->dims();

@@ -97,13 +100,13 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                  input_dims,
                  filter_dims);

-  CHECK(op_info->HasInputScale(input_name));
-  auto input_scale = op_info->GetInputScale(input_name)[0];
-  CHECK(op_info->HasInputScale(filter_name));
-  auto filter_scale = op_info->GetInputScale(filter_name);
-  CHECK(op_info->HasOutputScale(output_name));
-  auto output_scale = op_info->GetOutputScale(output_name)[0];
-  auto orig_output_scale = op_info->GetOutputScale(output_name)[0];
+  CHECK(op_info->HasInputScale(input_scale_name, true));
+  auto input_scale = op_info->GetInputScale(input_scale_name, true)[0];
+  CHECK(op_info->HasInputScale(filter_scale_name, true));
+  auto filter_scale = op_info->GetInputScale(filter_scale_name, true);
+  CHECK(op_info->HasOutputScale(output_scale_name, true));
+  auto output_scale = op_info->GetOutputScale(output_scale_name, true)[0];
+  auto orig_output_scale = op_info->GetOutputScale(output_scale_name, true)[0];

   VLOG(3) << "strides.size(): " << strides.size() << " ,groups: " << groups
           << " ,dilations: " << dilations[0] << ":" << dilations[1];
15 changes: 9 additions & 6 deletions lite/kernels/apu/bridges/conv_transpose_op.cc
@@ -40,16 +40,19 @@ int ConvTransposeConverter(void *ctx, OpLite *op, KernelBase *kernel) {

   // Get input, output and op attributes
   auto input_name = op_info->Input("Input").front();
+  auto input_scale_name = "Input0_scale";
   auto input = scope->FindMutableTensor(input_name);
   auto input_dims = input->dims();
   CHECK_EQ(input_dims.size(), 4);

   auto filter_name = op_info->Input("Filter").front();
+  auto filter_scale_name = "Filter0_scale";
   auto filter = scope->FindMutableTensor(filter_name);
   auto filter_dims = filter->dims();
   CHECK_EQ(filter_dims.size(), 4);

   auto output_name = op_info->Output("Output").front();
+  auto output_scale_name = "Output0_scale";

   auto strides = op_info->GetAttr<std::vector<int>>("strides");
   CHECK_EQ(strides.size(), 2L);

@@ -118,12 +121,12 @@ int ConvTransposeConverter(void *ctx, OpLite *op, KernelBase *kernel) {
   }
   output_dims.push_back(filter_dims[1]);

-  CHECK(op_info->HasInputScale(input_name));
-  auto input_scale = op_info->GetInputScale(input_name)[0];
-  CHECK(op_info->HasInputScale(filter_name));
-  auto filter_scale = op_info->GetInputScale(filter_name);
-  CHECK(op_info->HasOutputScale(output_name));
-  auto output_scale = op_info->GetOutputScale(output_name)[0];
+  CHECK(op_info->HasInputScale(input_scale_name, true));
+  auto input_scale = op_info->GetInputScale(input_scale_name, true)[0];
+  CHECK(op_info->HasInputScale(filter_scale_name, true));
+  auto filter_scale = op_info->GetInputScale(filter_scale_name, true);
+  CHECK(op_info->HasOutputScale(output_scale_name, true));
+  auto output_scale = op_info->GetOutputScale(output_scale_name, true)[0];

   VLOG(3) << "strides.size(): " << strides.size() << " ,groups: " << groups
           << " ,dilations: " << dilations[0] << ":" << dilations[1];
15 changes: 9 additions & 6 deletions lite/kernels/apu/bridges/elementwise_ops.cc
@@ -34,14 +34,17 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
+  auto x_scale_name = "X0_scale";
   auto x = scope->FindTensor(x_name);
   auto x_dims = x->dims();

   auto y_name = op_info->Input("Y").front();
+  auto y_scale_name = "Y0_scale";
   auto y = scope->FindTensor(y_name);
   auto y_dims = y->dims();

   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";
   auto out = scope->FindTensor(out_name);
   auto out_dims = out->dims();

@@ -88,12 +91,12 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   }  // End of if
   VLOG(3) << "x_name" << x_name;

-  CHECK(op_info->HasInputScale(x_name));
-  auto x_scale = op_info->GetInputScale(x_name)[0];
-  CHECK(op_info->HasInputScale(y_name));
-  auto y_scale = op_info->GetInputScale(y_name)[0];
-  CHECK(op_info->HasOutputScale(out_name));
-  auto out_scale = op_info->GetOutputScale(out_name)[0];
+  CHECK(op_info->HasInputScale(x_scale_name, true));
+  auto x_scale = op_info->GetInputScale(x_scale_name, true)[0];
+  CHECK(op_info->HasInputScale(y_scale_name, true));
+  auto y_scale = op_info->GetInputScale(y_scale_name, true)[0];
+  CHECK(op_info->HasOutputScale(out_scale_name, true));
+  auto out_scale = op_info->GetOutputScale(out_scale_name, true)[0];

   // Add x tensor type
   NeuronOperandType xType;
15 changes: 9 additions & 6 deletions lite/kernels/apu/bridges/fc_op.cc
@@ -36,14 +36,17 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto input_name = op_info->Input("Input").front();
+  auto input_scale_name = "Input0_scale";
   auto input = scope->FindMutableTensor(input_name);
   auto input_dims = input->dims();
   CHECK_GE(input_dims.size(), 2UL);
   auto w_name = op_info->Input("W").front();
+  auto w_scale_name = "W0_scale";
   auto w = scope->FindMutableTensor(w_name);
   auto w_dims = w->dims();
   CHECK_EQ(w_dims.size(), 2UL);
   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";
   auto out = scope->FindMutableTensor(out_name);
   auto out_dims = out->dims();

@@ -56,12 +59,12 @@ int FCConverter(void* ctx, OpLite* op, KernelBase* kernel) {
           << " out_dims: " << out_dims << " m: " << m << " k: " << k
           << " n: " << n;

-  CHECK(op_info->HasInputScale(input_name));
-  auto input_scale = op_info->GetInputScale(input_name)[0];
-  CHECK(op_info->HasInputScale(w_name));
-  auto w_scale = op_info->GetInputScale(w_name);
-  CHECK(op_info->HasOutputScale(out_name));
-  auto out_scale = op_info->GetOutputScale(out_name)[0];
+  CHECK(op_info->HasInputScale(input_scale_name, true));
+  auto input_scale = op_info->GetInputScale(input_scale_name, true)[0];
+  CHECK(op_info->HasInputScale(w_scale_name, true));
+  auto w_scale = op_info->GetInputScale(w_scale_name, true);
+  CHECK(op_info->HasOutputScale(out_scale_name, true));
+  auto out_scale = op_info->GetOutputScale(out_scale_name, true)[0];

   // Add input tensor type
   NeuronOperandType inType;
10 changes: 6 additions & 4 deletions lite/kernels/apu/bridges/pool_op.cc
@@ -37,9 +37,11 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
+  auto x_scale_name = "X0_scale";
   auto x = scope->FindMutableTensor(x_name);
   auto x_dims = x->dims();
   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";
   auto out = scope->FindMutableTensor(out_name);
   auto out_dims = out->dims();
   auto pooling_type = op_info->GetAttr<std::string>("pooling_type");

@@ -90,10 +92,10 @@ int PoolConverter(void* ctx, OpLite* op, KernelBase* kernel) {
                  ksize);

   // Add x tensor type
-  CHECK(op_info->HasInputScale(x_name));
-  auto x_scale = op_info->GetInputScale(x_name)[0];
-  CHECK(op_info->HasOutputScale(out_name));
-  auto out_scale = op_info->GetOutputScale(out_name)[0];
+  CHECK(op_info->HasInputScale(x_scale_name, true));
+  auto x_scale = op_info->GetInputScale(x_scale_name, true)[0];
+  CHECK(op_info->HasOutputScale(out_scale_name, true));
+  auto out_scale = op_info->GetOutputScale(out_scale_name, true)[0];

   NeuronOperandType xType;
   xType.type = NEURON_TENSOR_QUANT8_ASYMM;
10 changes: 6 additions & 4 deletions lite/kernels/apu/bridges/softmax_op.cc
@@ -37,21 +37,23 @@ int SoftmaxConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
   auto x = scope->FindMutableTensor(x_name);
+  auto x_scale_name = "X0_scale";
   auto x_dims = x->dims();
   CHECK_GE(x_dims.size(), 2UL);
   auto x_rank = x_dims.size();
   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";

   // Check output shape
   auto axis = op_info->GetAttr<int>("axis");
   if (axis < 0) {
     axis += x_rank;
   }

-  CHECK(op_info->HasInputScale(x_name));
-  auto input_scale = op_info->GetInputScale(x_name)[0];
-  CHECK(op_info->HasOutputScale(out_name));
-  auto out_scale = op_info->GetOutputScale(out_name)[0];
+  CHECK(op_info->HasInputScale(x_scale_name, true));
+  auto input_scale = op_info->GetInputScale(x_scale_name, true)[0];
+  CHECK(op_info->HasOutputScale(out_scale_name, true));
+  auto out_scale = op_info->GetOutputScale(out_scale_name, true)[0];

   // Check output scale
   NeuronOperandType xType;
10 changes: 6 additions & 4 deletions lite/kernels/rknpu/bridges/batch_norm_op.cc
@@ -32,6 +32,7 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
+  auto x_scale_name = "X0_scale";
   auto x = scope->FindMutableTensor(x_name);
   auto x_dims = x->dims();
   auto scale_name = op_info->Input("Scale").front();

@@ -43,6 +44,7 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto variance_name = op_info->Input("Variance").front();
   auto variance = scope->FindMutableTensor(variance_name);
   auto y_name = op_info->Output("Y").front();
+  auto y_scale_name = "Y0_scale";
   auto y = scope->FindMutableTensor(y_name);
   float momentum = op_info->GetAttr<float>("momentum");
   float epsilon = op_info->GetAttr<float>("epsilon");

@@ -59,11 +61,11 @@ int BatchNormConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   if (op_info->HasAttr("enable_int8")) {
     enable_int8 = op_info->GetAttr<bool>("enable_int8");
-    CHECK(op_info->HasInputScale(x_name));
-    input_scale = op_info->GetInputScale(x_name)[0];
+    CHECK(op_info->HasInputScale(x_scale_name, true));
+    input_scale = op_info->GetInputScale(x_scale_name, true)[0];
     bit_length = op_info->GetAttr<int>("bit_length");
-    CHECK(op_info->HasOutputScale(y_name));
-    output_scale = op_info->GetOutputScale(y_name)[0];
+    CHECK(op_info->HasOutputScale(y_scale_name, true));
+    output_scale = op_info->GetOutputScale(y_scale_name, true)[0];

     if (enable_int8) {
       precision = PRECISION(kInt8);
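
The RKNPU bridges differ from the APU ones above in one respect: the scale lookup is gated on the "enable_int8" attribute, so non-quantized ops never hit the CHECKs. Below is a toy, self-contained sketch of that gating; ToyOpInfo is an illustration only and stands in for the real OpInfo class in lite/core.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for the OpInfo accessors used in these bridges.
struct ToyOpInfo {
  std::map<std::string, bool> bool_attrs;
  std::map<std::string, std::vector<float>> scale_attrs;  // "X0_scale" -> {0.017f}

  bool HasAttr(const std::string& name) const { return bool_attrs.count(name) > 0; }
  bool GetBoolAttr(const std::string& name) const { return bool_attrs.at(name); }
  bool HasInputScale(const std::string& name, bool from_attr) const {
    return from_attr && scale_attrs.count(name) > 0;
  }
  const std::vector<float>& GetInputScale(const std::string& name,
                                          bool from_attr) const {
    return scale_attrs.at(name);
  }
};

int main() {
  ToyOpInfo op_info;
  op_info.bool_attrs["enable_int8"] = true;
  op_info.scale_attrs["X0_scale"] = {0.017f};

  // Scales are only required for quantized ops, so the lookup (and the
  // CHECK in the real code) only runs when "enable_int8" is set.
  float input_scale = 1.0f;
  if (op_info.HasAttr("enable_int8") && op_info.GetBoolAttr("enable_int8")) {
    const std::string x_scale_name = "X0_scale";
    if (op_info.HasInputScale(x_scale_name, true)) {
      input_scale = op_info.GetInputScale(x_scale_name, true)[0];
    }
  }
  std::cout << "input_scale = " << input_scale << "\n";  // 0.017
  return 0;
}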
15 changes: 8 additions & 7 deletions lite/kernels/rknpu/bridges/concat_op.cc
@@ -34,6 +34,7 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   auto x_names = op_info->Input("X");
   auto out_name = op_info->Output("Out").front();
   auto output = scope->FindMutableTensor(out_name);
+  auto out_scale_name = "Out0_scale";

   auto axis = op_info->GetAttr<int>("axis");
   auto num = x_names.size();

@@ -49,8 +50,8 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   if (op_info->HasAttr("enable_int8")) {
     enable_int8 = op_info->GetAttr<bool>("enable_int8");
     bit_length = op_info->GetAttr<int>("bit_length");
-    CHECK(op_info->HasOutputScale(out_name));
-    output_scale = op_info->GetOutputScale(out_name)[0];
+    CHECK(op_info->HasOutputScale(out_scale_name, true));
+    output_scale = op_info->GetOutputScale(out_scale_name, true)[0];

     if (enable_int8) {
       precision = PRECISION(kInt8);

@@ -62,8 +63,9 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   std::vector<std::shared_ptr<rk::nn::Tensor>> inputs;
   std::vector<std::shared_ptr<rk::nn::Tensor>> outputs;

-  int idx = 1;
-  for (auto& x_name : x_names) {
+  for (int i = 0; i < num; i++) {
+    auto x_name = x_names[i];
+    auto x_scale_name = "X" + paddle::lite::to_string(i) + "_scale";
     auto x = scope->FindMutableTensor(x_name);
     auto x_dims = x->dims();
     std::shared_ptr<Node> x_node = nullptr;

@@ -75,8 +77,8 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
     qnt.enable_int8 = enable_int8;

     if (enable_int8) {
-      CHECK(op_info->HasInputScale(x_name));
-      input_scale = op_info->GetInputScale(x_name)[0];
+      CHECK(op_info->HasInputScale(x_scale_name, true));
+      input_scale = op_info->GetInputScale(x_scale_name, true)[0];
       qnt.quant_bits = bit_length;
       qnt.scale.push_back(input_scale);
       x->mutable_data<int8_t>();

@@ -85,7 +87,6 @@ int ConcatConverter(void* ctx, OpLite* op, KernelBase* kernel) {
     }

     inputs.push_back(x_node->data());
-    idx++;
   }

   std::shared_ptr<Node> output_node = nullptr;
15 changes: 9 additions & 6 deletions lite/kernels/rknpu/bridges/conv_op.cc
@@ -34,12 +34,15 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto input_name = op_info->Input("Input").front();
+  auto input_scale_name = "Input0_scale";
   auto input = scope->FindMutableTensor(input_name);
   auto input_dims = input->dims();
   auto filter_name = op_info->Input("Filter").front();
+  auto filter_scale_name = "Filter0_scale";
   auto filter = scope->FindMutableTensor(filter_name);
   auto filter_dims = filter->dims();
   auto output_name = op_info->Output("Output").front();
+  auto output_scale_name = "Output0_scale";
   auto output = scope->FindMutableTensor(output_name);
   auto output_dims = output->dims();
   auto bs = input_dims[0];

@@ -59,8 +62,8 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {
   CHECK_EQ(dilations.size(), 2L);
   // Check depthwise mode
   bool is_depthwise_mode = (ic == groups && oc == groups && groups != 1);
-  CHECK(op_info->HasInputScale(filter_name));
-  auto weight_scale = op_info->GetInputScale(filter_name);
+  CHECK(op_info->HasInputScale(filter_scale_name, true));
+  auto weight_scale = op_info->GetInputScale(filter_scale_name, true);

   // for quantization
   bool enable_int8 = false;

@@ -72,11 +75,11 @@ int ConvConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   if (op_info->HasAttr("enable_int8")) {
     enable_int8 = op_info->GetAttr<bool>("enable_int8");
-    CHECK(op_info->HasInputScale(input_name));
-    input_scale = op_info->GetInputScale(input_name)[0];
+    CHECK(op_info->HasInputScale(input_scale_name, true));
+    input_scale = op_info->GetInputScale(input_scale_name, true)[0];
     bit_length = op_info->GetAttr<int>("bit_length");
-    CHECK(op_info->HasOutputScale(output_name));
-    output_scale = op_info->GetOutputScale(output_name)[0];
+    CHECK(op_info->HasOutputScale(output_scale_name, true));
+    output_scale = op_info->GetOutputScale(output_scale_name, true)[0];

     if (enable_int8) {
       precision = PRECISION(kInt8);
11 changes: 7 additions & 4 deletions lite/kernels/rknpu/bridges/elementwise_ops.cc
@@ -56,12 +56,15 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   // Get input and output vars and op attributes
   auto x_name = op_info->Input("X").front();
+  auto x_scale_name = "X0_scale";
   auto x = scope->FindMutableTensor(x_name);
   auto x_dims = x->dims();
   auto y_name = op_info->Input("Y").front();
+  auto y_scale_name = "Y0_scale";
   auto y = scope->FindMutableTensor(y_name);
   auto y_dims = y->dims();
   auto out_name = op_info->Output("Out").front();
+  auto out_scale_name = "Out0_scale";
   auto out_type = kernel->GetOutputDeclType("Out");
   auto output = scope->FindMutableTensor(out_name);
   auto axis = op_info->GetAttr<int>("axis");

@@ -76,11 +79,11 @@ int ElementwiseConverter(void* ctx, OpLite* op, KernelBase* kernel) {

   if (op_info->HasAttr("enable_int8")) {
     enable_int8 = op_info->GetAttr<bool>("enable_int8");
-    CHECK(op_info->HasInputScale(x_name));
-    input_scale = op_info->GetInputScale(x_name)[0];
+    CHECK(op_info->HasInputScale(x_scale_name, true));
+    input_scale = op_info->GetInputScale(x_scale_name, true)[0];
     bit_length = op_info->GetAttr<int>("bit_length");
-    CHECK(op_info->HasOutputScale(out_name));
-    output_scale = op_info->GetOutputScale(out_name)[0];
+    CHECK(op_info->HasOutputScale(out_scale_name, true));
+    output_scale = op_info->GetOutputScale(out_scale_name, true)[0];

     if (enable_int8) {
       precision = PRECISION(kInt8);
(Diffs for the remaining 3 of the 14 changed files are not shown.)
