Skip to content

Commit

Permalink
Support int8 Deconv (#1347)
Browse files Browse the repository at this point in the history
  • Loading branch information
Zhiwei35 committed Oct 14, 2022
1 parent 81a0e45 commit 5ca9b4c
Show file tree
Hide file tree
Showing 19 changed files with 1,012 additions and 176 deletions.
15 changes: 10 additions & 5 deletions neural_compressor/adaptor/inteltensorflow.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,10 @@

ops: &common_ops
int8: ['Conv2D', 'Conv3D', 'DepthwiseConv2dNative', 'FusedBatchNorm', 'FusedBatchNormV2','FusedBatchNormV3',
'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'ConcatV2', 'MaxPool', 'MaxPool3D', 'AvgPool', '_MklFusedInstanceNorm']
uint8: ['Conv2D', 'Conv3D', 'DepthwiseConv2dNative', 'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'ConcatV2', 'MaxPool', 'MaxPool3D', 'AvgPool']
'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'ConcatV2', 'MaxPool', 'MaxPool3D', 'AvgPool', '_MklFusedInstanceNorm',
'Conv2DBackpropInput', 'Conv3DBackpropInputV2']
uint8: ['Conv2D', 'Conv3D', 'DepthwiseConv2dNative', 'MatMul', 'BatchMatMul', 'BatchMatMulV2', 'ConcatV2',
'MaxPool', 'MaxPool3D', 'AvgPool', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2']
bf16: ["Conv2D", "Conv2DBackpropFilter", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropFilterV2", "Conv3DBackpropInputV2",
"DepthwiseConv2dNative", "DepthwiseConv2dNativeBackpropFilter", "DepthwiseConv2dNativeBackpropInput", "GRUBlockCell",
"AUGRUBlockCell", "MklGRU", "MklAUGRU", "MatMul", "BatchMatMul", "BatchMatMulV2", "Einsum", # allow_list
Expand Down Expand Up @@ -296,7 +298,9 @@
'Dequantize + DepthwiseConv2dNative + BiasAdd + QuantizeV2',
'Dequantize + FusedBatchNormV3 + Relu + QuantizeV2',
'Dequantize + _MklFusedInstanceNorm + Relu + QuantizeV2',
'Dequantize + _MklFusedInstanceNorm + LeakyRelu + QuantizeV2'
'Dequantize + _MklFusedInstanceNorm + LeakyRelu + QuantizeV2',
'Dequantize + Conv2DBackpropInput + BiasAdd + QuantizeV2',
'Dequantize + Conv3DBackpropInputV2 + BiasAdd + QuantizeV2'
]
uint8: [
'Dequantize + Conv2D + BiasAdd + AddN + Relu + QuantizeV2',
Expand Down Expand Up @@ -370,8 +374,9 @@
'Dequantize + Conv3D + Add + Relu + QuantizeV2',
'Dequantize + Conv3D + Add + Relu6 + QuantizeV2',
'Dequantize + Conv3D + Add + Elu + QuantizeV2',
'Dequantize + Conv3D + Add + LeakyRelu + QuantizeV2'

'Dequantize + Conv3D + Add + LeakyRelu + QuantizeV2',
'Dequantize + Conv2DBackpropInput + BiasAdd + QuantizeV2',
'Dequantize + Conv3DBackpropInputV2 + BiasAdd + QuantizeV2'
]

grappler_optimization: &common_grappler_optimization
Expand Down
13 changes: 10 additions & 3 deletions neural_compressor/adaptor/tensorflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,9 @@ class TensorFlowAdaptor(Adaptor):
"MatMul": "matmul",
"BatchMatMul": "matmul",
"BatchMatMulV2": "matmul",
"Pad": "pad"
"Pad": "pad",
"Conv2DBackpropInput": "deconv2d",
"Conv3DBackpropInputV2": "deconv3d"
}
def __init__(self, framework_specific_info):
super().__init__(framework_specific_info)
Expand Down Expand Up @@ -607,11 +609,12 @@ def _dump_model_op_stats(self, model_graphdef):
fp32_op_list=list(set(fp32_op_list_uint8).union(set(fp32_op_list_int8)))


int8_op_prefix_list = ['QuantizedConv2D', '_QuantizedConv3D', 'QuantizedDepthwise',
int8_op_prefix_list = ['QuantizedConv2D', '_FusedQuantizedConv3D', 'QuantizedDepthwise',
'QuantizedMaxPool', 'QuantizedAvgPool',
'QuantizedConcatV2', 'QuantizedMatMul',
'_QuantizedFusedBatchNorm', '_QuantizedMatMul',
'_QuantizedBatchMatMul', '_QuantizedFusedInstanceNorm']
'_QuantizedBatchMatMul', '_QuantizedFusedInstanceNorm',
'_FusedQuantizedDeconv2D', '_FusedQuantizedDeconv3D']
from tensorflow.python.framework import dtypes

res = {}
Expand All @@ -636,6 +639,10 @@ def _dump_model_op_stats(self, model_graphdef):
origin_op_type = 'DepthwiseConv2dNative'
if origin_op_type == 'BatchMatMul':
origin_op_type = 'BatchMatMulV2'
if origin_op_type == 'Deconv2D':
origin_op_type = 'Conv2DBackpropInput'
if origin_op_type == 'Deconv3D':
origin_op_type = 'Conv3DBackpropInputV2'
res[origin_op_type]['INT8'] += 1

if i.op in fp32_op_list:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,12 @@ def do_transformation(self):
if top_node.op == 'ConcatV2':
for i in range(top_node.attr['N'].i):
insert_node_pairs.append([top_node.input[i], self.post_node_name])
elif top_node.op == 'BatchMatMulV2':
elif top_node.op in ('BatchMatMul', 'BatchMatMulV2'):
insert_node_pairs.append([top_node.input[0], self.post_node_name])
if graph_info[top_node.input[1]].node.op != 'Const':
insert_node_pairs.append([top_node.input[1], self.post_node_name])
insert_node_pairs.append([top_node.input[1], self.post_node_name])
elif top_node.op in ('Conv2DBackpropInput', 'Conv3DBackpropInputV2'):
insert_node_pairs.append([top_node.input[2], self.post_node_name])
else:
refresh_pre_node_name = graph_info[self.pre_node_name].node.input[0]
# Check the Conv2D could be fused with previous Pad or not.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,12 +23,14 @@
from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper

class FuseConvRedundantDequantizeTransformer(GraphRewriterBase):
"""Fuse _QuantizedConv with the successor Dequantize Op.
_QuantizedConv2D only supports quantizedtype out_type currently
"""Fuse _QuantizedConv/_QuantizedDeConv with the successor Dequantize Op.
"""
fuse_patterns = [[
"_QuantizedConv3D",
"_QuantizedDepthwiseConv2D"
"_FusedQuantizedConv3D",
"_FusedQuantizedConv2D",
"_FusedQuantizedDepthwiseConv2D",
"_FusedQuantizedDeconv2D",
"_FusedQuantizedDeconv3D"
], ['Dequantize']]

def __init__(self, model, device='cpu'):
Expand Down

0 comments on commit 5ca9b4c

Please sign in to comment.