Skip to content

Commit

Permalink
Fix TF2.10.0 UT fail (#1188)
Browse files — Browse the repository at this point in the history
  • Loading branch information
lvliang-intel committed Sep 8, 2022
1 parent d625648 commit 8130e7f
Show file tree
Hide file tree
Showing 5 changed files with 19 additions and 17 deletions.
4 changes: 2 additions & 2 deletions neural_compressor/adaptor/tf_utils/graph_converter.py
Expand Up @@ -68,7 +68,7 @@
from .graph_util import GraphRewriterHelper as Helper


TF_SUPPORTED_MAX_VERSION = '2.9.1'
TF_SUPPORTED_MAX_VERSION = '2.10.0'
TF_SUPPORTED_MIN_VERSION = '1.14.0'

logger = logging.getLogger()
Expand Down Expand Up @@ -247,7 +247,7 @@ def _check_tf_version(self):
if version1_gte_version2(tf.version.VERSION, '2.9.0'):
is_supported_version = True

if tf.version.VERSION == '1.15.0-up3':
if version1_eq_version2(tf.version.VERSION, '1.15.0-up3'):
is_supported_version = True

except Exception as e:
Expand Down
Expand Up @@ -44,9 +44,9 @@
from .graph_rewriter.int8.post_quantized_op_cse import PostCseOptimizer
from .graph_rewriter.int8.meta_op_optimizer import MetaInfoChangingMemOpOptimizer
from .graph_rewriter.int8.rnn_convert import QuantizedRNNConverter
from .util import version1_gte_version2,version1_gt_version2,version1_eq_version2


TF_SUPPORTED_MAX_VERSION = '2.9.1'
TF_SUPPORTED_MAX_VERSION = '2.10.0'
TF_SUPPORTED_MIN_VERSION = '1.14.0'

logger = logging.getLogger()
Expand Down Expand Up @@ -107,29 +107,31 @@ def _check_tf_version(self):
if IsMklEnabled() and (TF_SUPPORTED_MIN_VERSION <= tf.version.VERSION):
is_supported_version = True

if tf.version.VERSION >= '2.6.0' and os.getenv('TF_ENABLE_ONEDNN_OPTS') == '1':
if version1_gte_version2(tf.version.VERSION, '2.6.0') and os.getenv('TF_ENABLE_ONEDNN_OPTS') == '1':
is_supported_version = True

if version1_gte_version2(tf.version.VERSION, '2.9.0'):
is_supported_version = True

if tf.version.VERSION >= '2.9.0':
if version1_eq_version2(tf.version.VERSION, '1.15.0-up3'):
is_supported_version = True

except Exception as e:
raise ValueError(e)
finally:# pragma: no cover
if tf.version.VERSION > TF_SUPPORTED_MAX_VERSION:
if version1_gt_version2(tf.version.VERSION, TF_SUPPORTED_MAX_VERSION):
logger.warning(
str('Please note the {} version of Intel® Optimizations for '
'TensorFlow is not fully verified! '
'Suggest to use the versions '
'between {} and {} if meet problem.').format(tf.version.VERSION,
TF_SUPPORTED_MIN_VERSION,
TF_SUPPORTED_MAX_VERSION))
if tf.version.VERSION == '2.5.0' and os.getenv('TF_ENABLE_MKL_NATIVE_FORMAT') != '0':
if version1_eq_version2(tf.version.VERSION, '2.5.0') and os.getenv('TF_ENABLE_MKL_NATIVE_FORMAT') != '0':
logger.fatal("Please set environment variable TF_ENABLE_MKL_NATIVE_FORMAT=0 "
"when TensorFlow 2.5.0 installed.")

if tf.version.VERSION >= '2.6.0' and tf.version.VERSION < '2.9.0' \
and os.getenv('TF_ENABLE_ONEDNN_OPTS') != '1':
if version1_gte_version2(tf.version.VERSION, '2.6.0') and os.getenv('TF_ENABLE_ONEDNN_OPTS') != '1':
logger.fatal("Please set environment variable TF_ENABLE_ONEDNN_OPTS=1 "
"when TensorFlow >= 2.6.0 and < 2.9.0 installed.")

Expand Down
Expand Up @@ -39,11 +39,8 @@ def do_transformation(self):
valid_ops = ('BiasAdd', 'Add', 'AddV2', 'AddN')
target_nodes = g.query_fusion_pattern_nodes([['MatMul', 'Conv2D'],])
for i in target_nodes:
# only apply this pass for tensorflow release 2.9.1 and lower version for
# old quantization API.
# only apply this pass for tensorflow old quantization API, pre_optimize does this check
# use conv+dummy_biasadd+relu because TF do not support conv+relu now.
if version1_gt_version2(tf.version.VERSION, '2.9.1'):
continue
if i[0] in self.outputs:
continue
next_node_names = graph_info[i[0]].outputs
Expand Down
Expand Up @@ -627,7 +627,7 @@ def _add_quantize_down_nodes(self,
# Add a RequantizationRange node for finding the min and max values.
requant_range_node = helper.create_node(
"RequantizationRangePerChannel"
if self.per_channel else "RequantizationRange",
if self.per_channel or original_node.op == 'DepthwiseConv2dNative' else "RequantizationRange",
original_node.name + "_eightbit_requant_range", quantized_outputs)

if self.per_channel:
Expand Down Expand Up @@ -661,7 +661,8 @@ def _add_quantize_down_nodes(self,
]

requantize_node = helper.create_node(
"RequantizePerChannel" if self.per_channel else "Requantize",
"RequantizePerChannel" if self.per_channel or original_node.op == 'DepthwiseConv2dNative' \
else "Requantize",
original_node.name + "_eightbit_requantize",
quantized_outputs + min_max_inputs)
if self.per_channel:
Expand Down
4 changes: 3 additions & 1 deletion test/adaptor/tensorflow_adaptor/test_tensorboard.py
Expand Up @@ -10,6 +10,7 @@
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from neural_compressor.adaptor.tf_utils.util import version1_gt_version2
tf.compat.v1.disable_eager_execution()

def build_fake_yaml():
Expand Down Expand Up @@ -195,7 +196,8 @@ def tearDownClass(self):
shutil.rmtree("saved", ignore_errors=True)
shutil.rmtree("runs/", ignore_errors=True)

@unittest.skipIf(tf.version.VERSION > '2.5.0', " Skip test_bf16_fallback case for tf 2.6.0 and above.")
@unittest.skipIf(version1_gt_version2(tf.version.VERSION, '2.5.0'), \
"Skip test_bf16_fallback case for tf 2.6.0 and above.")
def test_run_basic_one_trial(self):
from neural_compressor.experimental import Quantization, common

Expand Down

0 comments on commit 8130e7f

Please sign in to comment.