Remove inteltensorflow adaptor (#1313)
lvliang-intel committed Oct 17, 2022
1 parent 5ca9b4c commit a2fcb29
Showing 41 changed files with 881 additions and 1,324 deletions.
@@ -119,7 +119,7 @@ In examples directory, there is a transformer_lt_mlperf.yaml. We could remove mo
```yaml
model:
name: transformer_lt_mlperf
framework: inteltensorflow
framework: tensorflow
inputs: input_tokens
outputs: model/Transformer/strided_slice_15

@@ -17,7 +17,7 @@ version: 1.0

model:
name: transformer_lt_mlperf
framework: inteltensorflow
framework: tensorflow
inputs: input_tokens
outputs: model/Transformer/strided_slice_15

@@ -15,7 +15,7 @@

model: # mandatory. used to specify model specific information.
name: oob_models
framework: inteltensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension.
framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension.
inputs: input
outputs: output

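Note: the three hunks above all make the same one-line change, switching the `framework` key from the removed `inteltensorflow` backend to plain `tensorflow`. Below is a minimal sketch of loading such a config and reading that key, assuming PyYAML; the inline config text is an illustrative reconstruction modeled on the diffs above, not copied from the repository.

```python
# Hedged sketch: load a neural_compressor-style model config and read
# the framework field. PyYAML usage is standard; the config text is an
# illustrative reconstruction of the examples changed in this commit.
import yaml

CONFIG = """
model:
  name: transformer_lt_mlperf
  framework: tensorflow
  inputs: input_tokens
  outputs: model/Transformer/strided_slice_15
"""

cfg = yaml.safe_load(CONFIG)
framework = cfg['model']['framework']
assert framework != 'inteltensorflow'  # no longer a valid backend after #1313
print(framework)  # tensorflow
```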
773 changes: 0 additions & 773 deletions neural_compressor/adaptor/inteltensorflow.yaml

This file was deleted.

24 changes: 5 additions & 19 deletions neural_compressor/adaptor/tensorflow.py
@@ -76,16 +76,18 @@ def __init__(self, framework_specific_info):
self.query_handler = TensorflowQuery(local_config_file=os.path.join(
os.path.dirname(__file__), cfg_yaml_name), performance_only=self.performance_only)
self.itex_mode = cfg_yaml_name == 'tensorflow_itex.yaml'
self.qdq_enabled = cfg_yaml_name == 'inteltensorflow.yaml' or \
cfg_yaml_name == 'tensorflow_itex.yaml'

from pkg_resources import parse_version
import tensorflow as tf
self.new_api = True if parse_version(tf.version.VERSION) == parse_version('2.11.0202242') else False
self.qdq_enabled = cfg_yaml_name == 'tensorflow_itex.yaml' or self.new_api
self.op_wise_sequences = self.query_handler.get_eightbit_patterns(self.qdq_enabled)
self.optimization = self.query_handler.get_grappler_optimization_cfg()

self.fp32_results = []
self.fp32_preds_as_label = False
self.benchmark = (GLOBAL_STATE.STATE == MODE.BENCHMARK)
self.callbacks = []
self.new_api = False

def log_histogram(self, writer, tag, values, step=0, bins=1000):
import tensorflow as tf
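Note: the rewritten `__init__` above replaces the `inteltensorflow.yaml` check with two flags derived from the installed TensorFlow version. A minimal sketch of that gating pattern follows; `detect_capabilities` is an illustrative helper, not neural_compressor API.

```python
# Hedged sketch of the version gating introduced above: new_api is only
# enabled for the exact SPR-base TensorFlow build, and QDQ mode follows
# either the ITEX config or the new API.
from pkg_resources import parse_version

SPR_BASE_VERSION = '2.11.0202242'  # the build checked in the diff above

def detect_capabilities(tf_version, cfg_yaml_name):
    """Return (new_api, qdq_enabled) for a TF version and config file."""
    new_api = parse_version(tf_version) == parse_version(SPR_BASE_VERSION)
    qdq_enabled = cfg_yaml_name == 'tensorflow_itex.yaml' or new_api
    return new_api, qdq_enabled

print(detect_capabilities('2.11.0202242', 'tensorflow.yaml'))  # (True, True)
print(detect_capabilities('2.9.1', 'tensorflow_itex.yaml'))    # (False, True)
print(detect_capabilities('2.9.1', 'tensorflow.yaml'))         # (False, False)
```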
@@ -1438,9 +1440,6 @@ def diagnosis_helper(self, fp32_model, quan_model, tune_cfg, save_path):
class Tensorflow_ITEXAdaptor(TensorFlowAdaptor):
def __init__(self, framework_specific_info):
super().__init__(framework_specific_info)
from pkg_resources import parse_version
import tensorflow as tf
self.new_api = True if parse_version(tf.version.VERSION) >= parse_version('2.10.0') else False

@dump_elapsed_time("Pass quantize model")
def quantize(self, tune_cfg, model, data_loader, q_func=None):
@@ -1789,16 +1788,3 @@ def _generate_pattern(data):
final_out.append(_generate_pattern(similar_sequences))

return final_out

@adaptor_registry
class IntelTensorFlowAdaptor(TensorFlowAdaptor):
def __init__(self, framework_specific_info):
super().__init__(framework_specific_info)
from pkg_resources import parse_version
import tensorflow as tf
self.new_api = True if parse_version(tf.version.VERSION) >= parse_version('2.10.0') else False
# not enable qdq mode for old api
if not self.new_api:
self.qdq_enabled = False
self.op_wise_sequences = self.query_handler.get_eightbit_patterns()

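Note: the deleted `IntelTensorFlowAdaptor` was registered via the `@adaptor_registry` decorator, whose implementation is not part of this diff. The sketch below shows the general class-registry pattern such a decorator typically follows; every name in it is hypothetical, not the actual neural_compressor API.

```python
# Hypothetical registry sketch (not neural_compressor source): a decorator
# that maps a framework name to its adaptor class, so deleting a subclass
# also removes its framework key.
ADAPTORS = {}

def adaptor_registry(cls):
    """Register an adaptor class under a lower-cased framework name."""
    ADAPTORS[cls.__name__.replace('Adaptor', '').lower()] = cls
    return cls

@adaptor_registry
class TensorFlowAdaptor:
    def __init__(self, framework_specific_info):
        self.framework_specific_info = framework_specific_info

# With IntelTensorFlowAdaptor deleted, only the base key remains:
assert 'tensorflow' in ADAPTORS
assert 'inteltensorflow' not in ADAPTORS
```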
390 changes: 383 additions & 7 deletions neural_compressor/adaptor/tensorflow.yaml

Large diffs are not rendered by default.

39 changes: 19 additions & 20 deletions neural_compressor/adaptor/tf_utils/graph_converter.py
@@ -24,24 +24,20 @@

from collections import OrderedDict, UserDict
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import gfile
from neural_compressor.utils.utility import get_all_fp32_data
from neural_compressor.utils.utility import get_tensor_histogram
from neural_compressor.utils.utility import combine_histogram
from neural_compressor.utils.utility import CaptureOutputToFile
from neural_compressor.utils.utility import str2array
from neural_compressor.utils.utility import Dequantize, DequantizeWeight
from neural_compressor.conf.dotdict import deep_get
from neural_compressor.experimental.common import Model
from .transform_graph.insert_logging import InsertLogging
from .transform_graph.rerange_quantized_concat import RerangeQuantizedConcat
from .transform_graph.bias_correction import BiasCorrection
from .util import iterator_sess_run,version1_gt_version2,version1_eq_version2,version1_lt_version2
from .util import version1_gte_version2,version1_lte_version2
from .util import iterator_sess_run,version1_gt_version2,version1_eq_version2
from .util import version1_gte_version2,version1_lte_version2,version1_lt_version2
from .quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel
from .quantize_graph_common import QuantizeGraphHelper
from .quantize_graph.quantize_graph_conv import FuseNodeStartWithConv2d
from .quantize_graph.qdq.optimize_qdq import OptimizeQDQGraph

from .graph_util import GraphAnalyzer
@@ -236,6 +232,7 @@ def check_shape(tensor, data):

def _check_tf_version(self):
is_supported_version = False
is_sprbase_version = False
try:
from tensorflow import python
if (hasattr(python, "pywrap_tensorflow")
@@ -253,35 +250,37 @@ def _check_tf_version(self):

if version1_gte_version2(tf.version.VERSION, '2.9.0'):
is_supported_version = True

if version1_eq_version2(tf.version.VERSION, '1.15.0-up3'):
is_supported_version = True


if version1_eq_version2(tf.version.VERSION, '2.11.0202242'):
is_supported_version = True
is_sprbase_version = True

except Exception as e:
raise ValueError(e)
finally:
if version1_gt_version2(tf.version.VERSION, TF_SUPPORTED_MAX_VERSION):
if version1_gt_version2(tf.version.VERSION, TF_SUPPORTED_MAX_VERSION) and not is_sprbase_version:
logger.warning(
str('Please note the {} version of Intel® Optimizations for '
'TensorFlow is not fully verified! '
'Suggest to use the versions '
'between {} and {} if meet problem.').format(tf.version.VERSION,
TF_SUPPORTED_MIN_VERSION,
TF_SUPPORTED_MAX_VERSION))
str('Please note the {} version of TensorFlow is not fully verified! '
'Suggest to use the versions between {} and {} if meet problem.')
.format(tf.version.VERSION, TF_SUPPORTED_MIN_VERSION, TF_SUPPORTED_MAX_VERSION))

if version1_eq_version2(tf.version.VERSION, '2.5.0') and os.getenv('TF_ENABLE_MKL_NATIVE_FORMAT') != '0':
logger.fatal("Please set environment variable TF_ENABLE_MKL_NATIVE_FORMAT=0 "
"when TensorFlow 2.5.0 installed.")

if version1_gte_version2(tf.version.VERSION, '2.6.0') and os.getenv('TF_ENABLE_ONEDNN_OPTS') != '1':
if version1_gte_version2(tf.version.VERSION, '2.6.0') and \
version1_lt_version2(tf.version.VERSION, '2.9.0') and \
os.getenv('TF_ENABLE_ONEDNN_OPTS') != '1':
logger.fatal("Please set environment variable TF_ENABLE_ONEDNN_OPTS=1 "
"when TensorFlow >= 2.6.0 and < 2.9.0 installed.")

if not is_supported_version:
raise ValueError(
str('Please install Intel® Optimizations for TensorFlow '
'or MKL enabled TensorFlow from source code '
'within version >={} and <={}.').format(TF_SUPPORTED_MIN_VERSION,
TF_SUPPORTED_MAX_VERSION))
str('Please install TensorFlow within version >={} and <={}.')
.format(TF_SUPPORTED_MIN_VERSION, TF_SUPPORTED_MAX_VERSION))

def _check_args(self):
if self.model.workspace_path and not os.path.isdir(self.model.workspace_path) \
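Note: `_check_tf_version` above relies on the `version1_*` helpers imported from `.util` in the earlier hunk; their bodies are not shown in this diff. A minimal sketch, assuming they wrap `pkg_resources.parse_version` for PEP 440-style comparison:

```python
# Hedged sketch of the version comparison helpers (not the .util source).
from pkg_resources import parse_version

def version1_eq_version2(v1, v2):
    return parse_version(v1) == parse_version(v2)

def version1_gt_version2(v1, v2):
    return parse_version(v1) > parse_version(v2)

def version1_gte_version2(v1, v2):
    return parse_version(v1) >= parse_version(v2)

def version1_lt_version2(v1, v2):
    return parse_version(v1) < parse_version(v2)

# The guarded oneDNN check above only fires in the [2.6.0, 2.9.0) window:
v = '2.8.0'
print(version1_gte_version2(v, '2.6.0') and version1_lt_version2(v, '2.9.0'))  # True
```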
