From b9f38bc721b232f49f265a48f3b51ce911c19d9d Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Sat, 3 Dec 2022 20:21:23 +0800 Subject: [PATCH 01/14] INC New API TF oob, wide_deep_large_ds, 3dunet-mlperf examples Signed-off-by: Lv, Liang1 --- .../oob_models/quantization/ptq/README.md | 26 ++++- .../oob_models/quantization/ptq/config.yaml | 38 -------- .../quantization/ptq/config_itex.yaml | 38 -------- .../quantization/ptq/dataloaders.py | 18 ++++ .../quantization/ptq/find_outputs.py | 20 +++- .../quantization/ptq/model_detail.py | 18 ++++ .../oob_models/quantization/ptq/run_tuning.sh | 2 - .../quantization/ptq/tf_benchmark.py | 94 +++++++++++++------ .../ptq/tf_savemodel_benchmark.py | 24 ++++- .../oob_models/quantization/ptq/utils.py | 28 ++++-- .../quantization/ptq/README.md | 27 +++++- .../quantization/ptq/inference.py | 53 +++++++++-- .../quantization/ptq/run_benchmark.sh | 8 +- .../quantization/ptq/run_tuning.sh | 4 - .../quantization/ptq/wide_deep_large_ds.yaml | 42 --------- .../ptq/wide_deep_large_ds_itex.yaml | 42 --------- .../quantization/ptq/3dunet-mlperf.yaml | 34 ------- .../quantization/ptq/3dunet-mlperf_itex.yaml | 34 ------- .../3dunet-mlperf/quantization/ptq/README.md | 26 ++++- .../quantization/ptq/run_accuracy.py | 50 ++++++++-- .../quantization/ptq/run_benchmark.sh | 3 - .../quantization/ptq/run_tuning.sh | 5 - 22 files changed, 318 insertions(+), 316 deletions(-) delete mode 100644 examples/tensorflow/oob_models/quantization/ptq/config.yaml delete mode 100644 examples/tensorflow/oob_models/quantization/ptq/config_itex.yaml delete mode 100644 examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds.yaml delete mode 100644 examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds_itex.yaml delete mode 100644 examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf.yaml delete mode 100644 examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf_itex.yaml diff --git a/examples/tensorflow/oob_models/quantization/ptq/README.md b/examples/tensorflow/oob_models/quantization/ptq/README.md index 4ca0886f7ce..3e619b88538 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/README.md +++ b/examples/tensorflow/oob_models/quantization/ptq/README.md @@ -6,11 +6,12 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow OO # Prerequisite ## 1. Installation - Recommend python 3.6 or higher version. + Recommend python 3.8 or higher version. ```bash # Install Intel® Neural Compressor pip install neural-compressor + # Install Intel® Tensorflow pip install intel-tensorflow ``` > Note: Supported Tensorflow [Version](../../../../../README.md#supported-frameworks). @@ -22,7 +23,8 @@ Intel Extension for Tensorflow is mandatory to be installed for quantizing the m ```shell pip install --upgrade intel-extension-for-tensorflow[gpu] ``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) +Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for latest Intel GPU driver installation. 
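+Once the package and the drivers are installed, a quick sanity check from Python is shown below (a minimal sketch; the exact device type string reported may vary with the Intel Extension for Tensorflow version):
+```python
+import tensorflow as tf
+
+# With intel-extension-for-tensorflow[gpu] and the GPU drivers installed,
+# an Intel GPU is expected to appear in this list (typically as an "XPU" device).
+print(tf.config.list_physical_devices())
+```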
+For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers). ### Quantizing the model on Intel CPU(Experimental) Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. @@ -86,8 +88,24 @@ List models names can get with open_model_zoo: | ssd_inception_v2_coco | http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz | | ssd-resnet34 300x300 | https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/ssd_resnet34_fp32_bs1_pretrained_model.pb | -## 5. Config the yaml file -In examples directory, there is a config.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The config_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. +## 5. Quantization Config +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. + +``` +config = PostTrainingQuantConfig( + device="gpu", + backend="tensorflow_itex", + inputs=list(inputs.keys()), + outputs=outputs, + approach="static", + calibration_sampling_size=[1], + op_type_list=None, + op_name_list=None, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) +``` # Run ## run tuning diff --git a/examples/tensorflow/oob_models/quantization/ptq/config.yaml b/examples/tensorflow/oob_models/quantization/ptq/config.yaml deleted file mode 100644 index 86e26e1ff8c..00000000000 --- a/examples/tensorflow/oob_models/quantization/ptq/config.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: oob_models - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: input - outputs: output - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 1 # optional. default value is 100. used to set how many samples should be used in calibration. - model_wise: # optional. 
tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/oob_models/quantization/ptq/config_itex.yaml b/examples/tensorflow/oob_models/quantization/ptq/config_itex.yaml deleted file mode 100644 index 44db3b3910c..00000000000 --- a/examples/tensorflow/oob_models/quantization/ptq/config_itex.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: oob_models - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: input - outputs: output - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 1 # optional. default value is 100. used to set how many samples should be used in calibration. - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py b/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py index 59abb70aec9..282f878f419 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py +++ b/examples/tensorflow/oob_models/quantization/ptq/dataloaders.py @@ -1,3 +1,21 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + from neural_compressor.experimental.data.dataloaders.fetcher import FETCHERS from neural_compressor.experimental.data.dataloaders.sampler import BatchSampler from neural_compressor.experimental.data.dataloaders.default_dataloader import DefaultDataLoader diff --git a/examples/tensorflow/oob_models/quantization/ptq/find_outputs.py b/examples/tensorflow/oob_models/quantization/ptq/find_outputs.py index f5b9eba9336..e0eed505a37 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/find_outputs.py +++ b/examples/tensorflow/oob_models/quantization/ptq/find_outputs.py @@ -1,8 +1,22 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== import argparse -import os -import sys -import re from utils import * from neural_compressor.utils import logger diff --git a/examples/tensorflow/oob_models/quantization/ptq/model_detail.py b/examples/tensorflow/oob_models/quantization/ptq/model_detail.py index 07ce37cf892..430c551af41 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/model_detail.py +++ b/examples/tensorflow/oob_models/quantization/ptq/model_detail.py @@ -1,3 +1,21 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + from utils import * import numpy as np diff --git a/examples/tensorflow/oob_models/quantization/ptq/run_tuning.sh b/examples/tensorflow/oob_models/quantization/ptq/run_tuning.sh index 2971bedf7c3..b2fcf20898e 100755 --- a/examples/tensorflow/oob_models/quantization/ptq/run_tuning.sh +++ b/examples/tensorflow/oob_models/quantization/ptq/run_tuning.sh @@ -162,7 +162,6 @@ function set_args { function run_tuning { input="input" output="predict" - yaml='./config.yaml' extra_cmd+=' --num_warmup 10 -n 500 ' if [[ "${models_need_name[@]}" =~ " ${topology} " ]]; then @@ -189,7 +188,6 @@ function run_tuning { python tf_benchmark.py \ --model_path ${input_model} \ --output_path ${output_model} \ - --yaml ${yaml} \ --tune \ ${extra_cmd} diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py index 1c352ef713a..9c07a8533af 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py @@ -1,10 +1,27 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + import os import sys import time import logging import argparse import math -import yaml import numpy as np from tensorflow.python.client import timeline @@ -153,24 +170,6 @@ def run_benchmark(model_details, args, find_graph_def): print("Latency: {:.3f} ms".format(latency)) print("Throughput: {:.2f} fps".format(throughput)) -def _write_inputs_outputs_to_yaml(yaml_path, output_yaml_path, inputs, outputs): - # deal with the inputs/outputs at yaml - with open(yaml_path, 'r') as f: - content = yaml.safe_load(f) - - tmp_i = '' - tmp_o = '' - for item in inputs: - tmp_i = tmp_i + str(item) + ',' - for item in outputs: - tmp_o = tmp_o + str(item) + ',' - content['model'].update({'inputs': tmp_i[:-1]}) - content['model'].update({'outputs': tmp_o[:-1]}) - print(content) - - with open(output_yaml_path, 'w') as nf: - yaml.dump(content, nf) - def oob_collate_data_func(batch): """Puts each data field into a pd frame with outer dimension batch size""" elem = batch[0] @@ -330,12 +329,43 @@ def __iter__(self): # tune if args.tune: # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - from neural_compressor.experimental import Quantization, common inputs = model_detail['input'] outputs = model_detail['output'] - _write_inputs_outputs_to_yaml(args.yaml, "./config_tmp.yaml", list(inputs.keys()), outputs) - quantizer = Quantization("./config_tmp.yaml") + from neural_compressor.experimental import common + from neural_compressor.quantization import fit + from neural_compressor.config import PostTrainingQuantConfig, \ + TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + + set_random_seed(9527) + + tuning_criterion = TuningCriterion( + strategy="basic", + timeout=0, + max_trials=100, + objective="performance") + + tolerable_loss = AccuracyLoss(loss=0.01) + + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, + criterion='relative', + tolerable_loss=tolerable_loss) + + config = PostTrainingQuantConfig( + device="cpu", + backend="tensorflow", + inputs=list(inputs.keys()), + outputs=outputs, + approach="static", + calibration_sampling_size=[1], + op_type_list=None, + op_name_list=None, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) + # generate dummy data if model_detail.get('sparse_d_shape'): sparse_input_names = [list(i.keys()) for i in model_detail['sparse_d_shape'].values()] @@ -343,16 +373,18 @@ def __iter__(self): for i in range(1, len(sparse_input_names)): sparse_input_seq += sparse_input_names[i] input_dense_shape = [tuple(list(i.values())[0]) for i in model_detail['sparse_d_shape'].values()] - dataset = quantizer.dataset(dataset_type='sparse_dummy_v2', + from neural_compressor.data import DATASETS + dataset = DATASETS('tensorflow')['sparse_dummy_v2']( dense_shape=input_dense_shape, label_shape=[[1] for _ in range(len(input_dense_shape))], sparse_ratio=[1-1/np.multiply(*i) for i in input_dense_shape]) seq_idxs = [sparse_input_seq.index(i) for i in inputs.keys()] - quantizer.calib_dataloader = common.DataLoader(dataset=dataset, + calib_dataloader = common.DataLoader(dataset=dataset, batch_size=1, collate_fn=oob_collate_sparse_func) else: - dataset = quantizer.dataset(dataset_type='dummy', + from neural_compressor.data import DATASETS + dataset = DATASETS('tensorflow')['dummy']( shape=inputs_shape, low=low, high=high, dtype=inputs_dtype, @@ -362,13 +394,19 @@ def __iter__(self): Dataloader = dataloader_dict[args.model_name] else: 
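+            # Fall back to the generic Neural Compressor DataLoader when no
+            # model-specific dataloader is registered for this model name.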
Dataloader = common.DataLoader - quantizer.calib_dataloader = Dataloader(dataset=dataset, + calib_dataloader = Dataloader(dataset=dataset, batch_size=args.batch_size, collate_fn=oob_collate_data_func \ if model_detail.get('model_name')!='DLRM' \ else oob_dlrm_collate_func) - quantizer.model = args.model_path - q_model = quantizer.fit() + q_model = fit( + model=common.Model(args.model_path), + conf=config, + calib_dataloader=calib_dataloader, + calib_func=None, + eval_dataloader=None, + eval_func=None, + eval_metric=None) q_model.save(args.output_path) # benchmark diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_savemodel_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_savemodel_benchmark.py index a3e9d0a3d7a..cd8473914fa 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_savemodel_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_savemodel_benchmark.py @@ -1,6 +1,22 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + import os -import cv2 -import sys import time import argparse import numpy as np @@ -12,7 +28,7 @@ def get_dynamic_inputshape(model_dir,dshape): - # judge object_detection model + # judge object_detection model path = model_dir.split('/') is_detection = False for item in path: @@ -100,7 +116,7 @@ def savemodel_valid(meta_graph): flag=False for op in set(all_op_types): if op in valid_op: - flag=True + flag=True return flag def run_benchmark(args): diff --git a/examples/tensorflow/oob_models/quantization/ptq/utils.py b/examples/tensorflow/oob_models/quantization/ptq/utils.py index 57fba066fc6..0a1fd51215c 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/utils.py +++ b/examples/tensorflow/oob_models/quantization/ptq/utils.py @@ -1,7 +1,23 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (c) 2022 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + import os -import sys import numpy as np -from google.protobuf import text_format from tensorflow.python.framework import graph_util from tensorflow.python.platform import gfile @@ -25,7 +41,7 @@ def generate_data(input_shape, input_dtype="float32", return np.random.randn(batch_size).astype(input_dtype) dummy_input = np.random.randn(*input_shape).astype(input_dtype) # handle the case that the shape of the input is one-dimensional - if newaxis == False: + if newaxis is False: return np.repeat(dummy_input, batch_size, axis=0) return np.repeat(dummy_input[np.newaxis, :], batch_size, axis=0) @@ -42,7 +58,7 @@ def freeze_graph(input_checkpoint, output_graph, output_node_names): with tf.compat.v1.Session() as sess: saver.restore(sess, input_checkpoint) - output_graph_def = graph_util.convert_variables_to_constants( + output_graph_def = graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, output_node_names=output_node_names) @@ -50,7 +66,7 @@ def freeze_graph(input_checkpoint, output_graph, output_node_names): with tf.io.gfile.GFile(output_graph, "wb") as f: f.write(output_graph_def.SerializeToString()) print("convert done!!") - print("%d ops in the final graph." % len(output_graph_def.node)) + print("%d ops in the final graph." % len(output_graph_def.node)) return output_graph_def @@ -92,7 +108,7 @@ def write_graph(out_graph_def, out_graph_file): :return: None. """ if not isinstance(out_graph_def, tf.compat.v1.GraphDef): - raise ValueError( + raise ValueError( 'out_graph_def is not instance of TensorFlow GraphDef.') if out_graph_file and not os.path.exists(os.path.dirname(out_graph_file)): raise ValueError('"output_graph" directory does not exists.') diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md index ae21ba36de6..5b44c269579 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md @@ -24,7 +24,8 @@ Intel Extension for Tensorflow is mandatory to be installed for quantizing the m ```shell pip install --upgrade intel-extension-for-tensorflow[gpu] ``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) +Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for latest Intel GPU driver installation. +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers). #### Quantizing the model on Intel CPU(Experimental) Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. @@ -73,8 +74,28 @@ Two .tfrecords files are generated and will be used later on: wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/wide_deep_fp32_pretrained_model.pb ``` -### 8. Config the yaml file -In examples directory, there is a wide_deep_large_ds.yaml for tuning the model on Intel CPUs. 
The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The wide_deep_large_ds_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. +### 8. Quantization Config +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. + +``` +config = PostTrainingQuantConfig( + device="gpu", + backend="tensorflow_itex", + inputs=["new_numeric_placeholder", "new_categorical_placeholder"], + outputs=["import/head/predictions/probabilities"], + approach="static", + calibration_sampling_size=[2000], + op_type_list=None, + op_name_list={ + 'import/dnn/hiddenlayer_0/MatMul': { + 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, + } + }, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) +``` ### 9. Run Command # The cmd of running WnD diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py index 839ab874692..9afdc52bf6f 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py @@ -178,17 +178,54 @@ def auto_tune(self): Returns: graph: it will return a quantized pb """ - from neural_compressor.experimental import Quantization + from neural_compressor.experimental import common + from neural_compressor.quantization import fit + from neural_compressor.config import PostTrainingQuantConfig, \ + TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed infer_graph = load_graph(self.args.input_graph) - quantizer = Quantization(self.args.config) + set_random_seed(9527) + + tuning_criterion = TuningCriterion( + strategy="basic", + timeout=0, + max_trials=100, + objective="accuracy") + + tolerable_loss = AccuracyLoss(loss=0.01) + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, + criterion='relative', + tolerable_loss=tolerable_loss) + + config = PostTrainingQuantConfig( + device="cpu", + backend="tensorflow", + inputs=["new_numeric_placeholder", "new_categorical_placeholder"], + outputs=["import/head/predictions/probabilities"], + approach="static", + calibration_sampling_size=[2000], + op_type_list=None, + op_name_list={ + 'import/dnn/hiddenlayer_0/MatMul': { + 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, + } + }, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) + if self.args.calib_data: - quantizer.model = infer_graph - quantizer.calib_dataloader = Dataloader(self.args.calib_data, self.args.batch_size) - quantizer.eval_func = self.eval_inference - q_model = quantizer.fit() + q_model = fit( + model=common.Model(infer_graph), + conf=config, + calib_dataloader=Dataloader(self.args.calib_data, self.args.batch_size), + calib_func=None, + eval_dataloader=None, + eval_func=self.eval_inference, + eval_metric=None) return q_model - else: - 
print("Please provide calibration dataset!") + print("Please provide calibration dataset!") def eval_inference(self, infer_graph): print("Run inference") diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh index d0cab565bb4..ae31da6513e 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh @@ -15,9 +15,6 @@ function init_params { for var in "$@" do case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; --dataset_location=*) dataset_location=$(echo $var |cut -f2 -d=) ;; @@ -30,9 +27,6 @@ function init_params { --batch_size=*) batch_size=$(echo $var |cut -f2 -d=) ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; *) echo "Error: No such parameter: ${var}" exit 1 @@ -46,7 +40,7 @@ function define_mode { if [[ ${mode} == "accuracy" ]]; then mode_cmd=" --benchmark --accuracy_only" elif [[ ${mode} == "benchmark" ]]; then - mode_cmd=" --steps ${iters} --benchmark" + mode_cmd=" --benchmark" else echo "Error: No such mode: ${mode}" exit 1 diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_tuning.sh b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_tuning.sh index 0f0a24bd456..175651a44b3 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_tuning.sh +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_tuning.sh @@ -14,9 +14,6 @@ function init_params { for var in "$@" do case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; --dataset_location=*) dataset_location=$(echo $var |cut -f2 -d=) ;; @@ -45,7 +42,6 @@ function run_tuning { --accuracy_only \ --batch_size 1000 \ --output_graph ${output_model} \ - --config wide_deep_large_ds.yaml \ --tune } diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds.yaml b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds.yaml deleted file mode 100644 index f464c17260d..00000000000 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: wide_deep_large_ds - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: new_numeric_placeholder,new_categorical_placeholder - outputs: import/head/predictions/probabilities # optional. inputs and outputs fields are only required for tensorflow backend. - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. 
tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 2000 # optional. default value is 100. used to set how many samples should be used in calibration. - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - op_wise: { - 'import/dnn/hiddenlayer_0/MatMul': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, - } - } - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds_itex.yaml b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds_itex.yaml deleted file mode 100644 index 59a9d2e6ee3..00000000000 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/wide_deep_large_ds_itex.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: wide_deep_large_ds - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: new_numeric_placeholder,new_categorical_placeholder - outputs: import/head/predictions/probabilities # optional. inputs and outputs fields are only required for tensorflow backend. - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 2000 # optional. default value is 100. used to set how many samples should be used in calibration. - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - op_wise: { - 'import/dnn/hiddenlayer_0/MatMul': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, - } - } - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. 
combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf.yaml b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf.yaml deleted file mode 100644 index 2d973ab66e9..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -device: cpu # optional. default value is cpu, other value is gpu. - -model: - name: 3dunet-mlperf - framework: tensorflow - -quantization: - calibration: - sampling_size: 40 - -tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - max_trials: 100 - random_seed: 9527 diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf_itex.yaml b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf_itex.yaml deleted file mode 100644 index 801aec72d52..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/3dunet-mlperf_itex.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. 
- -model: - name: 3dunet-mlperf - framework: tensorflow_itex - -quantization: - calibration: - sampling_size: 40 - -tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - max_trials: 100 - random_seed: 9527 diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md index cab2e7e39dc..b6f1be0d4c6 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md @@ -25,7 +25,9 @@ Intel Extension for Tensorflow is mandatory to be installed for quantizing the m ```shell pip install --upgrade intel-extension-for-tensorflow[gpu] ``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) + +Please refer to the [Installation Guides](https://dgpu-docs.intel.com/installation-guides/ubuntu/ubuntu-focal-dc.html) for latest Intel GPU driver installation. +For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers). #### Quantizing the model on Intel CPU(Experimental) Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. @@ -49,13 +51,29 @@ pip install --upgrade intel-extension-for-tensorflow[cpu] ### 6. Prepare Calibration set The calibration set is the forty images listed in brats_cal_images_list.txt. They are randomly selected from Fold 0, Fold 2, Fold 3, and Fold 4 of BraTS 2019 Training Dataset. -### 7. Config the yaml file -In examples directory, there is a 3dunet-mlperf.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The 3dunet-mlperf_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. +### 7. Quantization Config +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. + +``` +config = PostTrainingQuantConfig( + device="gpu", + backend="tensorflow_itex", + inputs=[], + outputs=[], + approach="static", + calibration_sampling_size=[40], + op_type_list=None, + op_name_list=None, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) +``` ### 8. 
Test command * `export nnUNet_preprocessed=/build/preprocessed_data` * `export nnUNet_raw_data_base=/build/raw_data` * `export RESULTS_FOLDER=/build/result` -* `pip install requirements.txt` +* `pip install -r requirements.txt` * `python run_accuracy.py --input-model= --data-location= --calib-preprocess= --iters=100 --batch-size=1 --mode=benchmark --bfloat16 0` diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index 7ed1dca741d..62ccfe03adf 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -64,8 +64,6 @@ def get_args(): arg_parser.add_argument('-o', "--output-model", help='Specify the output graph.', dest='output_model') - arg_parser.add_argument('--config', - help='Specify the yaml config file.') arg_parser.add_argument('-c', "--calib-preprocess", help='Specify calibration preprocess dir.', dest='calib_preprocess') @@ -206,11 +204,49 @@ def __len__(self): print(args) graph = load_graph(args.input_model) if args.mode == 'tune': - quantizer = Quantization(args.config) - quantizer.calib_dataloader = common.DataLoader(CalibrationDL()) - quantizer.model = common.Model(graph) - quantizer.eval_func = eval_func - q_model = quantizer.fit() + from neural_compressor.experimental import common + from neural_compressor.quantization import fit + from neural_compressor.config import PostTrainingQuantConfig, \ + TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + + set_random_seed(9527) + + tuning_criterion = TuningCriterion( + strategy="basic", + timeout=0, + max_trials=100, + objective="accuracy") + + tolerable_loss = AccuracyLoss(loss=0.01) + + accuracy_criterion = AccuracyCriterion( + higher_is_better=True, + criterion='relative', + tolerable_loss=tolerable_loss) + + config = PostTrainingQuantConfig( + device="cpu", + backend="tensorflow", + inputs=[], + outputs=[], + approach="static", + calibration_sampling_size=[40], + op_type_list=None, + op_name_list=None, + reduce_range=None, + extra_precisions=[], + tuning_criterion=tuning_criterion, + accuracy_criterion=accuracy_criterion) + + q_model = fit( + model=common.Model(graph), + conf=config, + calib_dataloader=common.DataLoader(CalibrationDL()), + calib_func=None, + eval_dataloader=common.DataLoader(CalibrationDL()), + eval_func=eval_func, + eval_metric=None) + try: q_model.save(args.output_model) except Exception as e: diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh index 77d20e3b9fe..9d886a3fbcb 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh @@ -19,9 +19,6 @@ function init_params { for var in "$@" do case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; --mode=*) mode=$(echo $var |cut -f2 -d=) ;; diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_tuning.sh b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_tuning.sh index 218d91e49c8..749b0c09c2b 100644 --- 
a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_tuning.sh +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_tuning.sh @@ -17,9 +17,6 @@ function init_params { for var in "$@" do case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; --dataset_location=*) dataset_location=$(echo $var |cut -f2 -d=) ;; @@ -40,13 +37,11 @@ function init_params { # run_tuning function run_tuning { - config=$topology'.yaml' python run_accuracy.py \ --input-model=${input_model} \ --output-model=${output_model} \ --data-location=${dataset_location} \ --calib-preprocess=${BUILD_DIR}/calib_preprocess \ - --config=${config} \ --mode=tune } From e04cb1915417886841e433b10ff98490de13b0e2 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Sat, 3 Dec 2022 20:52:24 +0800 Subject: [PATCH 02/14] fix pyspelling check Signed-off-by: Lv, Liang1 --- .azure-pipelines/scripts/codeScan/pyspelling/lpot_dict.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.azure-pipelines/scripts/codeScan/pyspelling/lpot_dict.txt b/.azure-pipelines/scripts/codeScan/pyspelling/lpot_dict.txt index 87492ef780a..3252aa1a824 100644 --- a/.azure-pipelines/scripts/codeScan/pyspelling/lpot_dict.txt +++ b/.azure-pipelines/scripts/codeScan/pyspelling/lpot_dict.txt @@ -2380,3 +2380,5 @@ grappler amsgrad qoperator apis +PostTrainingQuantConfig +dgpu From f1bc6776a8d65a43ba8cb99e839d86d7e3db04f1 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Mon, 5 Dec 2022 15:01:36 +0800 Subject: [PATCH 03/14] update readme and remove default setting Signed-off-by: Lv, Liang1 --- .../oob_models/quantization/ptq/README.md | 16 ++------ .../quantization/ptq/tf_benchmark.py | 34 ++--------------- .../quantization/ptq/README.md | 20 ++-------- .../quantization/ptq/inference.py | 30 ++------------- .../3dunet-mlperf/quantization/ptq/README.md | 16 ++------ .../quantization/ptq/run_accuracy.py | 37 ++----------------- 6 files changed, 21 insertions(+), 132 deletions(-) diff --git a/examples/tensorflow/oob_models/quantization/ptq/README.md b/examples/tensorflow/oob_models/quantization/ptq/README.md index 3e619b88538..c2344fc883c 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/README.md +++ b/examples/tensorflow/oob_models/quantization/ptq/README.md @@ -89,22 +89,14 @@ List models names can get with open_model_zoo: | ssd-resnet34 300x300 | https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/ssd_resnet34_fp32_bs1_pretrained_model.pb | ## 5. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=list(inputs.keys()), - outputs=outputs, - approach="static", - calibration_sampling_size=[1], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... 
+ ) ``` # Run diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py index 9c07a8533af..fea77e5d60f 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py @@ -334,37 +334,13 @@ def __iter__(self): from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed set_random_seed(9527) - - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="performance") - - tolerable_loss = AccuracyLoss(loss=0.01) - - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", inputs=list(inputs.keys()), outputs=outputs, - approach="static", - calibration_sampling_size=[1], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + calibration_sampling_size=[1]) # generate dummy data if model_detail.get('sparse_d_shape'): @@ -402,11 +378,7 @@ def __iter__(self): q_model = fit( model=common.Model(args.model_path), conf=config, - calib_dataloader=calib_dataloader, - calib_func=None, - eval_dataloader=None, - eval_func=None, - eval_metric=None) + calib_dataloader=calib_dataloader) q_model.save(args.output_path) # benchmark diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md index 5b44c269579..5a05dcce516 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md @@ -75,26 +75,14 @@ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/wide_ ``` ### 8. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=["new_numeric_placeholder", "new_categorical_placeholder"], - outputs=["import/head/predictions/probabilities"], - approach="static", - calibration_sampling_size=[2000], - op_type_list=None, - op_name_list={ - 'import/dnn/hiddenlayer_0/MatMul': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, - } - }, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... + ) ``` ### 9. 
Run Command diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py index 9afdc52bf6f..ad950691ee0 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py @@ -180,50 +180,26 @@ def auto_tune(self): """ from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed infer_graph = load_graph(self.args.input_graph) set_random_seed(9527) - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="accuracy") - - tolerable_loss = AccuracyLoss(loss=0.01) - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", inputs=["new_numeric_placeholder", "new_categorical_placeholder"], outputs=["import/head/predictions/probabilities"], - approach="static", calibration_sampling_size=[2000], - op_type_list=None, op_name_list={ 'import/dnn/hiddenlayer_0/MatMul': { 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, } - }, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + }) if self.args.calib_data: q_model = fit( model=common.Model(infer_graph), conf=config, calib_dataloader=Dataloader(self.args.calib_data, self.args.batch_size), - calib_func=None, - eval_dataloader=None, - eval_func=self.eval_inference, - eval_metric=None) + eval_func=self.eval_inference) return q_model print("Please provide calibration dataset!") diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md index b6f1be0d4c6..e4d95f5c5fd 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md @@ -52,22 +52,14 @@ pip install --upgrade intel-extension-for-tensorflow[cpu] The calibration set is the forty images listed in brats_cal_images_list.txt. They are randomly selected from Fold 0, Fold 2, Fold 3, and Fold 4 of BraTS 2019 Training Dataset. ### 7. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=[], - outputs=[], - approach="static", - calibration_sampling_size=[40], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... + ) ``` ### 8. 
Test command diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index 62ccfe03adf..738a61f3f40 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -206,47 +206,16 @@ def __len__(self): if args.mode == 'tune': from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed - + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed set_random_seed(9527) - - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="accuracy") - - tolerable_loss = AccuracyLoss(loss=0.01) - - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", - inputs=[], - outputs=[], - approach="static", - calibration_sampling_size=[40], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + config = PostTrainingQuantConfig(calibration_sampling_size=[40]) q_model = fit( model=common.Model(graph), conf=config, calib_dataloader=common.DataLoader(CalibrationDL()), - calib_func=None, eval_dataloader=common.DataLoader(CalibrationDL()), - eval_func=eval_func, - eval_metric=None) - + eval_func=eval_func) try: q_model.save(args.output_model) except Exception as e: From c67b308e64a10e7a517c2992a8e5fbd33b643ff9 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Tue, 6 Dec 2022 00:09:35 +0800 Subject: [PATCH 04/14] update code according to comments Signed-off-by: Lv, Liang1 --- examples/.config/model_params_tensorflow.json | 1211 ++++++----------- .../oob_models/quantization/ptq/README.md | 18 +- .../quantization/ptq/run_benchmark.sh | 5 +- .../quantization/ptq/tf_benchmark.py | 34 +- .../quantization/ptq/README.md | 22 +- .../quantization/ptq/inference.py | 45 +- .../quantization/ptq/run_benchmark.sh | 2 +- .../3dunet-mlperf/quantization/ptq/README.md | 16 +- .../quantization/ptq/run_accuracy.py | 38 +- .../quantization/ptq/run_benchmark.sh | 7 - 10 files changed, 441 insertions(+), 957 deletions(-) diff --git a/examples/.config/model_params_tensorflow.json b/examples/.config/model_params_tensorflow.json index fb70465dc90..b64e2c5b304 100644 --- a/examples/.config/model_params_tensorflow.json +++ b/examples/.config/model_params_tensorflow.json @@ -598,395 +598,308 @@ "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/AIPG_trained/text_classification/vdcnn/agnews/tf/aipg-vdcnn.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "arttrack-coco-multi": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/arttrack/coco/tf/arttrack-coco-multi.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + 
"main_script": "tf_benchmark.py", + "batch_size": 100 }, "arttrack-mpii-single": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/arttrack/mpii/tf/arttrack-mpii-single.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "ava-face-recognition-3_0_0": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Security/feature_extraction/ava/tf/ava-face-recognition-3.0.0.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "ava-person-vehicle-detection-stage2-2_0_0": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Security/object_detection/common/ava/stage2/tf/ava-person-vehicle-detection-stage2-2.0.0.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "bert-base-uncased_L-12_H-768_A-12": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/language_representation/bert/base/uncased_L-12_H-768_A-12/tf/bert-base-uncased_L-12_H-768_A-12.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "darknet19": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/classification/darknet19/darknet19.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "darknet53": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/classification/darknet53/darknet53.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "deeplabv3": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/semantic_segmentation/deeplab/v3/deeplabv3.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "deepvariant_wgs": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/dna_sequencing/deepvariant/wgs/deepvariant_wgs.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "densenet-121": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/densenet/121/tf/densenet-121.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "densenet-161": { 
"model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/densenet/161/tf/densenet-161.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "densenet-169": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/densenet/169/tf/densenet-169.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "east_resnet_v1_50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/text_detection/east/tf/east_resnet_v1_50.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "efficientnet-b0": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/efficientnet/b0/tf/efficientnet-b0.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "efficientnet-b0_auto_aug": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/efficientnet/b0_auto_aug/tf/efficientnet-b0_auto_aug.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "efficientnet-b5": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/efficientnet/b5/tf/efficientnet-b5.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "facenet-20180408-102900": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/face_recognition/facenet/CASIA-WebFace/tf/facenet-20180408-102900.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_inception_v2_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_inception_v2_coco/tf/faster_rcnn_inception_v2_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet101_ava_v2_1": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_resnet101_ava_v2/faster_rcnn_resnet101_ava_v2.1_2018_04_30/frozen_inference_graph.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet101_coco": { "model_src_dir": "oob_models/quantization/ptq", 
"dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_resnet101_coco/tf/faster_rcnn_resnet101_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet101_kitti": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_resnet101_kitti_2018_01_28/faster_rcnn_resnet101_kitti_2018_01_28/faster_rcnn_resnet101_kitti.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet101_lowproposals_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_resnet101_lowproposals_coco_2018_01_28/faster_rcnn_resnet101_lowproposals_coco_2018_01_28/faster_rcnn_resnet101_lowproposals_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet50_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_resnet50_coco/tf/faster_rcnn_resnet50_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet50_lowproposals_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_resnet50_lowproposals_coco/tf/faster_rcnn_resnet50_lowproposals_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "googlenet-v1": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/googlenet/v1/tf/googlenet-v1.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "googlenet-v2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/googlenet/v2/tf/googlenet-v2.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "googlenet-v3": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/googlenet/v3/tf/googlenet-v3.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "googlenet-v4": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/googlenet/v4/tf/googlenet-v4.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", 
+ "batch_size": 100 }, "Hierarchical_LSTM": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/Hierarchical/text8_freeze.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "image-retrieval-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/image-retrieval-0001/image-retrieval-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "inceptionv2_ssd": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/ssd_inceptionv2/tf/inceptionv2_ssd.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "3d-pose-baseline": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/3d-pose-baseline/tf/3d-pose-baseline.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "cpm-person": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/cpm/person/tf/cpm-person.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "ctpn": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/text_detection/ctpn/tf/ctpn.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "DSSD_12": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/object_detection/common/dssd/DSSD_12/tf/DSSD_12.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "efficientnet-b7_auto_aug": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/efficientnet/b7_auto_aug/tf/efficientnet-b7_auto_aug.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet101_fgvc": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_resnet101_fgvc_2018_07_19/faster_rcnn_resnet101_fgvc_2018_07_19/faster_rcnn_resnet101_fgvc.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "faster_rcnn_resnet50_fgvc": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": 
"/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_resnet50_fgvc_2018_07_19/faster_rcnn_resnet50_fgvc_2018_07_19/faster_rcnn_resnet50_fgvc.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "handwritten-score-recognition-0003": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Retail//handwritten-score-recognition/0003/tf/handwritten-score-recognition-0003.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "HugeCTR": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/HugeCTR/HugeCTR.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "i3d-flow": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/action_recognition/i3d/flow/tf/i3d-flow.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "i3d-rgb": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/action_recognition/i3d/rgb/tf/i3d-rgb.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "icnet-camvid-ava-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/semantic_segmentation/icnet-camvid-tf-ws00/icnet-camvid-ava-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", + "main_script": "tf_benchmark.py", "batch_size": 16, "new_benchmark": false }, @@ -994,8 +907,7 @@ "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/semantic_segmentation/icnet-camvid-tf-ws30/icnet-camvid-ava-sparse-30-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", + "main_script": "tf_benchmark.py", "batch_size": 16, "new_benchmark": false }, @@ -1003,8 +915,7 @@ "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/semantic_segmentation/icnet-camvid-tf-ws60/icnet-camvid-ava-sparse-60-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", + "main_script": "tf_benchmark.py", "batch_size": 16, "new_benchmark": false }, @@ -1012,937 +923,729 @@ "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Retail/object_attributes/emotions_recognition/0002/tf/icv-emotions-recognition-0002.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "inception-resnet-v2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/inception-resnet/v2/tf/inception-resnet-v2.pb", - "yaml": 
"config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "intel-labs-nonlocal-dehazing": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/IntelLabs/FastImageProcessing/NonlocalDehazing/intel-labs-nonlocal-dehazing.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 100 }, "learning-to-see-in-the-dark-fuji": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/IntelLabs/LearningToSeeInTheDark/Fuji/learning-to-see-in-the-dark-fuji.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "learning-to-see-in-the-dark-sony": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/IntelLabs/LearningToSeeInTheDark/Sony/learning-to-see-in-the-dark-sony.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "license-plate-recognition-barrier-0007": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/optical_character_recognition/license_plate_recognition/tf/license-plate-recognition-barrier-0007.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "mask_rcnn_inception_v2_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/instance_segmentation/mask_rcnn/mask_rcnn_inception_v2_coco/tf/mask_rcnn_inception_v2_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "nasnet-a-large-331": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/nasnet/large/tf/nasnet-a-large-331.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "nasnet-a-mobile-224": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/nasnet/mobile/tf/nasnet-a-mobile-224.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "openpose-pose": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/openpose/pose/tf/openpose-pose.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "optical_character_recognition-text_recognition-tf": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": 
"/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/optical_character_recognition/text_recognition/tf/optical_character_recognition-text_recognition-tf.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "person-vehicle-bike-detection-crossroad-yolov3-1020": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Security/object_detection/crossroad/1020/tf/person-vehicle-bike-detection-crossroad-yolov3-1020.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "PRNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/face_reconstruction/PRNet/tf/PRNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-101": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v1/101/tf/resnet-101.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-152": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v1/152/tf/resnet-152.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v1/50/tf/official/resnet-50.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-v2-101": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v2/101/tf/resnet-v2-101.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-v2-152": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v2/152/tf/resnet-v2-152.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet-v2-50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v2/50/tf/224x224/resnet-v2-50.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet_v2_200": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/Resnet_v2_200/Resnet_v2_200.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": 
"tf_benchmark.py", + "batch_size": 1 }, "retinanet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/retinanet/tf/retinanet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "rfcn-resnet101-coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/rfcn/rfcn_resnet101_coco/tf/rfcn-resnet101-coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "rmnet_ssd": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Retail//action_detection/pedestrian/rmnet_ssd/0028_tf/tf/rmnet_ssd.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "squeezenet1_1": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/squeezenet/1.1/tf/squeezenet1_1.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_inception_v2_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/ssd_inception_v2_coco_2018_01_28/ssd_inception_v2_coco_2018_01_28/ssd_inception_v2_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_resnet50_v1_fpn_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/ssd_resnet50/ssd_resnet50_v1_fpn_coco/tf/ssd_resnet50_v1_fpn_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_resnet34_300x300": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/ssd-resnet34_300x300/ssd_resnet34_300x300.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "TCN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/sequence_modelling/tcn/tf/TCN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "text-recognition-0012": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Retail//text_recognition/bilstm_crnn_bilstm_decoder/0012/tf/text-recognition-0012.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "tiny_yolo_v1": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", 
"input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/object_detection/common/yolo/v1_tiny/tf/tiny_yolo_v1.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "tiny_yolo_v2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/object_detection/common/yolo/v2_tiny/tf/tiny_yolo_v2.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vehicle-attributes-barrier-0103": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_attributes/vehicle_attributes/tf/vehicle-attributes-barrier-0103.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vehicle-license-plate-detection-barrier-0123": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/barrier/tf/0123/vehicle-license-plate-detection-barrier-0123.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vgg16-oob": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/vgg/16/tf/vgg16.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vgg19-oob": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/vgg/19/tf/vgg19.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vggvox": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/voice_recognition/vggvox/vggvox.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v3-tiny": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/yolo/yolo_v3/yolo-v3-tiny/yolo-v3-tiny-tf/yolo-v3-tiny.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/yolo/yolo_v2/tf/yolo-v2.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-ava-sparse-35-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/detection/YOLOv2/fp32_sparsity35/yolo-v2-ava-sparse-35-0001.pb", - "yaml": "config.yaml", - 
"strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-ava-sparse-70-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/detection/YOLOv2/fp32_sparsity70/yolo-v2-ava-sparse-70-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-tiny-ava-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/detection/tinyYOLOv2/fp32_sparsity00/yolo-v2-tiny-ava-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-tiny-ava-sparse-30-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/detection/tinyYOLOv2/fp32_sparsity30/yolo-v2-tiny-ava-sparse-30-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-tiny-ava-sparse-60-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicCompressed/detection/tinyYOLOv2/fp32_sparsity60/yolo-v2-tiny-ava-sparse-60-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v2-tiny-vehicle-detection-0001": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Security/object_detection/barrier/yolo/yolo-v2-tiny-vehicle-detection-0001/tf/yolo-v2-tiny-vehicle-detection-0001.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo-v3": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/yolo/yolo_v3/tf/yolo-v3.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DeepLab": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/deeplabv3_mnv2_cityscapes_train/deeplab.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "GraphSage": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/GraphSage/GraphSage.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "WGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/WGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Vnet": { 
"model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/vnet/vnet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DRAW": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/DRAW/DRAW.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ALBERT": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/ALBERT/ALBERT.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "BERT_BASE": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/BERT_BASE/BERT_BASE.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "BERT_LARGE": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/BERT_LARGE/BERT_LARGE.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CapsuleNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/CapsuleNet/CapsuleNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CharCNN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/CharCNN/CharCNN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CRNN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/CRNN/crnn.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DIEN_Deep-Interest-Evolution-Network": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/DIEN/dien.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "dense_vnet_abdominal_ct": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/semantic_segmentation/dense_vnet/tf/dense_vnet_abdominal_ct.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "dilation": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/Dilation/dilation.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, 
"faster_rcnn_inception_resnet_v2_atrous_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_inception_resnet_v2_atrous_coco/tf/faster_rcnn_inception_resnet_v2_atrous_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco_2018_01_28/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_nas_coco_2018_01_28": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/faster_rcnn_nas_coco_2018_01_28/faster_rcnn_nas_coco_2018_01_28.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_nas_lowproposals_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/object_detection/common/faster_rcnn/faster_rcnn_nas_lowproposals_coco/tf/faster_rcnn_nas_lowproposals_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "GAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/GAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "gmcnn-places2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/image_inpainting/gmcnn/tf/gmcnn-places2.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "LSGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/LSGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "mask_rcnn_inception_resnet_v2_atrous_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/instance_segmentation/mask_rcnn/mask_rcnn_inception_resnet_v2_atrous_coco/tf/mask_rcnn_inception_resnet_v2_atrous_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "NCF-1B": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/ncf_trained_movielens_1m/NCF-1B.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "R-FCN": { 
"model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/R-FCN/rfcn_resnet101_coco_2018_01_28/R-FCN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "TextCNN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/TextCNN/TextCNN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "TextRNN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/TextRNN/TextRNN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Transformer-LT": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", - "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/transformer_lt_official_fp32_pretrained_model/graph/Transformer-LT.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/transformer_lt_official_fp32_pretrained_model/graph/Transformer-LT.pb", + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ACGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/ACGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "BEGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/BEGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/CGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DRAGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/DRAGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EBGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/EBGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "infoGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/infoGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "mask_rcnn_resnet101_atrous_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": 
"/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/instance_segmentation/mask_rcnn/mask_rcnn_resnet101_atrous_coco/tf/mask_rcnn_resnet101_atrous_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "mask_rcnn_resnet50_atrous_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/instance_segmentation/mask_rcnn/mask_rcnn_resnet50_atrous_coco/tf/mask_rcnn_resnet50_atrous_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "person-vehicle-bike-detection-crossroad-yolov3-1024": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/Security/object_detection/crossroad/1024/tf/person-vehicle-bike-detection-crossroad-yolov3-1024.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "srgan": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/image_processing/srgan/tf/srgan.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_resnet34_fp32_1200x1200_pretrained_model": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/SSD-ResNet34_1200x1200/ssd_resnet34_fp32_1200x1200_pretrained_model.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "unet-3d-isensee_2017": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/volumetric_segmentation/unet/3d/isensee_2017/tf/unet-3d-isensee_2017.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "unet-3d-origin": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/volumetric_segmentation/unet/3d/origin/tf/unet-3d-origin.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "WGAN_GP": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/oob_gan_models/WGAN_GP.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "wide_deep": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/wide_deep/wide_deep.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DynamicMemory": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": 
"/tf_dataset/tensorflow/tf_oob_models/oob/checkpoint_dynamic_memory_network/DynamicMemory.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EntityNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/checkpoint_entity_network2/EntityNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Seq2seqAttn": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/Seq2seqAttn/Seq2seqAttn.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "show_and_tell": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/show_and_tell/Show_and_tell.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "SqueezeNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/SqueezeNet-tf/SqueezeNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_resnet34_1200x1200": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/ssd_resnet34_model/ssd_resnet34_1200x1200.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "deepspeech": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/speech_to_text/deepspeech/v1/tf/deepspeech.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "TextRCNN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/TextRCNN/TextRCNN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "U-Net": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/unet/unet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "wavenet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/wavenet/wavenet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "HierAtteNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/checkpoint_hier_atten_title/text_hier_atten_title_desc_checkpoint_MHA/HierAtteNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": 
"tf_benchmark.py", + "batch_size": 1 }, "SphereFace": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/SphereFace/SphereFace.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNet-50_v1_5": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/ResNet50_v1_5/model_dir/ResNet-50_v1.5.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNeXt_50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/ResNext_50/ResNext_50.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "MiniGo": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/MiniGo/MiniGo.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNeXt_101": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/ResNext_101/ResNext_101.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "COVID-Net": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/COVID-Net/COVID-Net.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "resnet50_fashion": { "model_src_dir": "image_recognition/keras_models/resnet50_fashion/quantization/ptq", @@ -1975,470 +1678,366 @@ "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/3D-Unet/3DUNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "adv_inception_v3": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/adv_inception_v3/adv_inception_v3.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CenterNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/CenterNet/CenterNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "context_rcnn_resnet101_snapshot_serenget": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/context_rcnn_resnet101_snapshot_serengeti_2020_06_10/context_rcnn_resnet101_snapshot_serenget.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D0-512x512": { "model_src_dir": 
"oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d0/EfficientDet-D0-512x512.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D1-640x640": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d1/EfficientDet-D1-640x640.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D2-768x768": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d2/EfficientDet-D2-768x768.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D3-896x896": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d3/EfficientDet-D3-896x896.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D4-1024x1024": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d4/EfficientDet-D4-1024x1024.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D5-1280x1280": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d5/EfficientDet-D5-1280x1280.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D6-1280x1280": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d6/EfficientDet-D6-1280x1280.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "EfficientDet-D7-1536x1536": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/EfficientDet/efficientdet-d7/EfficientDet-D7-1536x1536.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ens3_adv_inception_v3": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/ens3_inception_v3/ens3_adv_inception_v3.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Evolution_ensemble": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/simple_net/Evolution_ensemble.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": 
"tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid/faster_rcnn_inception_resnet_v2_atrous_lowproposals_oid.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_inception_resnet_v2_atrous_oid": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/faster_rcnn_inception_resnet_v2_atrous_oid/faster_rcnn_inception_resnet_v2_atrous_oid.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "faster_rcnn_resnet101_snapshot_serengeti": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/faster_rcnn_resnet101_snapshot_serengeti/faster_rcnn_resnet101_snapshot_serengeti.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "key-value-memory-networks": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/key-value-memory-networks/key-value-memory-networks.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "MANN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/MANN/MANN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "NCF": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/NCF/NCF.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "NetVLAD": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/NetVLAD/NetVLAD.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "NeuMF": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/NeuMF/NeuMF.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_mobilenet_v1_0_75_depth_300x300_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/ssd_mobilenet_v1_0.75_depth_300x300_coco/ssd_mobilenet_v1_0.75_depth_300x300_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": 
"/tf_dataset/tensorflow/tf_oob_models/mlp/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ssd_resnet_101_fpn_oidv4": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/ssd_resnet101_v1_fpn/ssd_resnet_101_fpn_oidv4.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "SSD_ResNet50_V1_FPN_640x640_RetinaNet50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03/SSD_ResNet50_V1_FPN_640x640_RetinaNet50.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "vehicle-license-plate-detection-barrier-0106": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/vehicle-license-plate-detection-barrier-0106/vehicle-license-plate-detection-barrier-0106.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "YOLOv4": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/yolov4/YOLOv4.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "CBAM": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/CBAM/CBAM.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "NTM-One-Shot": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/NTM-One-Shot/model/NTM-One-Shot.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "KeypointNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/keypoint/KeypointNet.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DCGAN": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/dcgan/DCGAN.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "pose-ae-multiperson": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/pose-ae/multiperson/tf/pose-ae-multiperson.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 
1 }, "pose-ae-refinement": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/human_pose_estimation/pose-ae/refinement/tf/pose-ae-refinement.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "WD": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/WD/wide_deep_saved_models/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNest50": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/ResNest/ResNest50/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNest101": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/ResNest/ResNest101/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "ResNest50-3D": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/ResNest/ResNest50-3D/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "GPT2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/GPT-2/gpt-2/models/124M/GPT_2_124M-Generated_LPOT_OOB_Model/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Attention_OCR": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/attention_ocr/model.ckpt-399731_freeze.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DLRM": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/DLRM/DLRM-Generated_LPOT_OOB_Model.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "centernet_hg104": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/ckpt/centernet_hg104_1024x1024_coco17/saved_model/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "DETR": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/DETR/DETR.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Elmo": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/elmo/model/elmo.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - 
"new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Time_series_LSTM": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/time_series_LSTM/time_series_lstm.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Unet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/oob/Unet/checkpoint/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "adversarial_text": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/mlp/adversarial_text/imdb_pretrain/model.ckpt-1135_freeze.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "AttRec": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/AttRec/AttRec.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Parallel_WaveNet": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/Parallel_WaveNet/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "PNASNet-5": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/PNASNet-5/model/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "VAE-CF": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/VAE-CF/data/binary/ml_20m/", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "Deep_Speech_2": { "model_src_dir": "oob_models/quantization/ptq", "dataset_location": "", "input_model": "/tf_dataset/tensorflow/tf_oob_models/dpg/Deep_Speech2/Deep_Speech_2-Generated_LPOT_OOB_Model.pb", - "yaml": "config.yaml", - "strategy": "basic", - "batch_size": 1, - "new_benchmark": false + "main_script": "tf_benchmark.py", + "batch_size": 1 }, "yolo_v3": { "model_src_dir": "object_detection/yolo_v3/quantization/ptq", @@ -2453,10 +2052,8 @@ "model_src_dir": "semantic_image_segmentation/3dunet-mlperf/quantization/ptq", "dataset_location": "/tf_dataset2/models/tensorflow/3dunet/build", "input_model": "/tf_dataset2/models/tensorflow/3dunet/3dunet_dynamic_ndhwc.pb", - "yaml": "3dunet-mlperf.yaml", - "strategy": "basic", - "batch_size": 100, - "new_benchmark": false + "main_script": "run_accuracy.py", + "batch_size": 100 }, "transformer_lt_mlperf": { "model_src_dir": "nlp/transformer_lt_mlperf/quantization/ptq", diff --git a/examples/tensorflow/oob_models/quantization/ptq/README.md b/examples/tensorflow/oob_models/quantization/ptq/README.md index 3e619b88538..dea9f15724d 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/README.md +++ 
b/examples/tensorflow/oob_models/quantization/ptq/README.md @@ -89,22 +89,14 @@ List models names can get with open_model_zoo: | ssd-resnet34 300x300 | https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/ssd_resnet34_fp32_bs1_pretrained_model.pb | ## 5. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=list(inputs.keys()), - outputs=outputs, - approach="static", - calibration_sampling_size=[1], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... + ) ``` # Run @@ -117,5 +109,5 @@ config = PostTrainingQuantConfig( ## run benchmarking ```bash -./run_benchmark.sh --topology=${model_topology} --dataset_location= --input_model=${model_path} --mode=benchmark --batch_size=1 --iters=200 +./run_benchmark.sh --topology=${model_topology} --dataset_location= --input_model=${model_path} --mode=benchmark --batch_size=1 ``` diff --git a/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh b/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh index 87d16a45c1e..2f94d4b6a88 100755 --- a/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh @@ -32,9 +32,6 @@ function init_params { --batch_size=*) batch_size=$(echo $var |cut -f2 -d=) ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; *) echo "Error: No such parameter: ${var}" exit 1 @@ -49,7 +46,7 @@ function define_mode { if [[ ${mode} == "accuracy" ]]; then echo "For TF OOB models, there is only benchmark mode!, num iter is: ${iters}" exit 1 - elif [[ ${mode} == "benchmark" ]]; then + elif [[ ${mode} == "performance" ]]; then mode_cmd=" --benchmark " else echo "Error: No such mode: ${mode}" diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py index 9c07a8533af..fea77e5d60f 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py @@ -334,37 +334,13 @@ def __iter__(self): from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed set_random_seed(9527) - - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="performance") - - tolerable_loss = AccuracyLoss(loss=0.01) - - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", inputs=list(inputs.keys()), outputs=outputs, - approach="static", - calibration_sampling_size=[1], - op_type_list=None, - op_name_list=None, - 
reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + calibration_sampling_size=[1]) # generate dummy data if model_detail.get('sparse_d_shape'): @@ -402,11 +378,7 @@ def __iter__(self): q_model = fit( model=common.Model(args.model_path), conf=config, - calib_dataloader=calib_dataloader, - calib_func=None, - eval_dataloader=None, - eval_func=None, - eval_metric=None) + calib_dataloader=calib_dataloader) q_model.save(args.output_path) # benchmark diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md index 5b44c269579..40661ab8f8d 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/README.md @@ -75,26 +75,14 @@ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/wide_ ``` ### 8. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=["new_numeric_placeholder", "new_categorical_placeholder"], - outputs=["import/head/predictions/probabilities"], - approach="static", - calibration_sampling_size=[2000], - op_type_list=None, - op_name_list={ - 'import/dnn/hiddenlayer_0/MatMul': { - 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, - } - }, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... + ) ``` ### 9. Run Command @@ -102,7 +90,7 @@ config = PostTrainingQuantConfig( ```shell bash run_tuning.sh --dataset_location=/path/to/datasets --input_model=/path/to/wide_deep_fp32_pretrained_model.pb --output_model=./wnd_int8_opt.pb bash run_benchmark.sh --dataset_location=/path/to/datasets --input_model=./wnd_int8_opt.pb --mode=accuracy --batch_size=500 - bash run_benchmark.sh --dataset_location=/path/to/datasets --input_model=./wnd_int8_opt.pb --mode=benchmark --batch_size=500 + bash run_benchmark.sh --dataset_location=/path/to/datasets --input_model=./wnd_int8_opt.pb --mode=performance --batch_size=500 ``` ### Other This example takes the reference from https://github.com/IntelAI/models/tree/master/benchmarks/recommendation/tensorflow/wide_deep_large_ds. 
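
For reference, the new-API pieces shown in this README come together roughly as sketched below. This is a minimal, consolidated sketch of the tuning path implemented in `inference.py` in this patch: the `Dataloader` class, the `eval_inference` method, and the FP32 graph held in `infer_graph` are defined in that script, and the dataset/output paths are placeholders, not fixed values. For Intel GPUs, add `device="gpu"` and `backend="itex"` to the config as described above.

```python
# Sketch only: mirrors the flow in inference.py; paths and batch size are placeholders.
from neural_compressor.experimental import common
from neural_compressor.quantization import fit
from neural_compressor.config import PostTrainingQuantConfig, set_random_seed

set_random_seed(9527)

# CPU defaults; for Intel GPUs also pass device="gpu", backend="itex".
config = PostTrainingQuantConfig(
    inputs=["new_numeric_placeholder", "new_categorical_placeholder"],
    outputs=["import/head/predictions/probabilities"],
    calibration_sampling_size=[2000],
    op_name_list={
        'import/dnn/hiddenlayer_0/MatMul': {
            'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme': ['asym']},
        }
    })

# infer_graph, Dataloader and eval_inference come from inference.py in this example.
q_model = fit(
    model=common.Model(infer_graph),
    conf=config,
    calib_dataloader=Dataloader("/path/to/datasets/train.csv", 500),
    eval_func=eval_inference)
q_model.save("./wnd_int8_opt.pb")
```

The same pattern (a `PostTrainingQuantConfig` plus `fit()` with a calibration dataloader and an optional `eval_func`) is what replaces the deleted YAML-driven flow across the other examples in this patch.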
diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py index 9afdc52bf6f..a048f4e5237 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py @@ -107,6 +107,9 @@ def _parse_function(proto): dataset = dataset.prefetch(batch_size*10) return dataset +def evaluation_func(model, measurer=None): + evaluate_opt_graph.eval_inference(model) + class eval_classifier_optimized_graph: """Evaluate image classifier with optimized TensorFlow graph""" @@ -180,59 +183,35 @@ def auto_tune(self): """ from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed infer_graph = load_graph(self.args.input_graph) set_random_seed(9527) - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="accuracy") - - tolerable_loss = AccuracyLoss(loss=0.01) - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", inputs=["new_numeric_placeholder", "new_categorical_placeholder"], outputs=["import/head/predictions/probabilities"], - approach="static", calibration_sampling_size=[2000], - op_type_list=None, op_name_list={ 'import/dnn/hiddenlayer_0/MatMul': { 'activation': {'dtype': ['uint8'], 'algorithm': ['minmax'], 'scheme':['asym']}, } - }, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + }) if self.args.calib_data: q_model = fit( model=common.Model(infer_graph), conf=config, calib_dataloader=Dataloader(self.args.calib_data, self.args.batch_size), - calib_func=None, - eval_dataloader=None, - eval_func=self.eval_inference, - eval_metric=None) + eval_func=self.eval_inference) return q_model print("Please provide calibration dataset!") def eval_inference(self, infer_graph): print("Run inference") if isinstance(infer_graph, tf.compat.v1.GraphDef): - graph = tf.Graph() + graph = tf.Graph() with graph.as_default(): - tf.import_graph_def(infer_graph, name='') + tf.import_graph_def(infer_graph, name='') infer_graph = graph data_config = tf.compat.v1.ConfigProto() @@ -334,8 +313,12 @@ def run(self): q_model.save(self.args.output_graph) if self.args.benchmark: - infer_graph = load_graph(self.args.input_graph) - self.eval_inference(infer_graph) + from neural_compressor.benchmark import fit + from neural_compressor.config import BenchmarkConfig + conf = BenchmarkConfig(iteration=100, cores_per_instance=4, num_of_instance=7) + fit(self.args.input_graph, conf, + b_dataloader=Dataloader(self.args.eval_data, self.args.batch_size), + b_func=evaluation_func) class Dataloader(object): def __init__(self, data_location, batch_size): diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh index ae31da6513e..f50f01b4935 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh +++ 
b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh @@ -39,7 +39,7 @@ function init_params { function define_mode { if [[ ${mode} == "accuracy" ]]; then mode_cmd=" --benchmark --accuracy_only" - elif [[ ${mode} == "benchmark" ]]; then + elif [[ ${mode} == "performance" ]]; then mode_cmd=" --benchmark" else echo "Error: No such mode: ${mode}" diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md index b6f1be0d4c6..e4d95f5c5fd 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/README.md @@ -52,22 +52,14 @@ pip install --upgrade intel-extension-for-tensorflow[cpu] The calibration set is the forty images listed in brats_cal_images_list.txt. They are randomly selected from Fold 0, Fold 2, Fold 3, and Fold 4 of BraTS 2019 Training Dataset. ### 7. Quantization Config -The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'tensorflow_itex' and the 'device' parameter should be set to 'gpu'. +The Quantization Config class has default parameters setting for running on Intel CPUs. If running this example on Intel GPUs, the 'backend' parameter should be set to 'itex' and the 'device' parameter should be set to 'gpu'. ``` config = PostTrainingQuantConfig( device="gpu", - backend="tensorflow_itex", - inputs=[], - outputs=[], - approach="static", - calibration_sampling_size=[40], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + backend="itex", + ... + ) ``` ### 8. 
Test command diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index 62ccfe03adf..0d7edb8e60c 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -133,7 +133,7 @@ def eval_func(graph): time_list.append(duration) else: predictions[i] = sess.run(output_tensor, feed_dict={input_tensor: data[np.newaxis, ...]})[0].astype(np.float32) - if args.mode == 'benchmark': + if args.mode == 'performance': latency = np.array(time_list[warmup: ]).mean() / args.batch_size print('Batch size = {}'.format(args.batch_size)) print('Latency: {:.3f} ms'.format(latency * 1000)) @@ -199,53 +199,23 @@ def __len__(self): return self.count - from neural_compressor.experimental import Quantization, common args = get_args() print(args) graph = load_graph(args.input_model) if args.mode == 'tune': from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, \ - TuningCriterion, AccuracyCriterion, AccuracyLoss, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig, set_random_seed set_random_seed(9527) - - tuning_criterion = TuningCriterion( - strategy="basic", - timeout=0, - max_trials=100, - objective="accuracy") - - tolerable_loss = AccuracyLoss(loss=0.01) - - accuracy_criterion = AccuracyCriterion( - higher_is_better=True, - criterion='relative', - tolerable_loss=tolerable_loss) - - config = PostTrainingQuantConfig( - device="cpu", - backend="tensorflow", - inputs=[], - outputs=[], - approach="static", - calibration_sampling_size=[40], - op_type_list=None, - op_name_list=None, - reduce_range=None, - extra_precisions=[], - tuning_criterion=tuning_criterion, - accuracy_criterion=accuracy_criterion) + config = PostTrainingQuantConfig(calibration_sampling_size=[40]) q_model = fit( model=common.Model(graph), conf=config, calib_dataloader=common.DataLoader(CalibrationDL()), - calib_func=None, eval_dataloader=common.DataLoader(CalibrationDL()), - eval_func=eval_func, - eval_metric=None) + eval_func=eval_func) try: q_model.save(args.output_model) diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh index 9d886a3fbcb..fd466c5f8d0 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_benchmark.sh @@ -31,12 +31,6 @@ function init_params { --batch_size=*) batch_size=$(echo $var |cut -f2 -d=) ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - --bfloat16=*) - bfloat16=$(echo ${var} |cut -f2 -d=) - ;; *) echo "Error: No such parameter: ${var}" exit 1 @@ -59,7 +53,6 @@ function run_benchmark { --input-model=${input_model} \ --data-location=${dataset_location} \ --calib-preprocess=${BUILD_DIR}/calib_preprocess \ - --iters=${iters} \ --batch-size=${batch_size} \ --mode=${mode} \ ${extra_cmd} From 52ac84a345bc165e7a910f84a1d03d17ab7e5ae4 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Fri, 9 Dec 2022 14:20:28 +0800 Subject: [PATCH 05/14] remove oldapi examples Signed-off-by: Lv, Liang1 --- 
.../deeplab/quantization/ptq/README.md | 183 --------- .../quantization/ptq/datasets/__init__.py | 0 .../ptq/datasets/build_ade20k_data.py | 123 ------ .../ptq/datasets/build_cityscapes_data.py | 198 ---------- .../quantization/ptq/datasets/build_data.py | 161 -------- .../ptq/datasets/build_voc2012_data.py | 146 -------- .../ptq/datasets/convert_cityscapes.sh | 60 --- .../ptq/datasets/data_generator.py | 350 ------------------ .../ptq/datasets/data_generator_test.py | 115 ------ .../datasets/download_and_convert_ade20k.sh | 80 ---- .../datasets/download_and_convert_voc2012.sh | 92 ----- .../ptq/datasets/remove_gt_colormap.py | 83 ----- .../deeplab/quantization/ptq/deeplab.yaml | 65 ---- .../quantization/ptq/deeplab_itex.yaml | 65 ---- .../deeplab/quantization/ptq/main.py | 70 ---- .../deeplab/quantization/ptq/requirements.txt | 2 - .../deeplab/quantization/ptq/run_benchmark.sh | 40 -- .../deeplab/quantization/ptq/run_tuning.sh | 39 -- .../quantization/ptq/README.md | 150 -------- .../quantization/ptq/conf.yaml | 48 --- .../quantization/ptq/conf_itex.yaml | 48 --- .../ptq/content_images/colva_beach_sq.jpg | Bin 14235 -> 0 bytes .../ptq/content_images/golden_gate_sq.jpg | Bin 12423 -> 0 bytes .../quantization/ptq/prepare_model.py | 33 -- .../quantization/ptq/requirements.txt | 2 - .../quantization/ptq/run_benchmark.sh | 62 ---- .../quantization/ptq/run_tuning.sh | 50 --- .../ptq/style_images/kanagawa_great_wave.jpg | Bin 28352 -> 0 bytes .../ptq/style_images/zigzag_colorful.jpg | Bin 19632 -> 0 bytes .../quantization/ptq/style_tune.py | 190 ---------- 30 files changed, 2455 deletions(-) delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/README.md delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/__init__.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_ade20k_data.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_cityscapes_data.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_data.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_voc2012_data.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/convert_cityscapes.sh delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator_test.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_ade20k.sh delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_voc2012.sh delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/remove_gt_colormap.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab.yaml delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab_itex.yaml delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/main.py delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/requirements.txt delete mode 100644 
examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_benchmark.sh delete mode 100644 examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_tuning.sh delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf.yaml delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf_itex.yaml delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_tuning.sh delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg delete mode 100644 examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_tune.py diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/README.md b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/README.md deleted file mode 100644 index 1bca85e202b..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/README.md +++ /dev/null @@ -1,183 +0,0 @@ -Step-by-Step -============ - -This document list steps of reproducing Intel Optimized TensorFlow image recognition models tuning results via Neural Compressor. -This example can run on Intel CPUs and GPUs. - -> **Note**: -> Most of those models are both supported in Intel optimized TF 1.15.x and Intel optimized TF 2.x. -> [Version support](../../../../../../README.md#supported-frameworks) -# Prerequisite - -### 1. Installation - Recommend python 3.6 or higher version. - - ```shell - cd examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq - pip install -r requirements.txt - ``` - -### 2. Install Intel Tensorflow -```shell -pip install intel-tensorflow -``` -> Note: Supported Tensorflow [Version](../../../../../../README.md#supported-frameworks). - -### 3. Install Intel Extension for Tensorflow -#### Quantizing the model on Intel GPU -Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[gpu] -``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) - -#### Quantizing the model on Intel CPU(Experimental) -Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[cpu] -``` - -### 4. 
Prepare Dataset -Please use the script under the folder `datasets` to download and convert PASCAL VOC 2012 semantic segmentation dataset to TFRecord. Refer to [Running DeepLab on PASCAL VOC 2012 Semantic Segmentation Dataset](https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/pascal.md#running-deeplab-on-pascal-voc-2012-semantic-segmentation-dataset) for more details. -```shell -# From the examples/tensorflow/semantic_image_segmentation/deeplab/datasets directory. -sh download_and_convert_voc2012.sh -``` - -### 5. Prepare pre-trained model -Refer to [Export trained deeplab model to frozen inference graph](https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/export_model.md#export-trained-deeplab-model-to-frozen-inference-graph) for more details. - -1. Download the checkpoint file -```shell -wget http://download.tensorflow.org/models/deeplabv3_pascal_train_aug_2018_01_04.tar.gz -tar -xvf deeplabv3_pascal_train_aug_2018_01_04.tar.gz -``` -2. Export to a frozen graph -```shell -git clone https://github.com/tensorflow/models.git -cd models/research/ -export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim - -python deeplab/export_model.py \ - --checkpoint_path=/PATH/TO/deeplabv3_pascal_train_aug/model.ckpt \ - --export_path=/PATH/TO/deeplab_export.pb \ - --model_variant="xception_65" \ - --logtostderr \ - --eval_split="val" \ - --model_variant="xception_65" \ - --atrous_rates=6 \ - --atrous_rates=12 \ - --atrous_rates=18 \ - --output_stride=16 \ - --decoder_output_stride=4 \ - --eval_crop_size="513,513" \ - --dataset="pascal_voc_seg" -``` - -# Run -```shell -cd examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq -bash run_tuning.sh --config=deeplab.yaml --input_model=/PATH/TO/deeplab_export.pb --output_model=./nc_deeplab.pb -``` - - -Examples of enabling Intel® Neural Compressor auto tuning on Deeplab model for tensorflow -======================================================= - -This is a tutorial of how to enable deeplab model with Intel® Neural Compressor. - -# User Code Analysis - -Intel® Neural Compressor supports two usages: - -1. User specifies fp32 "model", yaml configured calibration dataloader in calibration field and evaluation dataloader in evaluation field, metric in tuning.metric field of model-specific yaml config file. - -> *Note*: -> you should change the model-specific yaml file dataset path to your own dataset path - -2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. - -We provide Deeplab model pretrained on PASCAL VOC 2012, Using mIOU as metric which is built-in supported by Intel® Neural Compressor. - -### Write Yaml config file - -In examples directory, there is a deeplab.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The deeplab_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. - -```yaml -# deeplab.yaml -device: cpu # optional. default value is cpu, other value is gpu. -model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. 
- name: deeplab - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 50, 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to calibration - transform: - ParseDecodeVoc: {} - - -evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - mIOU: - num_classes: 21 # built-in metrics are topk, map, f1, allow user to register new metric. - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - performance: # optional. used to benchmark performance of passing model. - iteration: 100 - configs: - cores_per_instance: 4 - num_of_instance: 6 - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. - - -``` - -Here we choose topk which is built-in metric and set accuracy criterion as tolerating 0.01 relative accuracy loss of baseline. The default tuning strategy is basic strategy. The timeout 0 means early stop as long as a tuning config meet accuracy target. - -### Tune - -After completed preparation steps, we just need to add below tuning part in `eval_classifier_optimized_graph` class. - -```python -from neural_compressor.experimental import Quantization, common -quantizer = Quantization(self.args.config) -quantizer.model = common.Model(self.args.input_graph) -q_model = quantizer.fit() -q_model.save(self.args.output_graph) -``` - -### Benchmark -```python -from neural_compressor.experimental import Benchmark, common -evaluator = Benchmark(self.args.config) -evaluator.model = common.Model(self.args.input_graph) -evaluator(self.args.mode) -``` - diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/__init__.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_ade20k_data.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_ade20k_data.py deleted file mode 100644 index fc04ed0db04..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_ade20k_data.py +++ /dev/null @@ -1,123 +0,0 @@ -# Lint as: python2, python3 -# Copyright 2018 The TensorFlow Authors All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts ADE20K data to TFRecord file format with Example protos.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import math -import os -import random -import sys -import build_data -from six.moves import range -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string( - 'train_image_folder', - './ADE20K/ADEChallengeData2016/images/training', - 'Folder containing trainng images') -tf.app.flags.DEFINE_string( - 'train_image_label_folder', - './ADE20K/ADEChallengeData2016/annotations/training', - 'Folder containing annotations for trainng images') - -tf.app.flags.DEFINE_string( - 'val_image_folder', - './ADE20K/ADEChallengeData2016/images/validation', - 'Folder containing validation images') - -tf.app.flags.DEFINE_string( - 'val_image_label_folder', - './ADE20K/ADEChallengeData2016/annotations/validation', - 'Folder containing annotations for validation') - -tf.app.flags.DEFINE_string( - 'output_dir', './ADE20K/tfrecord', - 'Path to save converted tfrecord of Tensorflow example') - -_NUM_SHARDS = 4 - - -def _convert_dataset(dataset_split, dataset_dir, dataset_label_dir): - """Converts the ADE20k dataset into into tfrecord format. - - Args: - dataset_split: Dataset split (e.g., train, val). - dataset_dir: Dir in which the dataset locates. - dataset_label_dir: Dir in which the annotations locates. - - Raises: - RuntimeError: If loaded image and label have different shape. - """ - - img_names = tf.gfile.Glob(os.path.join(dataset_dir, '*.jpg')) - random.shuffle(img_names) - seg_names = [] - for f in img_names: - # get the filename without the extension - basename = os.path.basename(f).split('.')[0] - # cover its corresponding *_seg.png - seg = os.path.join(dataset_label_dir, basename+'.png') - seg_names.append(seg) - - num_images = len(img_names) - num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) - - image_reader = build_data.ImageReader('jpeg', channels=3) - label_reader = build_data.ImageReader('png', channels=1) - - for shard_id in range(_NUM_SHARDS): - output_filename = os.path.join( - FLAGS.output_dir, - '%s-%05d-of-%05d.tfrecord' % (dataset_split, shard_id, _NUM_SHARDS)) - with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: - start_idx = shard_id * num_per_shard - end_idx = min((shard_id + 1) * num_per_shard, num_images) - for i in range(start_idx, end_idx): - sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( - i + 1, num_images, shard_id)) - sys.stdout.flush() - # Read the image. - image_filename = img_names[i] - image_data = tf.gfile.FastGFile(image_filename, 'rb').read() - height, width = image_reader.read_image_dims(image_data) - # Read the semantic segmentation annotation. 
- seg_filename = seg_names[i] - seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read() - seg_height, seg_width = label_reader.read_image_dims(seg_data) - if height != seg_height or width != seg_width: - raise RuntimeError('Shape mismatched between image and label.') - # Convert to tf example. - example = build_data.image_seg_to_tfexample( - image_data, img_names[i], height, width, seg_data) - tfrecord_writer.write(example.SerializeToString()) - sys.stdout.write('\n') - sys.stdout.flush() - - -def main(unused_argv): - tf.gfile.MakeDirs(FLAGS.output_dir) - _convert_dataset( - 'train', FLAGS.train_image_folder, FLAGS.train_image_label_folder) - _convert_dataset('val', FLAGS.val_image_folder, FLAGS.val_image_label_folder) - - -if __name__ == '__main__': - tf.app.run() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_cityscapes_data.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_cityscapes_data.py deleted file mode 100644 index 53c11e30310..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_cityscapes_data.py +++ /dev/null @@ -1,198 +0,0 @@ -# Lint as: python2, python3 -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts Cityscapes data to TFRecord file format with Example protos. - -The Cityscapes dataset is expected to have the following directory structure: - - + cityscapes - - build_cityscapes_data.py (current working directiory). - - build_data.py - + cityscapesscripts - + annotation - + evaluation - + helpers - + preparation - + viewer - + gtFine - + train - + val - + test - + leftImg8bit - + train - + val - + test - + tfrecord - -This script converts data into sharded data files and save at tfrecord folder. - -Note that before running this script, the users should (1) register the -Cityscapes dataset website at https://www.cityscapes-dataset.com to -download the dataset, and (2) run the script provided by Cityscapes -`preparation/createTrainIdLabelImgs.py` to generate the training groundtruth. - -Also note that the tensorflow model will be trained with `TrainId' instead -of `EvalId' used on the evaluation server. Thus, the users need to convert -the predicted labels to `EvalId` for evaluation on the server. See the -vis.py for more details. - -The Example proto contains the following fields: - - image/encoded: encoded image content. - image/filename: image filename. - image/format: image file format. - image/height: image height. - image/width: image width. - image/channels: image channels. - image/segmentation/class/encoded: encoded semantic segmentation content. - image/segmentation/class/format: semantic segmentation file format. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import glob -import math -import os.path -import re -import sys -import build_data -from six.moves import range -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string('cityscapes_root', - './cityscapes', - 'Cityscapes dataset root folder.') - -tf.app.flags.DEFINE_string( - 'output_dir', - './tfrecord', - 'Path to save converted SSTable of TensorFlow examples.') - - -_NUM_SHARDS = 10 - -# A map from data type to folder name that saves the data. -_FOLDERS_MAP = { - 'image': 'leftImg8bit', - 'label': 'gtFine', -} - -# A map from data type to filename postfix. -_POSTFIX_MAP = { - 'image': '_leftImg8bit', - 'label': '_gtFine_labelTrainIds', -} - -# A map from data type to data format. -_DATA_FORMAT_MAP = { - 'image': 'png', - 'label': 'png', -} - -# Image file pattern. -_IMAGE_FILENAME_RE = re.compile('(.+)' + _POSTFIX_MAP['image']) - - -def _get_files(data, dataset_split): - """Gets files for the specified data type and dataset split. - - Args: - data: String, desired data ('image' or 'label'). - dataset_split: String, dataset split ('train_fine', 'val_fine', 'test_fine') - - Returns: - A list of sorted file names or None when getting label for - test set. - """ - if dataset_split == 'train_fine': - split_dir = 'train' - elif dataset_split == 'val_fine': - split_dir = 'val' - elif dataset_split == 'test_fine': - split_dir = 'test' - else: - raise RuntimeError("Split {} is not supported".format(dataset_split)) - pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data]) - search_files = os.path.join( - FLAGS.cityscapes_root, _FOLDERS_MAP[data], split_dir, '*', pattern) - filenames = glob.glob(search_files) - return sorted(filenames) - - -def _convert_dataset(dataset_split): - """Converts the specified dataset split to TFRecord format. - - Args: - dataset_split: The dataset split (e.g., train_fine, val_fine). - - Raises: - RuntimeError: If loaded image and label have different shape, or if the - image file with specified postfix could not be found. - """ - image_files = _get_files('image', dataset_split) - label_files = _get_files('label', dataset_split) - - num_images = len(image_files) - num_labels = len(label_files) - num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) - - if num_images != num_labels: - raise RuntimeError("The number of images and labels doesn't match: {} {}".format(num_images, num_labels)) - - image_reader = build_data.ImageReader('png', channels=3) - label_reader = build_data.ImageReader('png', channels=1) - - for shard_id in range(_NUM_SHARDS): - shard_filename = '%s-%05d-of-%05d.tfrecord' % ( - dataset_split, shard_id, _NUM_SHARDS) - output_filename = os.path.join(FLAGS.output_dir, shard_filename) - with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: - start_idx = shard_id * num_per_shard - end_idx = min((shard_id + 1) * num_per_shard, num_images) - for i in range(start_idx, end_idx): - sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( - i + 1, num_images, shard_id)) - sys.stdout.flush() - # Read the image. - image_data = tf.gfile.FastGFile(image_files[i], 'rb').read() - height, width = image_reader.read_image_dims(image_data) - # Read the semantic segmentation annotation. 
- seg_data = tf.gfile.FastGFile(label_files[i], 'rb').read() - seg_height, seg_width = label_reader.read_image_dims(seg_data) - if height != seg_height or width != seg_width: - raise RuntimeError('Shape mismatched between image and label.') - # Convert to tf example. - re_match = _IMAGE_FILENAME_RE.search(image_files[i]) - if re_match is None: - raise RuntimeError('Invalid image filename: ' + image_files[i]) - filename = os.path.basename(re_match.group(1)) - example = build_data.image_seg_to_tfexample( - image_data, filename, height, width, seg_data) - tfrecord_writer.write(example.SerializeToString()) - sys.stdout.write('\n') - sys.stdout.flush() - - -def main(unused_argv): - # Only support converting 'train_fine', 'val_fine' and 'test_fine' sets for now. - for dataset_split in ['train_fine', 'val_fine', 'test_fine']: - _convert_dataset(dataset_split) - - -if __name__ == '__main__': - tf.app.run() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_data.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_data.py deleted file mode 100644 index 45628674dbf..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_data.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Contains common utility functions and classes for building dataset. - -This script contains utility functions and classes to converts dataset to -TFRecord file format with Example protos. - -The Example proto contains the following fields: - - image/encoded: encoded image content. - image/filename: image filename. - image/format: image file format. - image/height: image height. - image/width: image width. - image/channels: image channels. - image/segmentation/class/encoded: encoded semantic segmentation content. - image/segmentation/class/format: semantic segmentation file format. -""" -import collections -import six -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_enum('image_format', 'png', ['jpg', 'jpeg', 'png'], - 'Image format.') - -tf.app.flags.DEFINE_enum('label_format', 'png', ['png'], - 'Segmentation label format.') - -# A map from image format to expected data format. -_IMAGE_FORMAT_MAP = { - 'jpg': 'jpeg', - 'jpeg': 'jpeg', - 'png': 'png', -} - - -class ImageReader(object): - """Helper class that provides TensorFlow image coding utilities.""" - - def __init__(self, image_format='jpeg', channels=3): - """Class constructor. - - Args: - image_format: Image format. Only 'jpeg', 'jpg', or 'png' are supported. - channels: Image channels. 
- """ - with tf.Graph().as_default(): - self._decode_data = tf.placeholder(dtype=tf.string) - self._image_format = image_format - self._session = tf.Session() - if self._image_format in ('jpeg', 'jpg'): - self._decode = tf.image.decode_jpeg(self._decode_data, - channels=channels) - elif self._image_format == 'png': - self._decode = tf.image.decode_png(self._decode_data, - channels=channels) - - def read_image_dims(self, image_data): - """Reads the image dimensions. - - Args: - image_data: string of image data. - - Returns: - image_height and image_width. - """ - image = self.decode_image(image_data) - return image.shape[:2] - - def decode_image(self, image_data): - """Decodes the image data string. - - Args: - image_data: string of image data. - - Returns: - Decoded image data. - - Raises: - ValueError: Value of image channels not supported. - """ - image = self._session.run(self._decode, - feed_dict={self._decode_data: image_data}) - if len(image.shape) != 3 or image.shape[2] not in (1, 3): - raise ValueError('The image channels not supported.') - - return image - - -def _int64_list_feature(values): - """Returns a TF-Feature of int64_list. - - Args: - values: A scalar or list of values. - - Returns: - A TF-Feature. - """ - if not isinstance(values, collections.Iterable): - values = [values] - - return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) - - -def _bytes_list_feature(values): - """Returns a TF-Feature of bytes. - - Args: - values: A string. - - Returns: - A TF-Feature. - """ - def norm2bytes(value): - return value.encode() if isinstance(value, str) and six.PY3 else value - - return tf.train.Feature( - bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) - - -def image_seg_to_tfexample(image_data, filename, height, width, seg_data): - """Converts one image/segmentation pair to tf example. - - Args: - image_data: string of image data. - filename: image filename. - height: image height. - width: image width. - seg_data: string of semantic segmentation data. - - Returns: - tf example of one image/segmentation pair. - """ - return tf.train.Example(features=tf.train.Features(feature={ - 'image/encoded': _bytes_list_feature(image_data), - 'image/filename': _bytes_list_feature(filename), - 'image/format': _bytes_list_feature( - _IMAGE_FORMAT_MAP[FLAGS.image_format]), - 'image/height': _int64_list_feature(height), - 'image/width': _int64_list_feature(width), - 'image/channels': _int64_list_feature(3), - 'image/segmentation/class/encoded': ( - _bytes_list_feature(seg_data)), - 'image/segmentation/class/format': _bytes_list_feature( - FLAGS.label_format), - })) diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_voc2012_data.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_voc2012_data.py deleted file mode 100644 index f0bdecb6a0f..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/build_voc2012_data.py +++ /dev/null @@ -1,146 +0,0 @@ -# Lint as: python2, python3 -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts PASCAL VOC 2012 data to TFRecord file format with Example protos. - -PASCAL VOC 2012 dataset is expected to have the following directory structure: - - + pascal_voc_seg - - build_data.py - - build_voc2012_data.py (current working directory). - + VOCdevkit - + VOC2012 - + JPEGImages - + SegmentationClass - + ImageSets - + Segmentation - + tfrecord - -Image folder: - ./VOCdevkit/VOC2012/JPEGImages - -Semantic segmentation annotations: - ./VOCdevkit/VOC2012/SegmentationClass - -list folder: - ./VOCdevkit/VOC2012/ImageSets/Segmentation - -This script converts data into sharded data files and save at tfrecord folder. - -The Example proto contains the following fields: - - image/encoded: encoded image content. - image/filename: image filename. - image/format: image file format. - image/height: image height. - image/width: image width. - image/channels: image channels. - image/segmentation/class/encoded: encoded semantic segmentation content. - image/segmentation/class/format: semantic segmentation file format. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import math -import os.path -import sys -import build_data -from six.moves import range -import tensorflow as tf - -FLAGS = tf.app.flags.FLAGS - -tf.app.flags.DEFINE_string('image_folder', - './VOCdevkit/VOC2012/JPEGImages', - 'Folder containing images.') - -tf.app.flags.DEFINE_string( - 'semantic_segmentation_folder', - './VOCdevkit/VOC2012/SegmentationClassRaw', - 'Folder containing semantic segmentation annotations.') - -tf.app.flags.DEFINE_string( - 'list_folder', - './VOCdevkit/VOC2012/ImageSets/Segmentation', - 'Folder containing lists for training and validation') - -tf.app.flags.DEFINE_string( - 'output_dir', - './tfrecord', - 'Path to save converted SSTable of TensorFlow examples.') - - -_NUM_SHARDS = 4 - - -def _convert_dataset(dataset_split): - """Converts the specified dataset split to TFRecord format. - - Args: - dataset_split: The dataset split (e.g., train, test). - - Raises: - RuntimeError: If loaded image and label have different shape. - """ - dataset = os.path.basename(dataset_split)[:-4] - sys.stdout.write('Processing ' + dataset) - filenames = [x.strip('\n') for x in open(dataset_split, 'r')] - num_images = len(filenames) - num_per_shard = int(math.ceil(num_images / _NUM_SHARDS)) - - image_reader = build_data.ImageReader('jpeg', channels=3) - label_reader = build_data.ImageReader('png', channels=1) - - for shard_id in range(_NUM_SHARDS): - output_filename = os.path.join( - FLAGS.output_dir, - '%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS)) - with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer: - start_idx = shard_id * num_per_shard - end_idx = min((shard_id + 1) * num_per_shard, num_images) - for i in range(start_idx, end_idx): - sys.stdout.write('\r>> Converting image %d/%d shard %d' % ( - i + 1, len(filenames), shard_id)) - sys.stdout.flush() - # Read the image. 
- image_filename = os.path.join( - FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format) - image_data = tf.gfile.GFile(image_filename, 'rb').read() - height, width = image_reader.read_image_dims(image_data) - # Read the semantic segmentation annotation. - seg_filename = os.path.join( - FLAGS.semantic_segmentation_folder, - filenames[i] + '.' + FLAGS.label_format) - seg_data = tf.gfile.GFile(seg_filename, 'rb').read() - seg_height, seg_width = label_reader.read_image_dims(seg_data) - if height != seg_height or width != seg_width: - raise RuntimeError('Shape mismatched between image and label.') - # Convert to tf example. - example = build_data.image_seg_to_tfexample( - image_data, filenames[i], height, width, seg_data) - tfrecord_writer.write(example.SerializeToString()) - sys.stdout.write('\n') - sys.stdout.flush() - - -def main(unused_argv): - dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt')) - for dataset_split in dataset_splits: - _convert_dataset(dataset_split) - - -if __name__ == '__main__': - tf.app.run() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/convert_cityscapes.sh b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/convert_cityscapes.sh deleted file mode 100644 index ddc39fb11dd..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/convert_cityscapes.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# Script to preprocess the Cityscapes dataset. Note (1) the users should -# register the Cityscapes dataset website at -# https://www.cityscapes-dataset.com/downloads/ to download the dataset, -# and (2) the users should download the utility scripts provided by -# Cityscapes at https://github.com/mcordts/cityscapesScripts. -# -# Usage: -# bash ./convert_cityscapes.sh -# -# The folder structure is assumed to be: -# + datasets -# - build_cityscapes_data.py -# - convert_cityscapes.sh -# + cityscapes -# + cityscapesscripts (downloaded scripts) -# + gtFine -# + leftImg8bit -# - -# Exit immediately if a command exits with a non-zero status. -set -e - -CURRENT_DIR=$(pwd) -WORK_DIR="." - -# Root path for Cityscapes dataset. -CITYSCAPES_ROOT="${WORK_DIR}/cityscapes" - -export PYTHONPATH="${CITYSCAPES_ROOT}:${PYTHONPATH}" - -# Create training labels. -python "${CITYSCAPES_ROOT}/cityscapesscripts/preparation/createTrainIdLabelImgs.py" - -# Build TFRecords of the dataset. -# First, create output directory for storing TFRecords. -OUTPUT_DIR="${CITYSCAPES_ROOT}/tfrecord" -mkdir -p "${OUTPUT_DIR}" - -BUILD_SCRIPT="${CURRENT_DIR}/build_cityscapes_data.py" - -echo "Converting Cityscapes dataset..." 
-python "${BUILD_SCRIPT}" \ - --cityscapes_root="${CITYSCAPES_ROOT}" \ - --output_dir="${OUTPUT_DIR}" \ diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator.py deleted file mode 100644 index d84e66f9c48..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator.py +++ /dev/null @@ -1,350 +0,0 @@ -# Lint as: python2, python3 -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Wrapper for providing semantic segmentaion data. - -The SegmentationDataset class provides both images and annotations (semantic -segmentation and/or instance segmentation) for TensorFlow. Currently, we -support the following datasets: - -1. PASCAL VOC 2012 (http://host.robots.ox.ac.uk/pascal/VOC/voc2012/). - -PASCAL VOC 2012 semantic segmentation dataset annotates 20 foreground objects -(e.g., bike, person, and so on) and leaves all the other semantic classes as -one background class. The dataset contains 1464, 1449, and 1456 annotated -images for the training, validation and test respectively. - -2. Cityscapes dataset (https://www.cityscapes-dataset.com) - -The Cityscapes dataset contains 19 semantic labels (such as road, person, car, -and so on) for urban street scenes. - -3. ADE20K dataset (http://groups.csail.mit.edu/vision/datasets/ADE20K) - -The ADE20K dataset contains 150 semantic labels both urban street scenes and -indoor scenes. - -References: - M. Everingham, S. M. A. Eslami, L. V. Gool, C. K. I. Williams, J. Winn, - and A. Zisserman, The pascal visual object classes challenge a retrospective. - IJCV, 2014. - - M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson, - U. Franke, S. Roth, and B. Schiele, "The cityscapes dataset for semantic urban - scene understanding," In Proc. of CVPR, 2016. - - B. Zhou, H. Zhao, X. Puig, S. Fidler, A. Barriuso, A. Torralba, "Scene Parsing - through ADE20K dataset", In Proc. of CVPR, 2017. -""" - -import collections -import os -import tensorflow as tf -from deeplab import common -from deeplab import input_preprocess - -# Named tuple to describe the dataset properties. -DatasetDescriptor = collections.namedtuple( - 'DatasetDescriptor', - [ - 'splits_to_sizes', # Splits of the dataset into training, val and test. - 'num_classes', # Number of semantic classes, including the - # background class (if exists). For example, there - # are 20 foreground classes + 1 background class in - # the PASCAL VOC 2012 dataset. Thus, we set - # num_classes=21. - 'ignore_label', # Ignore label value. 
- ]) - -_CITYSCAPES_INFORMATION = DatasetDescriptor( - splits_to_sizes={'train_fine': 2975, - 'train_coarse': 22973, - 'trainval_fine': 3475, - 'trainval_coarse': 23473, - 'val_fine': 500, - 'test_fine': 1525}, - num_classes=19, - ignore_label=255, -) - -_PASCAL_VOC_SEG_INFORMATION = DatasetDescriptor( - splits_to_sizes={ - 'train': 1464, - 'train_aug': 10582, - 'trainval': 2913, - 'val': 1449, - }, - num_classes=21, - ignore_label=255, -) - -_ADE20K_INFORMATION = DatasetDescriptor( - splits_to_sizes={ - 'train': 20210, # num of samples in images/training - 'val': 2000, # num of samples in images/validation - }, - num_classes=151, - ignore_label=0, -) - -_DATASETS_INFORMATION = { - 'cityscapes': _CITYSCAPES_INFORMATION, - 'pascal_voc_seg': _PASCAL_VOC_SEG_INFORMATION, - 'ade20k': _ADE20K_INFORMATION, -} - -# Default file pattern of TFRecord of TensorFlow Example. -_FILE_PATTERN = '%s-*' - - -def get_cityscapes_dataset_name(): - return 'cityscapes' - - -class Dataset(object): - """Represents input dataset for deeplab model.""" - - def __init__(self, - dataset_name, - split_name, - dataset_dir, - batch_size, - crop_size, - min_resize_value=None, - max_resize_value=None, - resize_factor=None, - min_scale_factor=1., - max_scale_factor=1., - scale_factor_step_size=0, - model_variant=None, - num_readers=1, - is_training=False, - should_shuffle=False, - should_repeat=False): - """Initializes the dataset. - - Args: - dataset_name: Dataset name. - split_name: A train/val Split name. - dataset_dir: The directory of the dataset sources. - batch_size: Batch size. - crop_size: The size used to crop the image and label. - min_resize_value: Desired size of the smaller image side. - max_resize_value: Maximum allowed size of the larger image side. - resize_factor: Resized dimensions are multiple of factor plus one. - min_scale_factor: Minimum scale factor value. - max_scale_factor: Maximum scale factor value. - scale_factor_step_size: The step size from min scale factor to max scale - factor. The input is randomly scaled based on the value of - (min_scale_factor, max_scale_factor, scale_factor_step_size). - model_variant: Model variant (string) for choosing how to mean-subtract - the images. See feature_extractor.network_map for supported model - variants. - num_readers: Number of readers for data provider. - is_training: Boolean, if dataset is for training or not. - should_shuffle: Boolean, if should shuffle the input data. - should_repeat: Boolean, if should repeat the input data. - - Raises: - ValueError: Dataset name and split name are not supported. - """ - if dataset_name not in _DATASETS_INFORMATION: - raise ValueError('The specified dataset is not supported yet.') - self.dataset_name = dataset_name - - splits_to_sizes = _DATASETS_INFORMATION[dataset_name].splits_to_sizes - - if split_name not in splits_to_sizes: - raise ValueError('data split name %s not recognized' % split_name) - - if model_variant is None: - tf.logging.warning('Please specify a model_variant. 
See ' - 'feature_extractor.network_map for supported model ' - 'variants.') - - self.split_name = split_name - self.dataset_dir = dataset_dir - self.batch_size = batch_size - self.crop_size = crop_size - self.min_resize_value = min_resize_value - self.max_resize_value = max_resize_value - self.resize_factor = resize_factor - self.min_scale_factor = min_scale_factor - self.max_scale_factor = max_scale_factor - self.scale_factor_step_size = scale_factor_step_size - self.model_variant = model_variant - self.num_readers = num_readers - self.is_training = is_training - self.should_shuffle = should_shuffle - self.should_repeat = should_repeat - - self.num_of_classes = _DATASETS_INFORMATION[self.dataset_name].num_classes - self.ignore_label = _DATASETS_INFORMATION[self.dataset_name].ignore_label - - def _parse_function(self, example_proto): - """Function to parse the example proto. - - Args: - example_proto: Proto in the format of tf.Example. - - Returns: - A dictionary with parsed image, label, height, width and image name. - - Raises: - ValueError: Label is of wrong shape. - """ - - # Currently only supports jpeg and png. - # Need to use this logic because the shape is not known for - # tf.image.decode_image and we rely on this info to - # extend label if necessary. - def _decode_image(content, channels): - return tf.cond( - tf.image.is_jpeg(content), - lambda: tf.image.decode_jpeg(content, channels), - lambda: tf.image.decode_png(content, channels)) - - features = { - 'image/encoded': - tf.FixedLenFeature((), tf.string, default_value=''), - 'image/filename': - tf.FixedLenFeature((), tf.string, default_value=''), - 'image/format': - tf.FixedLenFeature((), tf.string, default_value='jpeg'), - 'image/height': - tf.FixedLenFeature((), tf.int64, default_value=0), - 'image/width': - tf.FixedLenFeature((), tf.int64, default_value=0), - 'image/segmentation/class/encoded': - tf.FixedLenFeature((), tf.string, default_value=''), - 'image/segmentation/class/format': - tf.FixedLenFeature((), tf.string, default_value='png'), - } - - parsed_features = tf.parse_single_example(example_proto, features) - - image = _decode_image(parsed_features['image/encoded'], channels=3) - - label = None - if self.split_name != common.TEST_SET: - label = _decode_image( - parsed_features['image/segmentation/class/encoded'], channels=1) - - image_name = parsed_features['image/filename'] - if image_name is None: - image_name = tf.constant('') - - sample = { - common.IMAGE: image, - common.IMAGE_NAME: image_name, - common.HEIGHT: parsed_features['image/height'], - common.WIDTH: parsed_features['image/width'], - } - - if label is not None: - if label.get_shape().ndims == 2: - label = tf.expand_dims(label, 2) - elif label.get_shape().ndims == 3 and label.shape.dims[2] == 1: - pass - else: - raise ValueError('Input label shape must be [height, width], or ' - '[height, width, 1].') - - label.set_shape([None, None, 1]) - - sample[common.LABELS_CLASS] = label - - return sample - - def _preprocess_image(self, sample): - """Preprocesses the image and label. - - Args: - sample: A sample containing image and label. - - Returns: - sample: Sample with preprocessed image and label. - - Raises: - ValueError: Ground truth label not provided during training. 
- """ - image = sample[common.IMAGE] - label = sample[common.LABELS_CLASS] - - original_image, image, label = input_preprocess.preprocess_image_and_label( - image=image, - label=label, - crop_height=self.crop_size[0], - crop_width=self.crop_size[1], - min_resize_value=self.min_resize_value, - max_resize_value=self.max_resize_value, - resize_factor=self.resize_factor, - min_scale_factor=self.min_scale_factor, - max_scale_factor=self.max_scale_factor, - scale_factor_step_size=self.scale_factor_step_size, - ignore_label=self.ignore_label, - is_training=self.is_training, - model_variant=self.model_variant) - - sample[common.IMAGE] = image - - if not self.is_training: - # Original image is only used during visualization. - sample[common.ORIGINAL_IMAGE] = original_image - - if label is not None: - sample[common.LABEL] = label - - # Remove common.LABEL_CLASS key in the sample since it is only used to - # derive label and not used in training and evaluation. - sample.pop(common.LABELS_CLASS, None) - - return sample - - def get_one_shot_iterator(self): - """Gets an iterator that iterates across the dataset once. - - Returns: - An iterator of type tf.data.Iterator. - """ - - files = self._get_all_files() - - dataset = ( - tf.data.TFRecordDataset(files, num_parallel_reads=self.num_readers) - .map(self._parse_function, num_parallel_calls=self.num_readers) - .map(self._preprocess_image, num_parallel_calls=self.num_readers)) - - if self.should_shuffle: - dataset = dataset.shuffle(buffer_size=100) - - if self.should_repeat: - dataset = dataset.repeat() # Repeat forever for training. - else: - dataset = dataset.repeat(1) - - dataset = dataset.batch(self.batch_size).prefetch(self.batch_size) - return dataset.make_one_shot_iterator() - - def _get_all_files(self): - """Gets all the files to read data from. - - Returns: - A list of input files. - """ - file_pattern = _FILE_PATTERN - file_pattern = os.path.join(self.dataset_dir, - file_pattern % self.split_name) - return tf.gfile.Glob(file_pattern) diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator_test.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator_test.py deleted file mode 100644 index f4425d01da0..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/data_generator_test.py +++ /dev/null @@ -1,115 +0,0 @@ -# Lint as: python2, python3 -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for deeplab.datasets.data_generator.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -from six.moves import range -import tensorflow as tf - -from deeplab import common -from deeplab.datasets import data_generator - -ImageAttributes = collections.namedtuple( - 'ImageAttributes', ['image', 'label', 'height', 'width', 'image_name']) - - -class DatasetTest(tf.test.TestCase): - - # Note: training dataset cannot be tested since there is shuffle operation. - # When disabling the shuffle, training dataset is operated same as validation - # dataset. Therefore it is not tested again. - def testPascalVocSegTestData(self): - dataset = data_generator.Dataset( - dataset_name='pascal_voc_seg', - split_name='val', - dataset_dir= - 'deeplab/testing/pascal_voc_seg', - batch_size=1, - crop_size=[3, 3], # Use small size for testing. - min_resize_value=3, - max_resize_value=3, - resize_factor=None, - min_scale_factor=0.01, - max_scale_factor=2.0, - scale_factor_step_size=0.25, - is_training=False, - model_variant='mobilenet_v2') - - self.assertAllEqual(dataset.num_of_classes, 21) - self.assertAllEqual(dataset.ignore_label, 255) - - num_of_images = 3 - with self.test_session() as sess: - iterator = dataset.get_one_shot_iterator() - - for i in range(num_of_images): - batch = iterator.get_next() - batch, = sess.run([batch]) - image_attributes = _get_attributes_of_image(i) - self.assertEqual(batch[common.HEIGHT][0], image_attributes.height) - self.assertEqual(batch[common.WIDTH][0], image_attributes.width) - self.assertEqual(batch[common.IMAGE_NAME][0], - image_attributes.image_name.encode()) - - # All data have been read. - with self.assertRaisesRegexp(tf.errors.OutOfRangeError, ''): - sess.run([iterator.get_next()]) - - -def _get_attributes_of_image(index): - """Gets the attributes of the image. - - Args: - index: Index of image in all images. - - Returns: - Attributes of the image in the format of ImageAttributes. - - Raises: - ValueError: If index is of wrong value. - """ - if index == 0: - return ImageAttributes( - image=None, - label=None, - height=366, - width=500, - image_name='2007_000033') - elif index == 1: - return ImageAttributes( - image=None, - label=None, - height=335, - width=500, - image_name='2007_000042') - elif index == 2: - return ImageAttributes( - image=None, - label=None, - height=333, - width=500, - image_name='2007_000061') - else: - raise ValueError('Index can only be 0, 1 or 2.') - - -if __name__ == '__main__': - tf.test.main() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_ade20k.sh b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_ade20k.sh deleted file mode 100644 index 3614ae42c16..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_ade20k.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# Script to download and preprocess the ADE20K dataset. -# -# Usage: -# bash ./download_and_convert_ade20k.sh -# -# The folder structure is assumed to be: -# + datasets -# - build_data.py -# - build_ade20k_data.py -# - download_and_convert_ade20k.sh -# + ADE20K -# + tfrecord -# + ADEChallengeData2016 -# + annotations -# + training -# + validation -# + images -# + training -# + validation - -# Exit immediately if a command exits with a non-zero status. -set -e - -CURRENT_DIR=$(pwd) -WORK_DIR="./ADE20K" -mkdir -p "${WORK_DIR}" -cd "${WORK_DIR}" - -# Helper function to download and unpack ADE20K dataset. -download_and_uncompress() { - local BASE_URL=${1} - local FILENAME=${2} - - if [ ! -f "${FILENAME}" ]; then - echo "Downloading ${FILENAME} to ${WORK_DIR}" - wget -nd -c "${BASE_URL}/${FILENAME}" - fi - echo "Uncompressing ${FILENAME}" - unzip "${FILENAME}" -} - -# Download the images. -BASE_URL="http://data.csail.mit.edu/places/ADEchallenge" -FILENAME="ADEChallengeData2016.zip" - -download_and_uncompress "${BASE_URL}" "${FILENAME}" - -cd "${CURRENT_DIR}" - -# Root path for ADE20K dataset. -ADE20K_ROOT="${WORK_DIR}/ADEChallengeData2016" - -# Build TFRecords of the dataset. -# First, create output directory for storing TFRecords. -OUTPUT_DIR="${WORK_DIR}/tfrecord" -mkdir -p "${OUTPUT_DIR}" - -echo "Converting ADE20K dataset..." -python ./build_ade20k_data.py \ - --train_image_folder="${ADE20K_ROOT}/images/training/" \ - --train_image_label_folder="${ADE20K_ROOT}/annotations/training/" \ - --val_image_folder="${ADE20K_ROOT}/images/validation/" \ - --val_image_label_folder="${ADE20K_ROOT}/annotations/validation/" \ - --output_dir="${OUTPUT_DIR}" diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_voc2012.sh b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_voc2012.sh deleted file mode 100644 index 3126f729dec..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/download_and_convert_voc2012.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# -# Script to download and preprocess the PASCAL VOC 2012 dataset. 
-# -# Usage: -# bash ./download_and_convert_voc2012.sh -# -# The folder structure is assumed to be: -# + datasets -# - build_data.py -# - build_voc2012_data.py -# - download_and_convert_voc2012.sh -# - remove_gt_colormap.py -# + pascal_voc_seg -# + VOCdevkit -# + VOC2012 -# + JPEGImages -# + SegmentationClass -# - -# Exit immediately if a command exits with a non-zero status. -set -e - -CURRENT_DIR=$(pwd) -WORK_DIR="./pascal_voc_seg" -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -mkdir -p "${WORK_DIR}" -cd "${WORK_DIR}" - -# Helper function to download and unpack VOC 2012 dataset. -download_and_uncompress() { - local BASE_URL=${1} - local FILENAME=${2} - - if [ ! -f "${FILENAME}" ]; then - echo "Downloading ${FILENAME} to ${WORK_DIR}" - wget -nd -c "${BASE_URL}/${FILENAME}" - fi - echo "Uncompressing ${FILENAME}" - sudo apt install unzip - unzip "${FILENAME}" -} - -# Download the images. -BASE_URL="https://data.deepai.org/" -FILENAME="PascalVOC2012.zip" - -download_and_uncompress "${BASE_URL}" "${FILENAME}" - -cd "${CURRENT_DIR}" - -# Root path for PASCAL VOC 2012 dataset. -PASCAL_ROOT="${WORK_DIR}/VOC2012" - -# Remove the colormap in the ground truth annotations. -SEG_FOLDER="${PASCAL_ROOT}/SegmentationClass" -SEMANTIC_SEG_FOLDER="${PASCAL_ROOT}/SegmentationClassRaw" - -echo "Removing the color map in ground truth annotations..." -python3 "${SCRIPT_DIR}/remove_gt_colormap.py" \ - --original_gt_folder="${SEG_FOLDER}" \ - --output_dir="${SEMANTIC_SEG_FOLDER}" - -# Build TFRecords of the dataset. -# First, create output directory for storing TFRecords. -OUTPUT_DIR="${WORK_DIR}/tfrecord" -mkdir -p "${OUTPUT_DIR}" - -IMAGE_FOLDER="${PASCAL_ROOT}/JPEGImages" -LIST_FOLDER="${PASCAL_ROOT}/ImageSets/Segmentation" - -echo "Converting PASCAL VOC 2012 dataset..." -python3 "${SCRIPT_DIR}/build_voc2012_data.py" \ - --image_folder="${IMAGE_FOLDER}" \ - --semantic_segmentation_folder="${SEMANTIC_SEG_FOLDER}" \ - --list_folder="${LIST_FOLDER}" \ - --image_format="jpg" \ - --output_dir="${OUTPUT_DIR}" diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/remove_gt_colormap.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/remove_gt_colormap.py deleted file mode 100644 index 900570038ed..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/datasets/remove_gt_colormap.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Removes the color map from segmentation annotations. - -Removes the color map from the ground truth segmentation annotations and save -the results to output_dir. 
-""" -import glob -import os.path -import numpy as np - -from PIL import Image - -import tensorflow as tf - -FLAGS = tf.compat.v1.flags.FLAGS - -tf.compat.v1.flags.DEFINE_string('original_gt_folder', - './VOCdevkit/VOC2012/SegmentationClass', - 'Original ground truth annotations.') - -tf.compat.v1.flags.DEFINE_string('segmentation_format', 'png', 'Segmentation format.') - -tf.compat.v1.flags.DEFINE_string('output_dir', - './VOCdevkit/VOC2012/SegmentationClassRaw', - 'folder to save modified ground truth annotations.') - - -def _remove_colormap(filename): - """Removes the color map from the annotation. - - Args: - filename: Ground truth annotation filename. - - Returns: - Annotation without color map. - """ - return np.array(Image.open(filename)) - - -def _save_annotation(annotation, filename): - """Saves the annotation as png file. - - Args: - annotation: Segmentation annotation. - filename: Output filename. - """ - pil_image = Image.fromarray(annotation.astype(dtype=np.uint8)) - with tf.io.gfile.GFile(filename, mode='w') as f: - pil_image.save(f, 'PNG') - - -def main(unused_argv): - # Create the output directory if not exists. - if not tf.io.gfile.isdir(FLAGS.output_dir): - tf.io.gfile.makedirs(FLAGS.output_dir) - - annotations = glob.glob(os.path.join(FLAGS.original_gt_folder, - '*.' + FLAGS.segmentation_format)) - for annotation in annotations: - raw_annotation = _remove_colormap(annotation) - filename = os.path.basename(annotation)[:-4] - _save_annotation(raw_annotation, - os.path.join( - FLAGS.output_dir, - filename + '.' + FLAGS.segmentation_format)) - - -if __name__ == '__main__': - tf.compat.v1.app.run() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab.yaml b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab.yaml deleted file mode 100644 index 98b362456f5..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -device: cpu # optional. default value is cpu, other value is gpu. - -model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. - name: deeplab - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: ImageTensor - outputs: SemanticPredictions - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 50, 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to calibration dataset location if needed - transform: - ParseDecodeVoc: {} - - -evaluation: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - mIOU: - num_classes: 21 # built-in metrics are topk, map, f1, allow user to register new metric. - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - performance: # optional. used to benchmark performance of passing model. - iteration: 100 - configs: - cores_per_instance: 4 - num_of_instance: 6 - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab_itex.yaml b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab_itex.yaml deleted file mode 100644 index 5f5ae67361d..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/deeplab_itex.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. - name: deeplab - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: ImageTensor - outputs: SemanticPredictions - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 50, 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to calibration dataset location if needed - transform: - ParseDecodeVoc: {} - - -evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - mIOU: - num_classes: 21 # built-in metrics are topk, map, f1, allow user to register new metric. 
- dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - performance: # optional. used to benchmark performance of passing model. - iteration: 100 - configs: - cores_per_instance: 4 - num_of_instance: 6 - dataloader: - batch_size: 1 - dataset: - VOCRecord: - root: /path/to/pascal_voc_seg/tfrecord # NOTE: modify to evaluation dataset location if needed - transform: - ParseDecodeVoc: {} - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/main.py b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/main.py deleted file mode 100644 index 7628672b5b9..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/main.py +++ /dev/null @@ -1,70 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import time -import numpy as np -from argparse import ArgumentParser -import tensorflow as tf -tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) -tf.compat.v1.disable_eager_execution() - -class eval_classifier_optimized_graph: - """Evaluate image classifier with optimized TensorFlow graph""" - - def __init__(self): - - arg_parser = ArgumentParser(description='Parse args') - - arg_parser.add_argument('-g', "--input-graph", - help='Specify the input graph for the transform tool', - dest='input_graph') - - arg_parser.add_argument("--output-graph", - help='Specify tune result model save dir', - dest='output_graph') - - arg_parser.add_argument("--config", default=None, help="tuning config") - - arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') - - arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') - - arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') - - self.args = arg_parser.parse_args() - - def run(self): - """ This is neural_compressor function include tuning and benchmark option """ - - if self.args.tune: - from neural_compressor.experimental import Quantization, common - quantizer = Quantization(self.args.config) - quantizer.model = common.Model(self.args.input_graph) - q_model = quantizer.fit() - q_model.save(self.args.output_graph) - - if self.args.benchmark: - from neural_compressor.experimental import Benchmark, common - evaluator = Benchmark(self.args.config) - evaluator.model = common.Model(self.args.input_graph) - evaluator(self.args.mode) - -if __name__ == "__main__": - - evaluate_opt_graph = eval_classifier_optimized_graph() - evaluate_opt_graph.run() diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/requirements.txt b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/requirements.txt deleted file mode 100644 index 16ea87a7151..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -intel-tensorflow -neural-compressor diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_benchmark.sh b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_benchmark.sh deleted file mode 100644 index 2304baaea20..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - for var in "$@" - do - case $var in - --config=*) - config=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - esac - done - -} - -# run_tuning -function run_benchmark { - - python main.py \ - --input-graph ${input_model} \ - --config ${config} \ - --mode ${mode} \ - --benchmark -} - -main "$@" diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_tuning.sh b/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_tuning.sh deleted file mode 100644 index 5ecbd6b31ea..00000000000 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/quantization/ptq/run_tuning.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -x - -function main { - init_params "$@" - run_tuning - -} - -# init params -function init_params { - - 
for var in "$@" - do - case $var in - --config=*) - config=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - output_model=$(echo $var |cut -f2 -d=) - ;; - esac - done - -} - -# run_tuning -function run_tuning { - python main.py \ - --input-graph ${input_model} \ - --output-graph ${output_model} \ - --config ${config} \ - --tune -} - -main "$@" diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md deleted file mode 100644 index d6b6d3c9698..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/README.md +++ /dev/null @@ -1,150 +0,0 @@ -Step-by-Step -============ - -This document is used to list steps of reproducing TensorFlow style transfer Intel® Neural Compressor tuning zoo result. -This example can run on Intel CPUs and GPUs. - -## Prerequisite - -### 1. Installation -```shell -# Install Intel® Neural Compressor -pip install neural-compressor -``` -### 2. Install Intel Tensorflow -```shell -pip install intel-tensorflow -``` -> Note: Supported Tensorflow [Version](../../../../../../README.md#supported-frameworks). - -### 3. Install Additional Dependency packages -```shell -cd examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq -pip install -r requirements.txt -``` - -### 4. Install Intel Extension for Tensorflow -#### Quantizing the model on Intel GPU -Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[gpu] -``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) - -#### Quantizing the model on Intel CPU(Experimental) -Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[cpu] -``` - -### 5. Prepare Dataset -There are two folders named style_images and content_images -you can use these two folders to generated stylized images for test -you can also prepare your own style_images or content_images - -### 6. Prepare Pretrained model - -#### Automated approach -Run the `prepare_model.py` script located in `LowPrecisionInferenceTool/examples/tensorflow/style_transfer`. - -``` -usage: prepare_model.py [-h] [--model_path MODEL_PATH] - -optional arguments: - -h, --help show this help message and exit - --model_path MODEL_PATH directory to put models, default is ./model -``` - -#### Manual approach - -```shell -wget https://storage.googleapis.com/download.magenta.tensorflow.org/models/arbitrary_style_transfer.tar.gz -tar -xvzf arbitrary_style_transfer.tar.gz ./model -``` - -## Run Command - ```shell - python style_tune.py --output_dir=./result --style_images_paths=./style_images --content_images_paths=./content_images --input_model=./model/model.ckpt - ``` -### Quantize with neural_compressor -#### 1. Tune model with neural_compressor - ```shell - bash run_tuning.sh --dataset_location=style_images/,content_images/ --input_model=./model/model.ckpt --output_model=saved_model - ``` -#### 2. 
check benchmark of tuned model - ```shell - bash run_benchmark.sh --dataset_location=style_images/,content_images/ --input_model=saved_model.pb --batch_size=1 - ``` - -Details of enabling Intel® Neural Compressor on style transfer for Tensorflow. -========================= - -This is a tutorial of how to enable style_transfer model with Intel® Neural Compressor. -## User Code Analysis -1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. - -2. User specifies fp32 *model*, calibration dataset *q_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself. - -For style_transfer, we applied the latter one because we don't have metric for style transfer model.The first one is to implement the q_dataloader and implement a fake *eval_func*. As neural_compressor have implement a style_transfer dataset, so only eval_func should be prepared after load the graph - -### Evaluation Part Adaption -As style transfer don't have a metric to measure the accuracy, we only implement a fake eval_func -```python -def eval_func(model): - return 1. -``` - -### Write Yaml config file -In examples directory, there is a conf.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The conf_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. - -```yaml -device: cpu # NOTE: optional. default value is cpu, other value is gpu. - -model: - name: style_transfer - framework: tensorflow - inputs: import/style_input,import/content_input - outputs: import/transformer/expand/conv3/conv/Sigmoid - -quantization: - calibration: - dataloader: - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -evaluation: - accuracy: - dataloader: - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -tuning: - accuracy_criterion: - relative: 0.01 - exit_policy: - timeout: 0 - random_seed: 9527 -``` -Here we set the input tensor and output tensors name into *inputs* and *outputs* field. In this case we only calibration and quantize the model without tune the accuracy - -### Code update - -After prepare step is done, we just need add 2 lines to get the quantized model. -```python -from neural_compressor.experimental import Quantization - -quantizer = Quantization(args.config) -quantizer.model = graph -quantizer.eval_func = eval_func -q_model = quantizer.fit() -``` - -The Intel® Neural Compressor quantizer.fit() function will return a best quantized model during timeout constrain. 
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf.yaml b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf.yaml deleted file mode 100644 index 89664a00e35..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -device: cpu # optional. default value is cpu, other value is gpu. - -model: # mandatory. used to specify model specific information. - name: style_transfer - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: style_input,content_input # optional. inputs and outputs fields are only required for tensorflow backend. - outputs: transformer/expand/conv3/conv/Sigmoid - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -evaluation: - performance: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf_itex.yaml b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf_itex.yaml deleted file mode 100644 index 23a38a18893..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/conf_itex.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -model: # mandatory. used to specify model specific information. - name: style_transfer - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: style_input,content_input # optional. inputs and outputs fields are only required for tensorflow backend. - outputs: transformer/expand/conv3/conv/Sigmoid - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -evaluation: - performance: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 2 - dataset: - style_transfer: - content_folder: ./content_images/ # NOTE: modify to content images path if needed - style_folder: ./style_images/ # NOTE: modify to style images path if needed - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. 
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg
deleted file mode 100644
index 5f6c5a6beb51054070261ed7d4c7ca6e5973f484..0000000000000000000000000000000000000000
Binary files a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/colva_beach_sq.jpg and /dev/null differ
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg
deleted file mode 100644
index 248d9fd31f9e7f8cafd9bede3c346271b0394aea..0000000000000000000000000000000000000000
Binary files a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/content_images/golden_gate_sq.jpg and /dev/null differ
z$+;~s+ciE{iBpsW#rAQEq)%)UV;Vaid!I;j0olblv#&3rj5?poMUv@*vY4Kh%XWbo z-@U)fnT*gWnZEO&^%hY_66gf58;?C$oJ(JBc6AYjy>WzM0O$MFQnGj=`81D=JuE1o znKUSV;Fk zZ(s8O2*JCm>AI})QWpG^X#hzgJ3|DzY0`}{5cgM#l^VS`PPR%iQ}D*?zrN)RD^QM3 z6O+R0%tMkUK^2Z0MLCB;He&LH@5d+3u_ZXkv!z}1Hd~5AqvuJK2E{K)k#3sG_Um_- zXb4!CP;MxByM3*R#-dJjC# z@ou=TK6$81k4Mp0?s+w`s5{I2?|M#ONPu+W14ArY96wgSPAGr-X`lyabM=Ae{D|bN zsC*YX{9==elq}}9<{WQ#I-84YMl-T{jqq3DX%XH#8e^3)$v_o?)g0L?T&PZah<#@D zPHuI+^zqoXQPE3hJ=L1py2qbeK89>kL#G(d5&Eo@a2S}mOei=(T@a>|yr$SF2F^Yo zWYS5vj9#I=5HAWE@+5D=KO+xRtm&^Q&`FzTVH{k=`&CsYC!(bk5#%4;7QMh&*R?;6 z1bn9JjKf~P@n7mQl?R+G9W8pjH7DJAI=p> z&6+JJ5E@N?mwr!hpyH?8*Z`gNfF%*WsA<3W7*t)Px_uZ-J@T)W##RFfk&ys%i8@3s zJPRSa_``~5bCv4?Cz}L{B&f4ZHjYFHePz|IuOl3LGl$fD6p!wkPy_^CDrwi>*#ORw QcBgbsG}NZ|pqPdK3$9ow{r~^~ diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py deleted file mode 100644 index 74182ad5f37..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/prepare_model.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import argparse -import enum -import tarfile -import abc - -def get_pretrained_model(destination): - """ - Obtains a ready to use style_transfer model file. - Args: - destination: path to where the file should be stored - """ - url = "https://storage.googleapis.com/download.magenta.tensorflow.org/models/ \ - arbitrary_style_transfer.tar.gz" - - os.system("curl -o arbitrary_style_transfer.tar.gz {0}".format(url)) - with tarfile.open("arbitrary_style_transfer.tar.gz") as tar: - if not os.path.exists(destination): - os.makedirs(destination) - tar.extractall(destination) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Prepare pre-trained model for style transfer model') - parser.add_argument('--model_path', type=str, default='./model', help='directory to put models, default is ./model') - - args = parser.parse_args() - model_path = args.model_path - try: - get_pretrained_model(model_path) - except AttributeError: - print("The model fetched failed.") - diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt deleted file mode 100644 index 1e5d462dcd4..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -scikit-image -Pillow>=8.2.0 diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh deleted file mode 100644 index 9582fc5c6f0..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - iters=100 - for var in "$@" - do - case $var in - --topology=*) - topology=$(echo $var |cut -f2 -d=) - ;; - --dataset_location=*) - dataset_location=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - --batch_size=*) - batch_size=$(echo $var |cut -f2 -d=) - ;; - --iters=*) - iters=$(echo ${var} |cut -f2 -d=) - ;; - *) - echo "Error: No such parameter: ${var}" - 
-          exit 1
-      ;;
-    esac
-  done
-
-}
-
-
-# run_tuning
-function run_benchmark {
-  style_images=$(echo ${dataset_location} | awk -F ',' '{print $1}')
-  content_images=$(echo ${dataset_location} | awk -F ',' '{print $2}')
-  echo "$style_images, $content_images"
-
-  python style_tune.py \
-          --input_model "${input_model}" \
-          --style_images_paths "${style_images}" \
-          --content_images_paths "${content_images}" \
-          --config "./conf.yaml" \
-          --batch_size "${batch_size}" \
-          --tune=False \
-          --output_model "${output_model}"
-
-}
-
-main "$@"
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_tuning.sh b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_tuning.sh
deleted file mode 100644
index d5adc1060bb..00000000000
--- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/run_tuning.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-# set -x
-
-function main {
-
-  init_params "$@"
-
-  run_tuning
-
-}
-
-# init params
-function init_params {
-
-  for var in "$@"
-  do
-    case $var in
-      --topology=*)
-          topology=$(echo $var |cut -f2 -d=)
-      ;;
-      --dataset_location=*)
-          dataset_location=$(echo "$var" |cut -f2 -d=)
-      ;;
-      --input_model=*)
-          input_model=$(echo "$var" |cut -f2 -d=)
-      ;;
-      --output_model=*)
-          output_model=$(echo "$var" |cut -f2 -d=)
-      ;;
-    esac
-  done
-
-}
-
-# run_tuning
-function run_tuning {
-  style_images=$(echo ${dataset_location} | awk -F ',' '{print $1}')
-  content_images=$(echo ${dataset_location} | awk -F ',' '{print $2}')
-  echo "$style_images, $content_images"
-
-  python style_tune.py \
-          --input_model "${input_model}" \
-          --style_images_paths "${style_images}" \
-          --content_images_paths "${content_images}" \
-          --config "./conf.yaml" \
-          --tune=True \
-          --output_model "${output_model}"
-}
-
-main "$@"
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg
deleted file mode 100644
index 5af5a0eff5980f17081299c28da0903326d380c7..0000000000000000000000000000000000000000
Binary files a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/kanagawa_great_wave.jpg and /dev/null differ
diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg
deleted file mode 100644
index bb0c46ea1debbe937188c7ba1811cb5a00a0a7c2..0000000000000000000000000000000000000000
Binary files a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_images/zigzag_colorful.jpg and /dev/null differ
zcg)6A_STR{cf-?&#KH#E@4MHwrLnWm8#WNFI!(3jl4RmUaV37D&<|~pb#-Q}?Pbx$ zm!t#9%Bu1vi1?;m>`T{DvT_izTUvPOXp|K`ROrBNZX@inD{26L@iiZmUTIreTe^+9 znu0O3DmQ$>KbSa0`IZXQy;Y8@9zSQgCu@-F}mNfok>Ktcs+Ju_MVBJ7b#QcY!i#AIY5Cni;-~v# zo>hw`cnZ@@*><`WvgIZbhXnh$sX%^-L*KQAhvi350$Yj2kTm9?vo=W9E)dbnNXKLj^3eJ3bF?$r)Zyt*IR~s%vy$CGm!B8A5MA zUhqq~7MLu9(xXO?iq4~!*&C0lZXjx^_GBG(clP`K@A5Tl2U!oCqz5Y^qq7r29?{c; zZ(W=a1|+D=q{IVu6_DSrsMm(Bz24$Mb+CwBq+p7w>57`(^3T~Gt37+7e=>0?{__z- zQ!;Jv@5imarGTP0mpGiBRPp&W#!r`+Vo` zQTCle7Zf&G%-qyt6F${JI?RRNPc1}d2|5a&<7b*u%f#NRV~X?3Mo6sQ1GcaDqa)~e zMMHGRXd;7Hi?uygL$SGV@ZUOeAo)hv%L=ABv4~#nffV<1r!$pKYDZWJIG5##R%n6Z zU)=hTgmxK|gR8wyfl~55PKNndm+1hxX>DM*~O& zTGj1bdOQmnlCcW0c&>Au4p)Dm-FQnFITn^x9j**|Wppk%#4&kE9f6WeG81DYeK;ai zxVQgd7KjOMGqEJKYB*!JTimG+t8}(HlE{rJf5rPP%|&#bD~d*qG^sS7pPSfM^N=`G-&R;8y`d&A_kE$uzn& zv6a=k-JclK9>67&+5sNVUYoLZBr2piDU$5y2gtezJ+C$p8X3%p6q~IN-J~yk<8bnB z%&d=A>N-cF6a~Eda53K9Yb)nGfi~Dh_TH}kTf0if&_6BS_kbV|Pj(_jcjjouAz(kD zrY2*KYN;#M{w#Zll!&ml`kZ$*aF22;c}`rBq^d0>@g>LD(_tT9`NHaZfTWZA1VeF! zU20%p=I#g7GYo@gnz3vK-t%>3f1)4T7!N6$O*Ra1tTnh~Ze4rPe=DX9Qe1Pe&(xgP zDZKQMkQ=_e^S&DUW zP0CCLE=NAkV58|Z@gduo)u#=|YJ5NH-e%CV?qYnNdAsmQa3%4uh>v_>^F4qo5CF>! z(|*Ce+6KJ72PAJ>=}V_v&U;pBqfE9PwPr7Oz0CB@qpJgO#^s2P6zK8sPaL(k6Ug4-zG^GqBg^G{ED|Eu{{s zwVcYac_1g)jBqw7_tic%oVse?h=JxRML);;{ycMbPb%NO9yys`x5X~{l|skR^cARP ze{I`~)J|cQg`Q?qThX2RM(EN?b62rT=_~Gd>IH=J z83-p|sk%J(PrKwF$>j5jM~?qP3LvkIyhIR2cb6#jNjDbigm0DUEtni z%M$K5Ufa5D&eTZvnVAGD|Azcl%NTaEemv84*aC@F_$Y<-fDntkzop8?ok8CQ8M|Vi z*Hck!=1Cw#tBaE0_&W3^K)&SW9&qui(3sIqpi0|FGy;WtWC~TXPr=4|;l9+z?=svU z&pAMz<5iR1wa=iRIjth@-`hzJg)!x~Y9}d6;iJ2P7%GNRWs7uZcqc#TTY>9STH8&FpJ6bdpHZ_!0i$#5qQ zZ8FAs?BC-?YM;5GE^g;{?&FmhDP5rQ$glbNu(JU9I-4AJ1!4f6C|>iA^cP&WJTt6S zL03ZNrol9kbody^=>)sc5$k9Jw8K4rhd;#UbxECfdVPlxih6n{Hg*HN^uASUZ@UL@ zpu-BJ0t7`~R-`>w59^hk26HAIqN4+}kke>QqjfxQ;=AiNh*D2eUED&|a`Apwo2O8s zGM=ovBXh=}>5l?qR3G|1L#OdnBj`w66F{=A0-6NsQ$^)Yp-w=el--DCtH&#rWEoHWGTWCe-CS+2H7w#(*ceT=laNkF zVXomu;zxKsjxnztp^W|GDKq09?>8CHNL>_EPIM%NseHVB{c+h!9ASB^`Qe z^X}tA0LD0;r_p2E{P`$(tpS~TfVxv50nf#y2?SNe1O5}&XJqbJ+(#Pu5!R5xbH;ip zkMu(zy^p)Lf21I&EDJ4{&+N=iU!K96+mGBu;S)}p^be#a+Ox~_>1~#Ci}A(C51q;x zs~c^sj4DKlCqj!ZOS}aK+^J{RqobB}u~?P9H@#|Kr%oq7T$zY+_XZsYDSu7T#UtXQ zOnZ{KcWAegk)ilhRrrty4J2_&?JRTndx+`P>wVDSowkEae67iF@wB2_*ZZ7`F@c6q z$GA-tEywX^Zt$}!`Kz_5gL}YSx0mQjVc4>PQ6FhR-m)yWaJ1%v!m;b8wZd^~r0?@r z1;8aw*lb@c`v(%(R8y*_RT5if3(fvK$}@^x4J(*|jQ&hu+Hgt5mM7eUwJXyMBz2-~ z70eWw8yRD~bztnksuaJS0s;#hx8!C{d9tOmUT~HJ%`35}BgKNm!jEYZ=el*qT+NBX z&uEijIH!asOqEmUkeTMFDStjc<^cG1HZU&KcvYtC|Mi%2*2ZWvL$Du&mwDdHhH!Ag zPUOvGl-<7pa>>=Si&Gb>4eaF`RM)(~<>)>uZ%Y#&5<^gTP2U4Dwpi@T4@8GKB=R~t z4~d(NKJTPM7J#c`*Pto^nD2$P0<8>oCJs@jB;wVDInw%2ZN${atFXbYhK}Cu@@Ke#_Q%3XySQw4>;P&oJN?HS=SmC@8nYb%-qmNI_10Pt>&+k zrCJz20#SNvh{c@GHrC)bCkke=H|&oT&ANU-ptkZ#6E9~^!GhUE{ifA2ALUmQ+M9E4 zQC~tTpc^--=0!@>xI`PrN$JPF57n-GL=Iw{_=b)|M@DNr7?ZX`c3s` z$eB%eEt*Z{?IiV1qW*Ll)$4r|^!gfcC1#i;LA}hKmR)1s>dy7@1gVElP#Ycw;e?m0+H15wZ*O;VUNYTTv z5sjTLyayPp=1yFsMvHkM4=GWIYM=hq9%urw{A+W|LcQO|f~7?zt#Lw+P4DKYyn*mDoljz+{>)S#?*Za-UlV`dCa8W;9kN_LERS+GtVO`@0hPn4`m+_D zrR{?6pOR3Rev&v=-Ur>OApm%4&it+nF%+zlD`vt1mHGR_!%Qu2nRm=VsGqAdE$oW? 
zDwN?b$=-i90mp?`i#N?h8pXzB{Rwu+5e>|X5=_~nA#8gL@eOE`zXupnl@196(>K+B zS2O1x)~u8{4j}v!@OnppfY*SD9m9uE9&ssPFEzKO=pAH(nlP0_?&IDA`jRF>k7C}- z$0zUbpT_Qj)pG9vd=bEyAeNHl_7kOozd-5^7sk*F5ae?c@%040>YJROp)mbb7yR0 z((Gtftht|ny#*(XI?WQ?3bWyaMU^EJHZg*FVZAyAB(-Wp26ZL2DMLJ;B&Q9d!N5zk z5_FzF^y0)}yE@iQMlELRczIl3oZ9s|p2hf( zGi|gMa~G6!(l$kD0d;98zSnMvghYW`t3!}9ai3!cs}9)?#zZ#h@-3m7@~8ZAj}UjZ zBzN`)Utqfdru-sJk77x%xCk~SRF%*0Q2j(xtJu)HZV$8Y;9Ofl3l)S#bDMx(N zyie-)*uVqQ;Nk-|-<$cW_~^UTBb2}tc=gBkF5c__r=Qj>4$(23kvIx-*4|GJTo8>`fuMr1FqJPC$>z zuB6SnlNG+D?)ES}BydMo#O(1j)&mrLJq}uy{m}_urHQj&%%j>iY-RUf&YO{!^ynxR ztkDKd9DFNC{c2a_1-z7meuIG9rkx25_V6d|dO^4GpmR;*+qmToeizEnT7_c{*p31zg2?ZJ;na}vQTXilQ? zF5T?Ku`vYcY$EhW2;z=@_C;}}Csm}}V)r&8F%TZmhI4%-a?zpt^aP$d-r+8HPM?BH zlaQN?CyGvI+cf3g_xisX%mDK&9iT{;8K=D`9vfVC98lh&kD-`zIA6u_ZcSQ`==xWf% zpDa7UmM>fv&v358%PRlJm=d~FpQcL8znfOY!s@gRv)VeT+3+COVY}#+K`TY{}7IJ4+C_w7j)o1}UM%FI{SWMC2NNrDPW+_$}Kn*{dSPUrm{}+=X-yNUh7u183@)?To_N{33-(PlNgHraFJCz3(9M zyxKM%mLSV>$J7v+rDZ?z84rD;(vMmfr172>v-bee^aSksBKV{XeX zpG$S(%UMcaYII7+yCvqn{{#oxJ;6c0`~+$&V&Lrb+;eSBz=}DJtdr&!$^-c&B3)-S zgyc7<1r`{ZeQY<3dJ~LBRsZOGV4Vq6#4=vYLsxG>C6-`a@f^R0Ww#F9aHP|bLyklMI&4~Uyi1AVm z6lM5NQM`_!9H})=r7z^3NxKo>ESi~(U6~ZBBSpDn*4j&-n&rN~ws%XATB_Y>h;W@~ z&$sv&40`G5S)^88Fn6{(atyiH9+?$vy^~51j>bRR_C{~dqlY8Yb8gr1f@<$Y3G?OI ztKZ)2czN~m_6hh+{(7&-jig1@X|pQjPN!&@N(=c%)MJ-=M+F?NjFcE+WRQ87 z>Ya7|{yAreCRD#HQ%$%KPt}qj-|D#t2Z0{t&MI#PqQrgXdwJ97+}Ow*!S! z3CsL%XqaM`ewVtw`PNH}4Y;%2e!qTEFvKr7J85J%2sP;Dg|D=Q7GLJ{jjNXD`htko zNw8yA%%h~NW1~r5A1YSx-UBwD^%R%b*6&~P)Km6L*!DrLzL2aC4R9ealeP2|Xl+oJ9w9La2z5TaLgekno;HPeYLwk&P*~2jwad)2 zS31@ikp6_a{T9LkwnnY!dC}iqzLLgd!NBS+jOzTYVwQ^U3qNy(ca4?t!jg#7hF!aq>_Gu z@G>#r`MwI1`|6N8*r)NV?Nu`**2tshnZ-|Y#EI@lJOC!Z<3VDv-C|wECkxIK32TS2 z_c#Rfkk?|Fs46=7ViRI=7k4yjfeToDlFeMhST)u@&T4E6YxLeNGIl`kgG`)}Y`jJg ze^qQ%5be3N81OQ*5~!xMe@xvbx?s(yXvotGi_zjJS3Fxc8TjZ&W%6Dc0ICu<9XTIpf^ z;=`oH3KZHL935Q>YvWJ$?g3?lpd6obSQ5}`_KRy-q?aMhugLh$D%6PsDxiOp>jZwG zT+rN-T=3({scZa6*J9l6;OV|3(x=lezWZuzCZBhDpNZPPNCVr5lLkaO4=_ zpIHvsp97@>ltP$y+P^5Y?^3Hk=Tir3WBGBLm+J%cisID`3~|G~iHD<`qwWOVazec> zzWtY=!+?T%3El$F@C0XP0=n7;wajFgYSc4F$k=Rl9NCU^8Lwo5i!sG%TJr~2TVxoY z9$|t-uQBb?VeV9gs7H_AgCE0BJo%AP>oCru4bRLcjJr3f zub*z8Y3B4pveE8&(VshF>BjIiFT`fdO#Ay>p^gMJOR5+Mn4W=wO+INQ#O z^MFE2BpU1A9t)L<#TeG?)l+$XOR6V$DT@>0qF1S$P94hT{i=~$NW$|IqneUjikc9; zRx4gM{8e;W+JT^%~Vq$L!uHZOGS+AyXXl$3o9jkvw#(Azi->vGN&#cmvf>6{jN=( zF|R1`zPzh(g5hP|Mr|vXyX}`o@oEo!LUcyD+K?O8&_e&2B8mU59w@&M7&6ixT|2;U z)z%o}cySOr*arydnf3yIZLSGfC&0ZuSVSx&-I*mx~ zk-+r+Px8^Baw*879^paDt)!&VQL(nHQCeKL+4&PN$3B|W71Wt%6`l)Edf}_=K3FLI zuR%f+fbl!N_B*hp8cOp5o&&X<*1-fuAus4CO&YZuvH`Q(OFwf!qN_bbgHIw)xyr`cFu)ovvT`k&*FRi`Yi8 z{oE?ds*;kI3+7tv!!CfA40Gl=K8}trIV-u(qje;*3ICP`&33czbgjk8!-w;-nR0I0 z0+f(kAwPF!v)W1Fgf{-GTHzh9ooJm8|+qxyRb1GXcj%{vu z{URMZfIE5Xm4TaUOEc2?))oak<$wo^`O>I3qr2iF*J_Oj;SJU1lvU^|8`x4Va*l(o4?|Z;UPto>h2S)@VfF_v}j;+<- zehL?*>bhAN#o3yAdA_jQ*w)Oe%n(1u=GK>Jt`dY>!K6SZeJ0XbbYj#SlObfB-q>HG)N?sxO>?B+ z&awIt&!`n$m6GVDvc`RHN`_B%Y2>Rd^w?e_Gki;IuE-5D+pagwgA?R&=8@!6ztfQ zN_$C$cqDiE)Rhd}$a3?6Z|dwXZV86dJnml{{EFTKteA}BKv#u+?R$Vdm6~(q%EnWX z6V5Mo)cQNlhKZm7$=P&EYtUWi(o(?zp@tvxg~ZIt0a&2^`b#;&KGC0(rM0>j+1N2z9R;w&x5}Q;Cg|RJ3rfK6Mzwz0^bAhF5(zWy;u@bRVH!M6Z-FjgAYs-tB#)wBJHMzn6pnUeY}zz)5pb!# z{9~PSGk2Za-1xC`P=>9y%S|+`l&eU9)O`^rOyUmyI}+JE@MS!VQIxMO}P(Su=9DTXwF*z_4ka4P$YtAU5ZnXZI1kXHY| z%qaDlIv>QNMKV0a;>JS}Jq$Dtxi(cv$X>}G#ZWOuPg%n=z_dI)T}mzrK0z0dp7zI?nr|jgUD#1IN4|c_G1@7aS)oMv%SHXroVw0C0|2U#;)3$ zZ*)`}HXqgQ8+X0^4jPJ7acl3CY!01UYnUsf$Z%gR@XS^`TRf?}SVmGlQDmLIJ`yu= zJ_q6bj}RbL%fp#Swn#b=oR`f>WJXaaE{rr3Egmt?*SlHCv}abrM51?Nm3Ham1t7=V*a}W 
zF(*KA9V;l&X;u4-`@+#ra|N`?*Vk(v8`_WZY;XSj|6g+mM3PY zIq3;C35DE>%~?4#@B9!lM!#5-NSDSqWM5Hv+g`4gm;G(f!?#3C5w>%5 zXXEV=S+d`M_)_nE5bvY>smhI0{8tLnvL8P4cBEmWH-zNap9Gpx()Ut0SkyDQU?aQa z4c%N(K955--u%RL`$D+^N21q#?Cx<8pk1A;((^|gDQ8IMS2L;@@VyCn$oYx1s>5Ic zNAZ;LS1ebq?^iFLvI|Z`wnmNn6Kbl;*0nX*mkX||;oTun!l>Nel6xsz7bEDNj&>AD zmo@I)FuI>E?W}p*&TDgk3?5DPN)oJg@LWf7!ym!fn7$U|qe0yTn#>kXO!hPE8>>#; zf}X5~^TU+px`{(#EMBaCxZGYBp_;mtytC9-`d2(h89vjeT&{WOLs~SQD})z5ya9(% zFP!mCWJKK&7>(B*@FyFK5RF(0A?FqUb}9a0pCfu?9z4RL&YA`7M7&n+#SOa$Y}IX= zLRERe3zK77wqUVLwnw^C%agtaQk1nE|LHhbdMJL&EwThfnHen#)SSk@ z++Q1?)1e^QVk^G~@LH4m6p3o9?+5jbEU@#*GnN*+x=`%kiSA9^-pmPA`g{^Eobu?C zP*a5CV9V@-UR>Xao)`PNQ>7(-<&SWt$MN}etrv5SB$QHgbhzSmGzJ{5Camc`{~ple zbPxFb1pk$?GNJl((TQVw#nxX@1|5FlO$P%@!SLd*Y>+fr}^H0W9}OAq+ErepF}|`QcMH-+zN;nAImHm1tfu>P;voN5-N` zc|V8(S~1MY(R0wg%Q{_b&T=DKiaZ06%##u@#J@=_$sXPb=`75~ZsLG+Ke)QX?(^_^ z)%4xF7BN-}&t#&cvJ|%Gtk_5v$Hy8ar%(<~`9GDlMX|0`s$dzr5&Ep*9OtFa5tc%_Ef41+fHm-OZ z1%NFTrh6=j?}klo`1~@_9n*Bs&d9J!pZKIgRYm_yv~-N~`TUd4f|_c+Z)XqafPXGL zHhJt5Cnf^fcYf_?{f4v_ozNyGM!DVYm{S=b@mf3!=?mUt7(PjlGS zkmss>b4Pk*)Iv*x{e`Eb`x~xah{MQJVX)rehw6!nHnb)o!o|^}NMfS6De~zX7p^ro z(XFMmwF5&rXqlnCkg>zO-n3&l=?%0p>*_*s>OrZIvEk}4V{>x;H|BSvZm+4(tqIWK zOu{EK#^gZbLH|2@2UQNYMN#=vD?Q5QL_G2HS)+SEfAnbgs7(K(Fv!-7*@-H_`7Z!y z4vN%?BHb1}b8DJBQcl?uE<07CdKve|A?OKS>}`}6*yO9}%bR3_8!6wi8(;M8Moh!oA5XP%*(5qvZhLMcMIStow`8ur)XyaYAB&<%C%e;Vo*)KD0#($V1EWd6BhC>QZPK%Wpwoq=uy zmHRZ+E3%7u7P0j;IRC-10=~Fp1^G!H6^}_IrGBb|;%*~j>?Wf5zdoP~RzFZJt)Jzo zYcV!{TR*Gd8{6DaMKyN4+p6SJ{xzlWp_lhuSds^JfTWSM2d<%O8zUj_DJc>%Sf_f&(-(+>{rSOh4@9qt5=ab z221lZEcA-+a*J5~$%n>0_a-fTm}FcF$GU$5?{EXi0vnOU6DxW>m!`hvqRR_BzheE0 z5vKEK@d?{rjbVz9F3i@Rupi!K2A^hX4@AdY)d2ohL#mM<^7PX;I&mdA!S4W6fc#SE zk^va}_)b-wv}%VQc=YO0eLnz_1U@U8D32&A?l4OK5vlaWx1e*em(R)BxzNB~K$urZ zS0Y?2^L#{x@GIg@_2@?QaE1L?e#ln%W+Lm;`D?tY z!REA5!gqF&v!~0i=3XK|J=){quM?r^LYl+P9-%=ZHWW8QutQ1YI&Age#OA4X?5q0t zJHviA^a(asnK;6m@`J^_OV1m2=qGGwBDRwecRNfGz~|6CEJK(1op2MazasTggWA*& zc^0RiSHr_c0?(1p5x~8Sgs#vo-QMZ#p!V(PG*uQ&!zoe@7`x;1dw@>T;Ky@HWmL6; zAE9d6&E@*EjvuV>E2F(w;|8=5=XtcmhVev0d-mn!8bQvJ5Wju&u zit?MWj@qIT<&EPN!ng37TQ)s&5MX^0nQ+n~u0K;Oaz3ztI1^(_)i2xSQF|#*xqd~R zQ)~#%AH3bd@-Us&M1TUte!Nhgwd^*J@Gscz4j{WS*+Fy|{56>peX<;xC6%uZb=F+} zuD+2W{&gaB{~j=m_}%z3fI1FU&r^oD2e4nqzps*Hrow5n(I}~De4{bk2f&C^F=%P2 z`91hda41UGhNmMf?+t=8z<&#ENIg?-&(j%S)AIo03MP{5vWETOq+>8av!NXXG(>Q! z|D=51WE>J2C@XJSrvE3h%-f$sKu*lu5=&kv3*_P-Xyo@qaCg`Ls4&CZ_El_k_XX4K zWznk8*FlBIGSLt+flEa72X<~$NAK0jMbqYBzM@)sn`*DWZ@DGUXk@e)H(K~!GPTs_ zl4@ayq~1(YI_38!e$3&!9mg}QE0atQ>VW@3F&*`C8S5!q$vkEF^owV>we)j8%^lk| zA7w!Wp5c%lT3f?6-DNf-Z=Kg(9vNVua4uwMz%@2A4tdddg@TMdUEKAj$jew+6v=h? 
zybL^FY&lwDVK2bz7I)yIjF>=IC@C5%#VJJ3U9TnQ)>hkPRQNw!u+Cmt*%=(B2-b(} z!ab+p@0=UeHsYz2{4-V6pht$6O`Es^x&}TZ9fdO8+?aEB1!$>Ppv9IbzmxiO>65HT zVldeF``j00W;Vd>@{Pa$90w%BKm@Fk{ zd7iI+W4+1Bg$J+;4r)(2%Plz66;(Mj0xQCg(n0(B_HmLd2Wrj)^6{d<)T=0Zuo?r-*jDh?P=P_)RaVym`Pki;Dv8LA4;k& zSJago9_dQ!yL{^v?7|C}Pa5|}W*&Ux^K&&2RgK*<+uVV8&k)@MUNl{zjh;jplpXp# zUscdyKV2@)-MhY96m5@4#vh~~f!UD4hW2YJrq~zp+t>CxTNJK6YFLEqU-4O*QiHY2 zosWmu17cv32kmPpf;Dt@EqQ0R>Pb&E%A(|HG$%7wr@~TBHuQ)CLoz%d0Ig&laEdBh zwrL2_`{-jqVf_hpT)H2CL@Q6;ex4nA)TQ@6BR7GEI62kdXzEZ7Hrte>6)|~FY%B*)@+Ek&}N7`&z zleI1%4UISr&!CwbR}$!B;$SSK<6Y<4g4~=?v3jMS(>B^ByPN;FP2B8#tV0Lm1S}8; zq;`w4zBA`@GF^7Rj9-7{ua1JF9^Cq);6t8UL-wDIXw+KPxeS zSxl#Dm1uJdF!#E;#x+|ZO5Oddmk&AXI4ER8VO~irMTU(l&eRfG2I$f*D;e$2iic{~F!hp}Ds7En9{_&5kzi6jN6yy*j6%sj=y{VKJ13j&K2N(u0G_3rfYfZgVPw}i&aGWHUFgD2yR1Z3X(UZ$I-)T+xvw)=W8z+acE=%CV+Qx5!GlIMJ=P{9i_L2q6VI~Nao4X_xHkftGb0k z$Ak|M;!z7`(LReJ?|6mY+5edKNM>*M`7(eDavi*b^p>bcZf-GpcubZR<~4{NDNj&Z zt#0xNKt&e)Dw}8gk1!qU^-@Rb}7u4FbdhQF74nyMOFJbdHXLyNZP$8WS7X>1u3Xkyp{wY5p}U&T$e zy3K$1tu)^u-s>N7`B(X9CBskd;YbhwMOSRBuV_u9L%U`A-E+TdTC3stnA4gNs~>l9c_k^G z;2h6&efOTTheRoOqGJ8{lDpefTXsy%{UPE;pEGeI=i#qg!X~N_8}O2{uV~kUofyq| zLQR~rrrFv0Ud&<5lH;cEOX&hhpR@m zMp={p6JOXdy$!qS0hwdmR@0@4+WAEpGLDSed-1bGVO2TT@t@YH%$_;O2egj9&iBkM zwFRUnN21G0s|Y*U(&LI=04!1cIs{?*#hjn$HwaFFRmX=LO~)_kr~S32UjLSu1Io=vWE2^gUxUpra+5u*VfM_SE7bhNFg*T_#tVDy-#@u0SX z!nEZ|GxLRjqlcApkX3@t#A|oq{XxI~s4RA2S>2YZJAZ%D;(RMsbFV6 zmHD7FnCsw8y&83d3ap!wCmw&vx_${QPRXD->|9bKXXJ!==7-q$Ga4y zeSA zcK!J0+5$25_r94Kd17ZTd2A-!p_8fky>f@F8aX{(ifBdgc!TGHtP`m%)fzKu0MGA8 z!c(+AZR+tOsx{qIZoFv&N+C|Jy==!z=G0|GzJlR5Nt4?C?H3 z09wzXdbLUN{A!3GxMa)%_i1ag4&(|w)kAsXSu%eH^$n)vNY^bDEtHML6CTXSspfQ8 zT}}k@2gDT4)|OS4dnz;jpNb91y~bB&m=7NHgQT)^tAjn6i#t~M0LpZo6WFE{L<)ap zQ!K>^7v>Bp#&bnIA@NM)+3}MSN4D_k^>VDv1h? zcz`+%w-4|N``bi zHM-;F)O+R6%IqU-IhF|i8P6PclW}*Znkhpo2tK=sq6My@iwP4J($TzyMcKK{QI?j> zEd{18^K5kJj?Fdn@P891JK9{~VZ4NxmH-K;iU zQu<$%il?cXhoU3$p|b9soyXPl1J{3sQx5bdJJG?63;~LA{wBzlTNc}hA05j23qdpO z_8(-$HV9ko>ND;zzuzlB1;_<>l%!OYTW8JRm7KGWbrZ2%=SiZq54W#{QzWCyFCK?{)>bGRa9$ov zp2xW|07p1d znmp}sD?%~@LJOvTbIvZ##`-j|U}hx8kTdWoI!ga(_wRo7-bZ)#*>QqjFB?0GEQ4R6 zR~zIX9ulLGZs12PQ()%}mOV3C_@^%D_x!gm@Cym9H1G*uu2rIK4YRWUut$G0bSRm$ zt;SOR#~a2gF!Sv(&(799CGvj-atV$0k3;};&}L#H9=cxju}y}bwGdjZbHYjHF(O&l)q#xm>&0<<)p8hu(|?ixFA zerY{@m*4q*mEA!ER?ws{ph(bRR97WzLW{E1pFc|pTOTK0v$g*KtvrqoL)PxJEAc(C z42{IFx80F_{rwO0Bp*=H^;)lv)ALp%YPO6%^FAI#XmR0t3HHY=8J&OSIQC?nJ z>DDiMeJ_^+@G}`d|%PNcQo$-5OMrqWPru^7SnEkHnW6{NX?_Sww$z27CVi zdA(cteq`i*j^NsTtI2;SIP)>ZY-qk8migA==}KCTKs*^fo`c`=^&Ov;ACu*5UoURm znByHWk4ogLiBWc1o(&ugY-Q_Km7Cl8>~Ve_@g>iP?pX@?)^Z=-z)yegFQNW_ksg_4 zd26a!E$xf%f%qW(=-axm?0?UpK54D^dP^sJi&Zae_lW9a{{Rv1`TEse4_mv?Z^ZWJ zayJsf-*!d!_w+r!goC|0ic09)1&^(l)b=(0AL~`*dfijh^=mB>%v|3Rckv7>_oY7G z-v0pNGnFTfG<4Ha^%6(8Y=LRqFzb{3-bN39z=7wwo~L!IUcKGqo1|_494aqmQP>aj z&1l~#>t;FKI#lgJ-cRr>YntWWy?Aae7%XTu#S)*EHTL)QJ^p|JMq5jW;f_~@);T#w zBZFNIiQ&d~Tbuay#yLn>a^CDcJ-^B7XM!u39vP54ZZOJgj;19>R$87NEV{NPF0E-= zPiy|CFX3+$$)g{!-iDV?W1J7Y5%phhQTY1j(PNSei+JvBWm&D|8Ao1++t~Y8lj|BK z#+frZgC(&)ES{dt@BIG&O10s?5>KGqlqzPil>YVtfAQt?Z|C_D(+R07qoXg(>fopK zd`H9if51ILvvtjS*TdH8XF)aOoW*VqFh9KBgZch=&eZ&IrrBCYCYnCgb1LpY{0N6n zpWi*dA58h(&CSN4diN7M&2uwklgVS)0r~(c+p*~8dE9Bo+2P&f`t~H$^-HZ<`R?xI z-y?7U;Zb`kj>G)*sAsgfv~^kGc-e;F7-J&3%@4z$W{_Q8g=vQ=A5e$X`k&?ewy$iU z)Ge9|gxsg09aQ>{dg7ibDOqZd8lG2HwKr0uw4Z;}$MCm{mdZ8Lb!KL@96>)*7yBpG zeLXsK#yi3RM09m@bqtP&DnaN$uN~C1yNxp7q{Dn<{E~sg59ocXZ^GUzeI zJSgR&KCA8OKM~v>xXnpi?8~!Sm}z}q6SC{u{s;B3=(~3?0A!lP@fV6<@cu2}Rk+lM z$x?E$4#lzD`hGldni}tot?ZNR`ZFcO?3rtlGO3Zk1G{6dr+o4U0~%ajUF=ZvPb(5h 
zIV?{kllqfX>Tzo_Y!+MEJ8)a6c9*M?uEaXeB|w?oQ~Q&x1G zS@V~Kd;b7=N9H~F72lOp5=xG*6O62ga-<%F8u9H< zNxIW+3d*IUC*+h5mA}CIRqqdYn(slGqbg^!ll^78HyQr&`XA@{5!V`uO6OJsH>-u0 zu*B@X_5Nt>>l0hhQ&Ii=C1be*1fGsP{r&#{$mniuWwn!RgM;^2au|D5HRLBMa_rAm zwHQ^4g+-y}6I;s##E{H}IS}EL_N{#rKzsQXdxd1R9D&fu>^;5z055K`?@P13o*Awo z4)Gp6$D#EZ=l=lKYUOlK6x?jLJenCqVO7ebyjIKn#-c1}sEQR-U;r=xr%{ZY4oz5S ztzdZV?yk}s$;M%YzxHoT^dHpmT+xe`$GHefT18y1T;{vs<8vckSf~>oWRIYKaqs;9 z0G~}XQb6S-LaaL)WjM-I<#co6Fz~MqN{I5tQjW;VRg~a@FhQ=HL-6BB;_Ci9Amt%$ zt?t9q+x(t}O)kz|K`)*lXF*w%x=F7f8JZRU0NH(U(Ek8K#bFpqO6c~Z z2}w5*_qrG;T9)Ox~^XJ<4L~)LYJqN$P`8Vw}yE_@?n%)qP zDd*UF4?&)P$g7pDY(t)r*yOmmVi#C}lBXpLh?#Ygx z&F}pG07~!au8SfvI)+vlD#IX(s#RS#1`g+?YW6LC(vimRl2&>-1J)*;Bx5U{rT%J z$SA4cwrQ25W?11)UPm}kC=o$*YS(&n;^NpGG5#eZu0{vf>G{@5oSM1oC{4$mEeeyt z70eF|$R0NsWj*VQ);v8foP6tG^4nu1<1C+FZ-4T8iYe2pDsMxF7lWsXjitYH|Jf>w Bz90Yq diff --git a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_tune.py b/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_tune.py deleted file mode 100644 index c9ecedc5971..00000000000 --- a/examples/tensorflow/style_transfer/arbitrary_style_transfer/quantization/ptq/style_tune.py +++ /dev/null @@ -1,190 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# - -import os -import io -import skimage.io -import glob -import numpy as np -import tensorflow.compat.v1 as tf -from PIL import Image -import time -from neural_compressor.experimental import Quantization -from neural_compressor.data import DATALOADERS, DATASETS -from neural_compressor.adaptor.tf_utils.util import _parse_ckpt_bn_input - -flags = tf.flags -flags.DEFINE_string('style_images_paths', None, 'Paths to the style images' - 'for evaluation.') -flags.DEFINE_string('content_images_paths', None, 'Paths to the content images' - 'for evaluation.') -flags.DEFINE_string('output_dir', './result', 'Output stylized image directory.') - -flags.DEFINE_string('output_model', None, 'Output model directory.') - -flags.DEFINE_string('input_model', None, 'Output directory.') - -flags.DEFINE_integer('batch_size', 1, 'batch_size') - -flags.DEFINE_bool('tune', False, 'if use tune') - -flags.DEFINE_string('config', None, 'yaml configuration for tuning') - -FLAGS = flags.FLAGS - -def load_img(path, resize_shape=(256, 256), crop_ratio=0.1): - img = Image.open(path) - width, height = img.size - crop_box = (crop_ratio*height, crop_ratio*width, (1-crop_ratio)*height, (1-crop_ratio)*width) - img = np.asarray(img.crop(crop_box).resize(resize_shape)) - if img.max() > 1.0: - img = img / 255. - img = img.astype(np.float32)[np.newaxis, ...] 
- return img - -def save_image(image, output_file, save_format='jpeg'): - image = np.uint8(image * 255.0) - buf = io.BytesIO() - skimage.io.imsave(buf, np.squeeze(image, 0), format=save_format) - buf.seek(0) - f = tf.gfile.GFile(output_file, 'w') - f.write(buf.getvalue()) - f.close() - -def image_style_transfer(sess, content_img_path, style_img_path): - stylized_images = sess.graph.get_tensor_by_name('import/import/transformer/expand/conv3/conv/Sigmoid:0') - style_img_np = load_img(style_img_path, crop_ratio=0) - content_img_np = load_img(content_img_path, crop_ratio=0) - stylized_image_res = sess.run( - stylized_images, - feed_dict={ - 'import/import/style_input:0': style_img_np, - 'import/import/content_input:0': content_img_np}) - # saves stylized image. - save_image(stylized_image_res, os.path.join(FLAGS.output_dir, 'stylized_image.jpg')) - -def main(args=None): - tf.logging.set_verbosity(tf.logging.INFO) - if not tf.gfile.Exists(FLAGS.output_dir): - tf.gfile.MkDir(FLAGS.output_dir) - - with tf.Session() as sess: - if FLAGS.input_model.rsplit('.', 1)[-1] == 'ckpt': - style_img_ph = tf.placeholder(tf.float32, shape=[None, 256, 256, 3], name='style_input') - content_img_ph = tf.placeholder(tf.float32, shape=[None, 256, 256, 3], name='content_input') - # import meta_graph - meta_data_path = FLAGS.input_model + '.meta' - saver = tf.train.import_meta_graph(meta_data_path, clear_devices=True) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, FLAGS.input_model) - graph_def = sess.graph.as_graph_def() - - replace_style = 'style_image_processing/ResizeBilinear_2' - replace_content = 'batch_processing/batch' - for node in graph_def.node: - for idx, input_name in enumerate(node.input): - # replace style input and content input nodes to placeholder - if replace_content == input_name: - node.input[idx] = 'content_input' - if replace_style == input_name: - node.input[idx] = 'style_input' - - if FLAGS.tune: - _parse_ckpt_bn_input(graph_def) - output_name = 'transformer/expand/conv3/conv/Sigmoid' - frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graph_def, [output_name]) - # use frozen pb instead - elif FLAGS.input_model.rsplit('.', 1)[-1] == 'pb': - with open(FLAGS.input_model, 'rb') as f: - frozen_graph = tf.GraphDef() - frozen_graph.ParseFromString(f.read()) - else: - print("not supported model format") - exit(-1) - - if FLAGS.tune: - with tf.Graph().as_default() as graph: - tf.import_graph_def(frozen_graph, name='') - quantizer = Quantization(FLAGS.config) - quantizer.model = graph - quantized_model = quantizer.fit() - quantized_model.save(FLAGS.output_model) - frozen_graph= quantized_model.graph_def - - # validate the quantized model here - with tf.Graph().as_default(), tf.Session() as sess: - if FLAGS.tune: - # create dataloader using default style_transfer dataset - # generate stylized images - dataset = DATASETS('tensorflow')['style_transfer']( \ - FLAGS.content_images_paths.strip(), - FLAGS.style_images_paths.strip(), - crop_ratio=0.2, - resize_shape=(256, 256)) - else: - dataset = DATASETS('tensorflow')['dummy_v2'](\ - input_shape=[(256, 256, 3), (256, 256, 3)], label_shape=(1, )) - dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=FLAGS.batch_size) - tf.import_graph_def(frozen_graph, name='') - style_transfer(sess, dataloader) - -def add_import_to_name(sess, name, try_cnt=2): - for i in range(0, try_cnt): - try: - sess.graph.get_tensor_by_name(name) - return name - except: - name = 'import/' + name - - raise ValueError('can not find 
tensor by name') - -# validate and save the files -def style_transfer(sess, dataloader): - time_list = [] - output_name = add_import_to_name(sess, 'transformer/expand/conv3/conv/Sigmoid:0', 3) - style_name = add_import_to_name(sess, 'style_input:0', 3) - content_name = add_import_to_name(sess, 'content_input:0', 3) - - stylized_images = sess.graph.get_tensor_by_name(output_name) - - for idx, ((content_img_np, style_img_np), _) in enumerate(dataloader): - start_time = time.time() - stylized_image_res = sess.run( - stylized_images, - feed_dict={ - style_name: style_img_np, - content_name: content_img_np}) - duration = time.time() - start_time - time_list.append(duration) - if idx + 1 == 20: - break - warm_up = 1 - throughput = (len(time_list) - warm_up)/ np.array(time_list[warm_up:]).sum() - print('Batch size = {}'.format(FLAGS.batch_size)) - print('Latency: {:.3f} ms'.format(np.array(time_list[warm_up:]).mean() * 1000)) - print('Throughput: {:.3f} images/sec'.format(throughput)) - - -def run_tuning(): - tf.disable_v2_behavior() - tf.app.run(main) - -if __name__ == '__main__': - run_tuning() From bb8402f4c039c5f9e1637f530e5e348e8b0c5932 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Fri, 9 Dec 2022 14:21:50 +0800 Subject: [PATCH 06/14] remove oldapi examples Signed-off-by: Lv, Liang1 --- .../quantization/ptq/README.md | 277 ------------------ .../ptq/faster_rcnn_inception_resnet_v2.yaml | 77 ----- .../faster_rcnn_inception_resnet_v2_itex.yaml | 77 ----- .../ptq/faster_rcnn_resnet101.yaml | 71 ----- .../ptq/faster_rcnn_resnet101_itex.yaml | 71 ----- .../ptq/faster_rcnn_resnet50.yaml | 73 ----- .../ptq/faster_rcnn_resnet50_itex.yaml | 73 ----- .../quantization/ptq/label_map.yaml | 80 ----- .../quantization/ptq/main.py | 59 ---- .../ptq/mask_rcnn_inception_v2.yaml | 82 ------ .../ptq/mask_rcnn_inception_v2_itex.yaml | 82 ------ .../quantization/ptq/prepare_dataset.sh | 136 --------- .../quantization/ptq/prepare_model.py | 99 ------- .../quantization/ptq/requirements.txt | 8 - .../quantization/ptq/run_benchmark.sh | 41 --- .../quantization/ptq/run_tuning.sh | 41 --- .../quantization/ptq/ssd_mobilenet_v1.yaml | 74 ----- .../ptq/ssd_mobilenet_v1_itex.yaml | 74 ----- .../quantization/ptq/ssd_resnet34.yaml | 90 ------ .../quantization/ptq/ssd_resnet34_itex.yaml | 90 ------ .../quantization/ptq/ssd_resnet50_v1.yaml | 79 ----- .../ptq/ssd_resnet50_v1_itex.yaml | 79 ----- .../yolo_v3/quantization/ptq/README.md | 99 ------- .../quantization/ptq/coco_constants.py | 6 - .../quantization/ptq/infer_detections.py | 154 ---------- .../quantization/ptq/prepare_dataset.sh | 136 --------- .../yolo_v3/quantization/ptq/requirements.txt | 8 - .../yolo_v3/quantization/ptq/run_benchmark.sh | 36 --- .../yolo_v3/quantization/ptq/run_tuning.sh | 39 --- .../yolo_v3/quantization/ptq/utils.py | 205 ------------- .../yolo_v3/quantization/ptq/yolo_v3.yaml | 85 ------ .../quantization/ptq/yolo_v3_itex.yaml | 85 ------ 32 files changed, 2686 deletions(-) delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/README.md delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101.yaml delete mode 100644 
examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/label_map.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/main.py delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_dataset.sh delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_model.py delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/requirements.txt delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_benchmark.sh delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_tuning.sh delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34_itex.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1.yaml delete mode 100644 examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1_itex.yaml delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/README.md delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/coco_constants.py delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/infer_detections.py delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/prepare_dataset.sh delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/requirements.txt delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_benchmark.sh delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_tuning.sh delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/utils.py delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3.yaml delete mode 100644 examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3_itex.yaml diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/README.md b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/README.md deleted file mode 100644 index 1c9b6b455d9..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/README.md +++ /dev/null @@ -1,277 +0,0 @@ -Step-by-Step -============ - -This document is used to list steps of reproducing TensorFlow Object Detection models tuning results. This example can run on Intel CPUs and GPUs. -Currently, we've enabled below models. 
- * ssd_resnet50_v1 - * ssd_resnet34 - * ssd_mobilenet_v1 - * fastrcnn_inception_resnet_v2 - * fastrcnn_resnet101 - * fastrcnn_resnet50 - * maskrcnn_inception_v2 -## Prerequisite - - -### 1. Installation -Recommend python 3.6 or higher version. - -```shell -# Install Intel® Neural Compressor -pip install neural-compressor -``` - -### 2. Install Intel Tensorflow -```shell -pip install intel-tensorflow -``` -> Note: Supported Tensorflow [Version](../../../../../../README.md#supported-frameworks). - -### 3. Installation Dependency packages -```shell -cd examples/tensorflow/object_detection/tensorflow_models/quantization/ptq -pip install -r requirements.txt -``` - -### 4. Install Protocol Buffer Compiler - -`Protocol Buffer Compiler` in version higher than 3.0.0 is necessary ingredient for automatic COCO dataset preparation. To install please follow -[Protobuf installation instructions](https://grpc.io/docs/protoc-installation/#install-using-a-package-manager). - -### 5. Install Intel Extension for Tensorflow - -#### Quantizing the model on Intel GPU -Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[gpu] -``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) - -#### Quantizing the model on Intel CPU(Experimental) -Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[cpu] -``` - -### 6. Prepare Dataset - -#### Automatic dataset download - -> **_Note: `prepare_dataset.sh` script works with TF version 1.x._** - -Run the `prepare_dataset.sh` script located in `examples/tensorflow/object_detection/tensorflow_models/quantization/ptq`. - -Usage: -```shell -cd examples/tensorflow/object_detection/tensorflow_models/quantization/ptq -. prepare_dataset.sh -``` - -This script will download the *train*, *validation* and *test* COCO datasets. Furthermore it will convert them to -tensorflow records using the `https://github.com/tensorflow/models.git` dedicated script. - -#### Manual dataset download -Download CoCo Dataset from [Official Website](https://cocodataset.org/#download). - -### 7. Download Model - -#### Automated approach -Run the `prepare_model.py` script located in `examples/tensorflow/object_detection/tensorflow_models/quantization/ptq`. 
- -``` -usage: prepare_model.py [-h] [--model_name {ssd_resnet50_v1,ssd_mobilenet_v1}] - [--model_path MODEL_PATH] - -Prepare pre-trained model for COCO object detection - -optional arguments: - -h, --help show this help message and exit - --model_name {ssd_resnet50_v1,ssd_mobilenet_v1} - model to download, default is ssd_resnet50_v1 - --model_path MODEL_PATH - directory to put models, default is ./model -``` - -#### Manual approach - -##### ssd_resnet50_v1 -```shell -wget http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz -tar -xvzf ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz -C /tmp -``` - -##### ssd_mobilenet_V1 - -```shell -wget http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz -tar -xvzf ssd_mobilenet_v1_coco_2018_01_28.tar.gz -``` - -##### faster_rcnn_inception_resnet_v2 - -```shell -wget http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_v2_coco_2018_01_28.tar.gz -tar -xvzf faster_rcnn_inception_v2_coco_2018_01_28.tar.gz -``` - -##### faster_rcnn_resnet101 - -```shell -wget http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz -tar -xvzf faster_rcnn_resnet101_coco_2018_01_28.tar.gz -``` - -##### faster_rcnn_resnet50 - -```shell -wget https://storage.googleapis.com/intel-optimized-tensorflow/models/faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz -tar -xvf faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz -``` - -##### mask_rcnn_inception_v2 - -```shell -wget http://download.tensorflow.org/models/object_detection/mask_rcnn_inception_v2_coco_2018_01_28.tar.gz -tar -xvzf mask_rcnn_inception_v2_coco_2018_01_28.tar.gz -``` - -##### ssd_resnet34 -```shell -wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/ssd_resnet34_fp32_1200x1200_pretrained_model.pb -``` -You need to install intel-tensorflow==2.4.0 to enable ssd_resnet34 model. - -## Run Command - -Now we support both pb and ckpt formats. - -### For PB model - - ```shell - # The cmd of running ssd_resnet50_v1 - bash run_tuning.sh --config=ssd_resnet50_v1.yaml --input_model=/tmp/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03/frozen_inference_graph.pb --output_model=./tensorflow-ssd_resnet50_v1-tune.pb - ``` - -### For ckpt model - - ```shell - # The cmd of running ssd_resnet50_v1 - bash run_tuning.sh --config=ssd_resnet50_v1.yaml --input_model=/tmp/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03/ --output_model=./tensorflow-ssd_resnet50_v1-tune.pb - ``` -> Note -> -> 1. Make sure to add dataset_location=/path/to/dataset/coco_val.record in config file: "ssd_resnet50_v1.yaml" -> -> 2. For ssd_resnet34 model, anno_path of evaluation/accuracy/metric/COCOmAP in config file should be "label_map.yaml" - -Details of enabling Intel® Neural Compressor on ssd_resnet50_v1 for Tensorflow. -========================= - -This is a tutorial of how to enable ssd_resnet50_v1 model with Intel® Neural Compressor. -## User Code Analysis -1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. - -2. User specifies fp32 *model*, calibration dataset *q_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself. 
- -For ssd_resnet50_v1, we applied the latter one because our philosophy is to enable the model with minimal changes. Hence we need to make two changes on the original code. The first one is to implement the q_dataloader and make necessary changes to *eval_func*. - - -### q_dataloader Part Adaption -Specifically, we need to add one generator to iterate the dataset per Intel® Neural Compressor requirements. The easiest way is to implement *__iter__* interface. Below function will yield the images to feed the model as input. - -```python -def __iter__(self): - """Enable the generator for q_dataloader - - Yields: - [Tensor]: images - """ - data_graph = tf.Graph() - with data_graph.as_default(): - self.input_images, self.bbox, self.label, self.image_id = self.get_input( - ) - - self.data_sess = tf.compat.v1.Session(graph=data_graph, - config=self.config) - for i in range(COCO_NUM_VAL_IMAGES): - input_images = self.data_sess.run([self.input_images]) - yield input_images -``` - -### Evaluation Part Adaption -The Class model_infer has the run_accuracy function which actually could be re-used as the eval_func. - -Compare with the original version, we added the additional parameter **input_graph** as the Intel® Neural Compressor would call this interface with the graph to be evaluated. The following code snippet also need to be added into the run_accuracy function to update the class members like self.input_tensor and self.output_tensors. -```python -if input_graph: - graph_def = get_graph_def(self.args.input_graph, self.output_layers) - input_graph = tf.Graph() - with input_graph.as_default(): - tf.compat.v1.import_graph_def(graph_def, name='') - - self.infer_graph = input_graph - # Need to reset the input_tensor/output_tensor - self.input_tensor = self.infer_graph.get_tensor_by_name( - self.input_layer + ":0") - self.output_tensors = [ - self.infer_graph.get_tensor_by_name(x + ":0") - for x in self.output_layers - ] -``` - -### Write Yaml config file -In examples directory, there is a ssd_resnet50_v1.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The ssd_resnet50_v1_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. - -```yaml -model: # mandatory. used to specify model specific information. - name: ssd_resnet50_v1 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. 
- activation: - algorithm: minmax - weight: - algorithm: minmax - op_wise: { - 'FeatureExtractor/resnet_v1_50/fpn/bottom_up_block5/Conv2D': { - 'activation': {'dtype': ['fp32']}, - }, - 'WeightSharedConvolutionalBoxPredictor_2/ClassPredictionTower/conv2d_0/Conv2D': { - 'activation': {'dtype': ['fp32']}, - } - } - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. -``` -Here we set the input tensor and output tensors name into *inputs* and *outputs* field. Meanwhile, we set mAp target as tolerating 0.01 relative mAp of baseline. The default tuning strategy is basic strategy. The timeout 0 means early stop as well as a tuning config meet accuracy target. - -### Code update - -After prepare step is done, we just need update infer_detections.py like below. -```python -from neural_compressor.experimental import Quantization,common - -quantizer = Quantization(args.config) -quantizer.model = common.Model(args.input_graph) -quantizer.calib_dataloader = infer -quantizer.eval_dataloader = infer -quantizer.eval_func = infer.accuracy_check -q_model = quantizer.fit() -``` - -The quantizer.fit() function will return a best quantized model during timeout constrain. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2.yaml deleted file mode 100644 index 2452828cee3..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_inception_resnet_v2 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
- dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - granularity: per_tensor - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - absolute: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2_itex.yaml deleted file mode 100644 index 24039f9700d..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_inception_resnet_v2_itex.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_inception_resnet_v2 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
- dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - granularity: per_tensor - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - absolute: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101.yaml deleted file mode 100644 index a879c83451d..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_resnet101 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - -evaluation: # optional. 
used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101_itex.yaml deleted file mode 100644 index e7ab49ab0dc..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet101_itex.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_resnet101 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. 
if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50.yaml deleted file mode 100644 index 91f07504f4a..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_resnet50 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
- batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50_itex.yaml deleted file mode 100644 index 61254dfd224..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/faster_rcnn_resnet50_itex.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: 1.0 - -model: # mandatory. used to specify model specific information. - name: faster_rcnn_resnet50 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
- batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 600 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 600 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/label_map.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/label_map.yaml deleted file mode 100644 index 1fbc9263dc9..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/label_map.yaml +++ /dev/null @@ -1,80 +0,0 @@ -person: 1 -bicycle: 2 -car: 3 -motorcycle: 4 -airplane: 5 -bus: 6 -train: 7 -truck: 8 -boat: 9 -traffic light: 10 -fire hydrant: 11 -stop sign: 12 -parking meter: 13 -bench: 14 -bird: 15 -cat: 16 -dog: 17 -horse: 18 -sheep: 19 -cow: 20 -elephant: 21 -bear: 22 -zebra: 23 -giraffe: 24 -backpack: 25 -umbrella: 26 -handbag: 27 -tie: 28 -suitcase: 29 -frisbee: 30 -skis: 31 -snowboard: 32 -sports ball: 33 -kite: 34 -baseball bat: 35 -baseball glove: 36 -skateboard: 37 -surfboard: 38 -tennis racket: 39 -bottle: 40 -wine glass: 41 -cup: 42 -fork: 43 -knife: 44 -spoon: 45 -bowl: 46 -banana: 47 -apple: 48 -sandwich: 49 -orange: 50 -broccoli: 51 -carrot: 52 -hot dog: 53 -pizza: 54 -donut: 55 -cake: 56 -chair: 57 -couch: 58 -potted plant: 59 -bed: 60 -dining table: 61 -toilet: 62 -tv: 63 -laptop: 64 -mouse: 65 -remote: 66 -keyboard: 67 -cell phone: 68 -microwave: 69 -oven: 70 -toaster: 71 -sink: 72 -refrigerator: 73 -book: 74 -clock: 75 -vase: 76 -scissors: 77 -teddy bear: 78 -hair drier: 79 -toothbrush: 80 diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/main.py b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/main.py deleted file mode 100644 index 3d2713107f6..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/main.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2020 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# - -from __future__ import division -import time -import numpy as np -import tensorflow as tf -from argparse import ArgumentParser - -class eval_object_detection_optimized_graph(object): - - def __init__(self): - arg_parser = ArgumentParser(description='Parse args') - - arg_parser.add_argument('-g', - "--input-graph", - help='Specify the input graph.', - dest='input_graph') - arg_parser.add_argument('--config', type=str, default='') - arg_parser.add_argument('--output_model', type=str, default='') - arg_parser.add_argument('--mode', type=str, default='performance') - arg_parser.add_argument('--tune', action='store_true', default=False) - arg_parser.add_argument('--benchmark', dest='benchmark', - action='store_true', help='run benchmark') - self.args = arg_parser.parse_args() - - def run(self): - if self.args.tune: - from neural_compressor.experimental import Quantization - quantizer = Quantization(self.args.config) - quantizer.model = self.args.input_graph - q_model = quantizer.fit() - q_model.save(self.args.output_model) - - if self.args.benchmark: - from neural_compressor.experimental import Benchmark - evaluator = Benchmark(self.args.config) - evaluator.model = self.args.input_graph - evaluator(self.args.mode) - -if __name__ == "__main__": - evaluate_opt_graph = eval_object_detection_optimized_graph() - evaluate_opt_graph.run() diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2.yaml deleted file mode 100644 index c89ac397954..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2.yaml +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: mask_rcnn_inception_v2 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 50 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - filter: - LabelBalance: - size: 1 - #op_wise: { - # 'FirstStageFeatureExtractor/InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d': { - # 'activation': {'dtype': ['fp32']}, - # } - # } - -evaluation: # optional. used to config evaluation process. 
- accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - ResizeWithRatio: - min_dim: 800 - max_dim: 1356 - padding: False - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ResizeWithRatio: - min_dim: 800 - max_dim: 1356 - padding: True - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 2%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2_itex.yaml deleted file mode 100644 index a253f9e1a27..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/mask_rcnn_inception_v2_itex.yaml +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: mask_rcnn_inception_v2 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 50 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - filter: - LabelBalance: - size: 1 - #op_wise: { - # 'FirstStageFeatureExtractor/InceptionV2/InceptionV2/Conv2d_1a_7x7/separable_conv2d': { - # 'activation': {'dtype': ['fp32']}, - # } - # } - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - ResizeWithRatio: - min_dim: 800 - max_dim: 1356 - padding: False - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ResizeWithRatio: - min_dim: 800 - max_dim: 1356 - padding: True - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 2%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_dataset.sh b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_dataset.sh deleted file mode 100644 index 3cc0cc16f80..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_dataset.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# set -x - -DATA_DIR="${PWD}/data" -DATA_NAME="val2017" -DATA_URL_LIST='http://images.cocodataset.org/zips/val2017.zip http://images.cocodataset.org/annotations/annotations_trainval2017.zip' -PACKAGES_LIST='val2017.zip annotations_trainval2017.zip' -VAL_IMAGE_DIR=$DATA_DIR/val2017 -TRAIN_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json -VAL_ANNOTATIONS_FILE=$DATA_DIR/annotations/instances_val2017.json -TESTDEV_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json -OUTPUT_DIR=$DATA_DIR - -help() -{ - cat <<- EOF - - Desc: Prepare dataset for Tensorflow COCO object detection. - - -h --help help info - - --dataset_location set dataset location, default is ./data - -EOF - exit 0 -} - -function main { - init_params "$@" - download_dataset - convert_to_tf_record -} - -# init params -function init_params { - - for var in "$@" - do - case $var in - --dataset_location=*) - DATA_DIR=$(echo "$var" |cut -f2 -d=) - ;; - -h|--help) help - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# removes files that will not be used anymore -function remove_zipped_packages { - for package in $PACKAGES_LIST; do - rm "$package" - done -} - -function download_tf_models_repo { - if [ ! -d models ]; then - git clone https://github.com/tensorflow/models.git - fi - cd models || exit - git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 - cd .. -} - -function divide_tf_records_by_dataset { - if [ ! -d "${DATA_DIR}/tf_test2017" ]; then - mkdir "${DATA_DIR}/tf_test2017" - fi - if [ ! -d "${DATA_DIR}/tf_train2017" ]; then - mkdir "${DATA_DIR}/tf_train2017" - fi - if [ ! -d "${DATA_DIR}/tf_val2017" ]; then - mkdir "${DATA_DIR}/tf_val2017" - fi - mv ${DATA_DIR}/coco_testdev.record* ${DATA_DIR}/tf_test2017 - mv ${DATA_DIR}/coco_train.record* ${DATA_DIR}/tf_train2017 - mv ${DATA_DIR}/coco_val.record* ${DATA_DIR}/tf_val2017 -} - -function convert { - cd models/research - protoc object_detection/protos/*.proto --python_out=. 
- export PYTHONPATH=$PYTHONPATH:$(pwd) - export PYTHONPATH=$PYTHONPATH:$(pwd)/slim - python ./object_detection/dataset_tools/create_coco_tf_record.py --logtostderr \ - --train_image_dir=empty_dir \ - --val_image_dir="${VAL_IMAGE_DIR}" \ - --test_image_dir=empty_dir \ - --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ - --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \ - --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ - --output_dir="${OUTPUT_DIR}" -} - -function convert_to_tf_record { - download_tf_models_repo - convert - divide_tf_records_by_dataset -} - -# download_dataset -function download_dataset { - if [ ! -d "${DATA_DIR}" ]; then - mkdir "${DATA_DIR}" - fi - - cd "${DATA_DIR}" || exit - if [ ! -f "${VAL_IMAGE_DIR}" ]; then - - for dataset_dowload_link in $DATA_URL_LIST; do - wget "$dataset_dowload_link" - done - for package in $PACKAGES_LIST; do - unzip -o "$package" - done - remove_zipped_packages - if [ ! -d empty_dir ]; then - mkdir empty_dir - fi - - cd annotations || exit - echo "{ \"images\": {}, \"categories\": {}}" > empty.json - cd .. - else - echo "Dataset ${DATA_NAME} is exist!" - fi - - cd ../ -} - -main "$@" diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_model.py b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_model.py deleted file mode 100644 index 51882cf0bfe..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/prepare_model.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import argparse -import enum -import tarfile -import abc - - -class SupportedModels(enum.Enum): - """ - Enumeration containing supported models - """ - ssd_resnet50_v1 = 'ssd_resnet50_v1' - ssd_mobilnet_v1 = 'ssd_mobilenet_v1' - - -class Model(abc.ABC): - """ - Base model class used to obtain the model (and perform any necessary operations to make it usable) - """ - - @abc.abstractmethod - def get_pretrained_model(self, destination): - """ - Base method for obtaining a ready to use model - Args: - destination: path to where the file should be stored - """ - pass - - -class SsdMobilenetV1(Model): - """ Concrete implementation of the Model base class for ssd_mobilenet_v1""" - - def get_pretrained_model(self, destination): - """ - Obtains a ready to use ssd_mobilenet_v1 model file. - Args: - destination: path to where the file should be stored - """ - url = 'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz' - os.system("curl -o ssd_mobilenet_v1_coco_2018_01_28.tar.gz {0}".format(url)) - with tarfile.open("ssd_mobilenet_v1_coco_2018_01_28.tar.gz") as tar: - if not os.path.exists(destination): - os.makedirs(destination) - tar.extractall(destination) - - -class SsdResnet50(Model): - """ Concrete implementation of the Model base class for ssd_resnet_50""" - - def get_pretrained_model(self, destination): - """ - Obtains a ready to use ssd_resnet_50 model file. 
- Args: - destination: path to where the file should be stored - """ - url = "http://download.tensorflow.org/models/object_detection/" \ - "ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz" - os.system("curl -o ssd_resnet50_v1.tar.gz {0}".format(url)) - with tarfile.open("ssd_resnet50_v1.tar.gz") as tar: - if not os.path.exists(destination): - os.makedirs(destination) - tar.extractall(destination) - - -def get_model(model: SupportedModels) -> Model: - """ - Factory method that returns the requested model object - Args: - model: model from SupportedModels enumeration - - Returns: Concrete object inheriting the Model base class - - """ - if model == SupportedModels.ssd_resnet50_v1: - return SsdResnet50() - if model == SupportedModels.ssd_mobilnet_v1: - return SsdMobilenetV1() - else: - raise AttributeError("The model {0} is not supported. Supported models: {1}" - .format(model_name, SupportedModels.__members__.keys())) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Prepare pre-trained model for COCO object detection') - parser.add_argument('--model_name', type=str, default='ssd_resnet50_v1', - help='model to download, default is ssd_resnet50_v1', - choices=["ssd_resnet50_v1", "ssd_mobilenet_v1"]) - parser.add_argument('--model_path', type=str, default='./model', help='directory to put models, default is ./model') - - args = parser.parse_args() - model_name = args.model_name - model_path = args.model_path - try: - model = get_model(SupportedModels(model_name)) - model.get_pretrained_model(model_path) - except AttributeError: - print("The model {0} is not supported. Supported models: {1}" - .format(model_name, SupportedModels.__members__.keys())) diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/requirements.txt b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/requirements.txt deleted file mode 100644 index 865df0f3a6b..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -Cython -contextlib2 -pillow>=8.2.0 -lxml>=4.6.2 -matplotlib -numpy>=1.17.4 -pycocotools -protobuf diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_benchmark.sh b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_benchmark.sh deleted file mode 100644 index fc59d4b83e4..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - for var in "$@" - do - case $var in - --config=*) - config=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --mode=*) - mode=$(echo $var |cut -f2 -d=) - ;; - esac - done - -} - - -# run_tuning -function run_benchmark { - - python main.py \ - --input-graph ${input_model} \ - --config ${config} \ - --mode ${mode} \ - --benchmark -} - -main "$@" diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_tuning.sh b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_tuning.sh deleted file mode 100644 index 23e86e2dc42..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/run_tuning.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - - run_tuning - -} - -# 
init params -function init_params { - - for var in "$@" - do - case $var in - --config=*) - config=$(echo "$var" |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo "$var" |cut -f2 -d=) - ;; - --output_model=*) - output_model=$(echo "$var" |cut -f2 -d=) - ;; - esac - done - -} - -# run_tuning -function run_tuning { - python main.py \ - --input-graph "${input_model}" \ - --config ${config} \ - --output_model "${output_model}" \ - --tune -} - -main "$@" diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1.yaml deleted file mode 100644 index ded63da480e..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_mobilenet_v1 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 300 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 300 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. 
- exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1_itex.yaml deleted file mode 100644 index c312ecc348f..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_mobilenet_v1_itex.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_mobilenet_v1 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 300 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 300 -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. 
- random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34.yaml deleted file mode 100644 index e4e1b978277..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34.yaml +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_resnet34 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image - outputs: detection_bboxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 100 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation raw image folder - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - granularity: per_channel - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAP: - anno_path: /path/to/annotation - - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset/ # NOTE: modify to coco2017 validation raw image datafolder - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset/ - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - -tuning: - accuracy_criterion: - absolute: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. 
combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34_itex.yaml deleted file mode 100644 index caa95464a5e..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet34_itex.yaml +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_resnet34 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image - outputs: detection_bboxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 100 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation raw image folder - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - granularity: per_channel - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAP: - anno_path: /path/to/annotation - - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset/ # NOTE: modify to coco2017 validation raw image datafolder - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset/ - transform: - Rescale: {} - Normalize: - mean: [0.485, 0.456, 0.406] - std: [0.229, 0.224, 0.225] - Resize: - size: 1200 - -tuning: - accuracy_criterion: - absolute: 0.01 # optional. default value is relative, other value is absolute. 
this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1.yaml deleted file mode 100644 index 7ea3c76f660..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_resnet50_v1 - framework: tensorflow # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 640 - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 640 - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 640 - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. 
default value is 100. combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1_itex.yaml b/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1_itex.yaml deleted file mode 100644 index faff3306989..00000000000 --- a/examples/tensorflow/object_detection/tensorflow_models/quantization/ptq/ssd_resnet50_v1_itex.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright (c) 2021 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -model: # mandatory. used to specify model specific information. - name: ssd_resnet50_v1 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. - inputs: image_tensor - outputs: num_detections,detection_boxes,detection_scores,detection_classes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - dataset: - COCORecord: - root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 640 - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - algorithm: minmax - -evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. - metric: - COCOmAPv2: - output_index_mapping: - num_detections: 0 - boxes: 1 - scores: 2 - classes: 3 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset # NOTE: modify to coco2017 validation dataset TFRecord - transform: - Resize: - size: 640 - - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - Resize: - size: 640 - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - max_trials: 100 # optional. max tune times. default value is 100. 
combine with timeout field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/README.md b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/README.md deleted file mode 100644 index fa82967ce42..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/README.md +++ /dev/null @@ -1,99 +0,0 @@ -This document describes the step-by-step to reproduce Yolo-v3 tuning result with Neural Compressor. This example can run on Intel CPUs and GPUs. - -## Prerequisite - - -### 1. Installation -Recommend python 3.6 or higher version. - -```shell -# Install Intel® Neural Compressor -pip install neural-compressor -``` - -### 2. Install Intel Tensorflow -```shell -pip install intel-tensorflow -``` -> Note: Only supported Tensorflow 1.x versions. - -### 3. Installation Dependency packages -```shell -cd examples/tensorflow/object_detection/yolo_v3/quantization/ptq -pip install -r requirements.txt -``` - -### 4. Install Intel Extension for Tensorflow -#### Quantizing the model on Intel GPU -Intel Extension for Tensorflow is mandatory to be installed for quantizing the model on Intel GPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[gpu] -``` -For any more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel-innersource/frameworks.ai.infrastructure.intel-extension-for-tensorflow.intel-extension-for-tensorflow/blob/master/docs/install/install_for_gpu.md#install-gpu-drivers) - -#### Quantizing the model on Intel CPU(Experimental) -Intel Extension for Tensorflow for Intel CPUs is experimental currently. It's not mandatory for quantizing the model on Intel CPUs. - -```shell -pip install --upgrade intel-extension-for-tensorflow[cpu] -``` - -### 5. Downloaded Yolo-v3 model -```shell -git clone https://github.com/mystic123/tensorflow-yolo-v3.git -cd tensorflow-yolo-v3 -``` - -### 6. Download COCO Class Names File -```shell -wget https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names -``` - -### 7. Download Model Weights (Full): -```shell -wget https://pjreddie.com/media/files/yolov3.weights -``` - -### 8. Generate PB: -```shell -python convert_weights_pb.py --class_names coco.names --weights_file yolov3.weights --data_format NHWC --size 416 --output_graph yolov3.pb -``` - -### 9. Prepare Dataset - -#### Automatic dataset download - -> **_Note: `prepare_dataset.sh` script works with TF version 1.x._** - -Run the `prepare_dataset.sh` script located in `examples/tensorflow/object_detection/yolo_v3/quantization/ptq`. - -Usage: -```shell -cd examples/tensorflow/object_detection/yolo_v3/quantization/ptq -. prepare_dataset.sh -``` - -This script will download the *train*, *validation* and *test* COCO datasets. Furthermore it will convert them to -tensorflow records using the `https://github.com/tensorflow/models.git` dedicated script. - -#### Manual dataset download -Download CoCo Dataset from [Official Website](https://cocodataset.org/#download). - -## Get Quantized Yolo-v3 model with Neural Compressor - -### 1.Config the yolo_v3.yaml with the valid cocoraw data path or the yolo_v3_itex.yaml if using the Intel Extension for Tensorflow. - -### 2.Config the yaml file -In examples directory, there is a yolo_v3.yaml for tuning the model on Intel CPUs. The 'framework' in the yaml is set to 'tensorflow'. 
If running this example on Intel GPUs, the 'framework' should be set to 'tensorflow_itex' and the device in yaml file should be set to 'gpu'. The yolo_v3_itex.yaml is prepared for the GPU case. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal neural_compressor. - -### 3.Run below command one by one. -Usage -```shell -cd examples/tensorflow/object_detection/yolo_v3/quantization/ptq -``` -```python -python infer_detections.py --input_graph /path/to/yolov3_fp32.pb --config ./yolo_v3.yaml --output_graph /path/to/save/yolov3_tuned3.pb -``` - -Finally, the program will generate the quantized Yolo-v3 model with relative 1% loss. diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/coco_constants.py b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/coco_constants.py deleted file mode 100644 index 8c91524da25..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/coco_constants.py +++ /dev/null @@ -1,6 +0,0 @@ -COCO_NUM_VAL_IMAGES = 4952 -LABEL_MAP = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, - 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, - 84, 85, 86, 87, 88, 89, 90] diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/infer_detections.py b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/infer_detections.py deleted file mode 100644 index e2d961be896..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/infer_detections.py +++ /dev/null @@ -1,154 +0,0 @@ -import time -import numpy as np -import tensorflow as tf - -from absl import app, flags - -from tensorflow.python.client import timeline -from coco_constants import LABEL_MAP -from utils import read_graph, non_max_suppression - -flags.DEFINE_integer('batch_size', 1, "batch size") - -flags.DEFINE_string("ground_truth", None, "ground truth file") - -flags.DEFINE_string("input_graph", None, "input graph") - -flags.DEFINE_string("output_graph", None, "input graph") - -flags.DEFINE_string("config", None, "Neural Compressor config file") - -flags.DEFINE_float("conf_threshold", 0.5, "confidence threshold") - -flags.DEFINE_float("iou_threshold", 0.4, "IoU threshold") - -flags.DEFINE_integer("num_intra_threads", 0, "number of intra threads") - -flags.DEFINE_integer("num_inter_threads", 1, "number of inter threads") - -flags.DEFINE_boolean("benchmark", False, "benchmark mode") - -flags.DEFINE_boolean("profiling", False, "Signal of profiling") - -FLAGS = flags.FLAGS - - -class NMS(): - def __init__(self, conf_threshold, iou_threshold): - self.conf_threshold = conf_threshold - self.iou_threshold = iou_threshold - - def __call__(self, sample): - preds, labels = sample - if not isinstance(preds, np.ndarray): - preds = np.array(preds) - filtered_boxes = non_max_suppression(preds, - self.conf_threshold, - self.iou_threshold) - - det_boxes = [] - det_scores = [] - det_classes = [] - for cls, bboxs in filtered_boxes.items(): - det_classes.extend([LABEL_MAP[cls + 1]] * len(bboxs)) - for box, score in bboxs: - rect_pos = box.tolist() - y_min, x_min = rect_pos[1], rect_pos[0] - y_max, x_max = rect_pos[3], rect_pos[2] - height, width = 416, 416 - det_boxes.append( - [y_min / height, x_min / width, y_max / 
height, x_max / width]) - det_scores.append(score) - - if len(det_boxes) == 0: - det_boxes = np.zeros((0, 4)) - det_scores = np.zeros((0, )) - det_classes = np.zeros((0, )) - - return [np.array([det_boxes]), np.array([det_scores]), np.array([det_classes])], labels - - -def create_tf_config(): - config = tf.compat.v1.ConfigProto() - config.intra_op_parallelism_threads = FLAGS.num_intra_threads - config.inter_op_parallelism_threads = FLAGS.num_inter_threads - return config - - -def run_benchmark(): - config = create_tf_config() - - graph_def = read_graph(FLAGS.input_graph) - - tf.import_graph_def(graph_def, name='') - - input_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('inputs:0') - output_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('output_boxes:0') - - dummy_data_shape = list(input_tensor.shape) - dummy_data_shape[0] = FLAGS.batch_size - dummy_data = np.random.random(dummy_data_shape).astype(np.float32) - - if FLAGS.profiling != True: - num_warmup = 200 - total_iter = 1000 - else: - num_warmup = 20 - total_iter = 100 - - total_time = 0.0 - - with tf.compat.v1.Session(config=config) as sess: - print("Running warm-up") - for i in range(num_warmup): - sess.run(output_tensor, {input_tensor: dummy_data}) - print("Warm-up complete") - - for i in range(1, total_iter + 1): - start_time = time.time() - sess.run(output_tensor, {input_tensor: dummy_data}) - end_time = time.time() - - if i % 10 == 0: - print( - "Steps = {0}, {1:10.6f} samples/sec".format(i, FLAGS.batch_size / duration)) - - duration = end_time - start_time - total_time += duration - - if FLAGS.profiling: - options = tf.compat.v1.RunOptions( - trace_level=tf.compat.v1.RunOptions.FULL_TRACE) - run_metadata = tf.compat.v1.RunMetadata() - - sess.run(output_tensor, {input_tensor: dummy_data}, - options=options, run_metadata=run_metadata) - - fetched_timeline = timeline.Timeline(run_metadata.step_stats) - chrome_trace = fetched_timeline.generate_chrome_trace_format() - with open("timeline_%s.json" % (time.time()), 'w') as f: - f.write(chrome_trace) - - throughput = total_iter * FLAGS.batch_size / total_time - print("Batch size = {}".format(FLAGS.batch_size)) - print("Latency: {} ms".format(1 / throughput * 1000)) - print("Throughput: {} samples/sec".format(throughput)) - - -def main(_): - if FLAGS.benchmark: - run_benchmark() - else: - FLAGS.batch_size = 1 - from neural_compressor.experimental import Quantization, common - quantizer = Quantization(FLAGS.config) - quantizer.model = common.Model(FLAGS.input_graph) - kwargs = {'conf_threshold': FLAGS.conf_threshold, - 'iou_threshold': FLAGS.iou_threshold} - quantizer.postprocess = common.Postprocess(NMS, 'NMS', **kwargs) - q_model = quantizer.fit() - q_model.save(FLAGS.output_graph) - - -if __name__ == '__main__': - app.run(main) diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/prepare_dataset.sh b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/prepare_dataset.sh deleted file mode 100644 index 3cc0cc16f80..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/prepare_dataset.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# set -x - -DATA_DIR="${PWD}/data" -DATA_NAME="val2017" -DATA_URL_LIST='http://images.cocodataset.org/zips/val2017.zip http://images.cocodataset.org/annotations/annotations_trainval2017.zip' -PACKAGES_LIST='val2017.zip annotations_trainval2017.zip' -VAL_IMAGE_DIR=$DATA_DIR/val2017 -TRAIN_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json 
-VAL_ANNOTATIONS_FILE=$DATA_DIR/annotations/instances_val2017.json -TESTDEV_ANNOTATIONS_FILE=$DATA_DIR/annotations/empty.json -OUTPUT_DIR=$DATA_DIR - -help() -{ - cat <<- EOF - - Desc: Prepare dataset for Tensorflow COCO object detection. - - -h --help help info - - --dataset_location set dataset location, default is ./data - -EOF - exit 0 -} - -function main { - init_params "$@" - download_dataset - convert_to_tf_record -} - -# init params -function init_params { - - for var in "$@" - do - case $var in - --dataset_location=*) - DATA_DIR=$(echo "$var" |cut -f2 -d=) - ;; - -h|--help) help - ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; - esac - done - -} - -# removes files that will not be used anymore -function remove_zipped_packages { - for package in $PACKAGES_LIST; do - rm "$package" - done -} - -function download_tf_models_repo { - if [ ! -d models ]; then - git clone https://github.com/tensorflow/models.git - fi - cd models || exit - git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 - cd .. -} - -function divide_tf_records_by_dataset { - if [ ! -d "${DATA_DIR}/tf_test2017" ]; then - mkdir "${DATA_DIR}/tf_test2017" - fi - if [ ! -d "${DATA_DIR}/tf_train2017" ]; then - mkdir "${DATA_DIR}/tf_train2017" - fi - if [ ! -d "${DATA_DIR}/tf_val2017" ]; then - mkdir "${DATA_DIR}/tf_val2017" - fi - mv ${DATA_DIR}/coco_testdev.record* ${DATA_DIR}/tf_test2017 - mv ${DATA_DIR}/coco_train.record* ${DATA_DIR}/tf_train2017 - mv ${DATA_DIR}/coco_val.record* ${DATA_DIR}/tf_val2017 -} - -function convert { - cd models/research - protoc object_detection/protos/*.proto --python_out=. - export PYTHONPATH=$PYTHONPATH:$(pwd) - export PYTHONPATH=$PYTHONPATH:$(pwd)/slim - python ./object_detection/dataset_tools/create_coco_tf_record.py --logtostderr \ - --train_image_dir=empty_dir \ - --val_image_dir="${VAL_IMAGE_DIR}" \ - --test_image_dir=empty_dir \ - --train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \ - --val_annotations_file="${VAL_ANNOTATIONS_FILE}" \ - --testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \ - --output_dir="${OUTPUT_DIR}" -} - -function convert_to_tf_record { - download_tf_models_repo - convert - divide_tf_records_by_dataset -} - -# download_dataset -function download_dataset { - if [ ! -d "${DATA_DIR}" ]; then - mkdir "${DATA_DIR}" - fi - - cd "${DATA_DIR}" || exit - if [ ! -f "${VAL_IMAGE_DIR}" ]; then - - for dataset_dowload_link in $DATA_URL_LIST; do - wget "$dataset_dowload_link" - done - for package in $PACKAGES_LIST; do - unzip -o "$package" - done - remove_zipped_packages - if [ ! -d empty_dir ]; then - mkdir empty_dir - fi - - cd annotations || exit - echo "{ \"images\": {}, \"categories\": {}}" > empty.json - cd .. - else - echo "Dataset ${DATA_NAME} is exist!" 
- fi - - cd ../ -} - -main "$@" diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/requirements.txt b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/requirements.txt deleted file mode 100644 index 865df0f3a6b..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -Cython -contextlib2 -pillow>=8.2.0 -lxml>=4.6.2 -matplotlib -numpy>=1.17.4 -pycocotools -protobuf diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_benchmark.sh b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_benchmark.sh deleted file mode 100644 index 47682835a2c..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_benchmark.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - for var in "$@" - do - case $var in - --config=*) - config=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - esac - done - -} - - -# run_tuning -function run_benchmark { - python infer_detections.py \ - --input_graph ${input_model} \ - --config ${config} \ - --benchmark -} - -main "$@" diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_tuning.sh b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_tuning.sh deleted file mode 100644 index 120725a1110..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/run_tuning.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -x - -function main { - - init_params "$@" - run_benchmark - -} - -# init params -function init_params { - for var in "$@" - do - case $var in - --config=*) - config=$(echo $var |cut -f2 -d=) - ;; - --input_model=*) - input_model=$(echo $var |cut -f2 -d=) - ;; - --output_model=*) - output_model=$(echo $var |cut -f2 -d=) - ;; - esac - done - -} - - -# run tuning -function run_benchmark { - python infer_detections.py \ - --input_graph ${input_model} \ - --config ${config} \ - --output_graph ${output_model} -} - -main "$@" diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/utils.py b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/utils.py deleted file mode 100644 index 9e15a75fda4..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/utils.py +++ /dev/null @@ -1,205 +0,0 @@ -import tensorflow as tf -import numpy as np - -from PIL import Image -from tensorflow.core.framework import graph_pb2 -from tensorflow.python.platform import gfile -from tensorflow.python.data.experimental import parallel_interleave -from tensorflow.python.data.experimental import map_and_batch - -def read_graph(input_graph): - if not gfile.Exists(input_graph): - print("Input graph file '" + input_graph + "' does not exist!") - exit(-1) - - input_graph_def = graph_pb2.GraphDef() - with gfile.Open(input_graph, "rb") as f: - data = f.read() - input_graph_def.ParseFromString(data) - - return input_graph_def - -def parse_and_preprocess(serialized_example): - # Dense features in Example proto. 
- feature_map = { - 'image/encoded': tf.compat.v1.FixedLenFeature([], dtype=tf.string, - default_value=''), - 'image/object/class/text': tf.compat.v1.VarLenFeature(dtype=tf.string), - 'image/source_id': tf.compat.v1.FixedLenFeature([], dtype=tf.string, - default_value=''), - } - sparse_float32 = tf.compat.v1.VarLenFeature(dtype=tf.float32) - # Sparse features in Example proto. - feature_map.update( - {k: sparse_float32 for k in ['image/object/bbox/xmin', - 'image/object/bbox/ymin', - 'image/object/bbox/xmax', - 'image/object/bbox/ymax']}) - - features = tf.compat.v1.parse_single_example(serialized_example, feature_map) - - xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) - ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) - xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) - ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) - - # Note that we impose an ordering of (y, x) just to make life difficult. - bbox = tf.concat([ymin, xmin, ymax, xmax], 0) - - # Force the variable number of bounding boxes into the shape - # [1, num_boxes, coords]. - bbox = tf.expand_dims(bbox, 0) - bbox = tf.transpose(bbox, [0, 2, 1]) - - encoded_image = features['image/encoded'] - image_tensor = tf.image.decode_image(encoded_image, channels=3) - image_tensor.set_shape([None, None, 3]) - - label = features['image/object/class/text'].values - - image_id = features['image/source_id'] - - return image_tensor, bbox[0], label, image_id - -def get_input(data_location, batch_size=1): - tfrecord_paths = [data_location] - ds = tf.data.TFRecordDataset.list_files(tfrecord_paths) - - ds = ds.apply( - parallel_interleave( - tf.data.TFRecordDataset, cycle_length=28, block_length=5, - sloppy=True, - buffer_output_elements=10000, prefetch_input_elements=10000)) - ds = ds.prefetch(buffer_size=10000) - ds = ds.apply( - map_and_batch( - map_func=parse_and_preprocess, - batch_size=batch_size, - num_parallel_batches=28, - num_parallel_calls=None)) - ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) - ds_iter = tf.compat.v1.data.make_one_shot_iterator(ds) - images, bbox, label, image_id = ds_iter.get_next() - - return images, bbox, label, image_id - -def letter_box_image(image, output_height, output_width, fill_value): - """ - Fit image with final image with output_width and output_height. - :param image: PILLOW Image object. - :param output_height: width of the final image. - :param output_width: height of the final image. - :param fill_value: fill value for empty area. Can be uint8 or np.ndarray - :return: numpy image fit within letterbox. 
dtype=uint8, shape=(output_height, output_width) - """ - - height_ratio = float(output_height) / image.size[1] - width_ratio = float(output_width) / image.size[0] - fit_ratio = min(width_ratio, height_ratio) - fit_height = int(image.size[1] * fit_ratio) - fit_width = int(image.size[0] * fit_ratio) - fit_image = np.asarray(image.resize((fit_width, fit_height), resample=Image.BILINEAR)) - - if isinstance(fill_value, int): - fill_value = np.full(fit_image.shape[2], fill_value, fit_image.dtype) - - to_return = np.tile(fill_value, (output_height, output_width, 1)) - pad_top = int(0.5 * (output_height - fit_height)) - pad_left = int(0.5 * (output_width - fit_width)) - to_return[pad_top: pad_top+fit_height, pad_left: pad_left+fit_width] = fit_image - - return to_return - -def _iou(box1, box2): - """ - Computes Intersection over Union value for 2 bounding boxes - - :param box1: array of 4 values (top left and bottom right coords): [x0, y0, x1, x2] - :param box2: same as box1 - :return: IoU - """ - b1_x0, b1_y0, b1_x1, b1_y1 = box1 - b2_x0, b2_y0, b2_x1, b2_y1 = box2 - - int_x0 = max(b1_x0, b2_x0) - int_y0 = max(b1_y0, b2_y0) - int_x1 = min(b1_x1, b2_x1) - int_y1 = min(b1_y1, b2_y1) - - int_area = max(int_x1 - int_x0, 0) * max(int_y1 - int_y0, 0) - - b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0) - b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0) - - # we add small epsilon of 1e-05 to avoid division by 0 - iou = int_area / (b1_area + b2_area - int_area + 1e-05) - return iou - -def non_max_suppression(predictions_with_boxes, confidence_threshold, iou_threshold=0.4): - """ - Applies Non-max suppression to prediction boxes. - - :param predictions_with_boxes: 3D numpy array, first 4 values in 3rd dimension are bbox attrs, 5th is confidence - :param confidence_threshold: the threshold for deciding if prediction is valid - :param iou_threshold: the threshold for deciding if two boxes overlap - :return: dict: class -> [(box, score)] - """ - conf_mask = np.expand_dims( - (predictions_with_boxes[:, :, 4] > confidence_threshold), -1) - predictions = predictions_with_boxes * conf_mask - - result = {} - for i, image_pred in enumerate(predictions): - shape = image_pred.shape - tmp = image_pred - sum_t = np.sum(tmp, axis=1) - non_zero_idxs = sum_t != 0 - image_pred = image_pred[non_zero_idxs, :] - image_pred = image_pred.reshape(-1, shape[-1]) - - bbox_attrs = image_pred[:, :5] - classes = image_pred[:, 5:] - classes = np.argmax(classes, axis=-1) - - unique_classes = list(set(classes.reshape(-1))) - - for cls in unique_classes: - cls_mask = classes == cls - cls_boxes = bbox_attrs[np.nonzero(cls_mask)] - cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]] - cls_scores = cls_boxes[:, -1] - cls_boxes = cls_boxes[:, :-1] - - while len(cls_boxes) > 0: - box = cls_boxes[0] - score = cls_scores[0] - if cls not in result: - result[cls] = [] - result[cls].append((box, score)) - cls_boxes = cls_boxes[1:] - cls_scores = cls_scores[1:] - ious = np.array([_iou(box, x) for x in cls_boxes]) - iou_mask = ious < iou_threshold - cls_boxes = cls_boxes[np.nonzero(iou_mask)] - cls_scores = cls_scores[np.nonzero(iou_mask)] - - return result - -def letter_box_pos_to_original_pos(letter_pos, current_size, ori_image_size)-> np.ndarray: - """ - Parameters should have same shape and dimension space. (Width, Height) or (Height, Width) - :param letter_pos: The current position within letterbox image including fill value area. - :param current_size: The size of whole image including fill value area. 
- :param ori_image_size: The size of image before being letter boxed. - :return: - """ - letter_pos = np.asarray(letter_pos, dtype=np.float) - current_size = np.asarray(current_size, dtype=np.float) - ori_image_size = np.asarray(ori_image_size, dtype=np.float) - final_ratio = min(current_size[0] / ori_image_size[0], current_size[1] / ori_image_size[1]) - pad = 0.5 * (current_size - final_ratio * ori_image_size) - pad = pad.astype(np.int32) - to_return_pos = (letter_pos - pad) / final_ratio - - return to_return_pos - diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3.yaml b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3.yaml deleted file mode 100644 index 91197b50762..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3.yaml +++ /dev/null @@ -1,85 +0,0 @@ -model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. - name: yolo_v3 - framework: tensorflow # mandatory. supported values are tensorflow, pytorch, or mxnet; allow new framework backend extension. - inputs: inputs - outputs: output_boxes - -device: cpu # optional. default value is cpu, other value is gpu. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 2 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/calibration/dataset - filter: - LabelBalance: - size: 1 - transform: - ParseDecodeCoco: - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - granularity: per_channel - op_wise: { # optional. tuning constraints on op-wise for advance user to reduce tuning space. - 'detector/yolo-v3/Conv_6/Conv2D': { - 'activation': {'dtype': ['fp32']}, - }, - 'detector/yolo-v3/Conv_14/Conv2D': { - 'activation': {'dtype': ['fp32']}, - }, - 'detector/yolo-v3/Conv_22/Conv2D': { - 'activation': {'dtype': ['fp32']}, - } - } - -evaluation: # optional. used to config evaluation process. - accuracy: - metric: - COCOmAP: - map_key: 'DetectionBoxes_Precision/mAP@.50IOU' - dataloader: - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ParseDecodeCoco: {} - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - constant_value: 128 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ParseDecodeCoco: - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - constant_value: 128 - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. 
diff --git a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3_itex.yaml b/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3_itex.yaml deleted file mode 100644 index 6a8c4483d9b..00000000000 --- a/examples/tensorflow/object_detection/yolo_v3/quantization/ptq/yolo_v3_itex.yaml +++ /dev/null @@ -1,85 +0,0 @@ -model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. - name: yolo_v3 - framework: tensorflow_itex # mandatory. supported values are tensorflow, tensorflow_itex, pytorch, or mxnet; allow new framework backend extension. - inputs: inputs - outputs: output_boxes - -device: gpu # optional. set cpu if installed intel-extension-for-tensorflow[cpu], set gpu if installed intel-extension-for-tensorflow[gpu]. - -quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - calibration: - sampling_size: 2 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. - batch_size: 1 - dataset: - COCORecord: - root: /path/to/calibration/dataset - filter: - LabelBalance: - size: 1 - transform: - ParseDecodeCoco: - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - - model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. - activation: - algorithm: minmax - weight: - granularity: per_channel - op_wise: { # optional. tuning constraints on op-wise for advance user to reduce tuning space. - 'detector/yolo-v3/Conv_6/Conv2D': { - 'activation': {'dtype': ['fp32']}, - }, - 'detector/yolo-v3/Conv_14/Conv2D': { - 'activation': {'dtype': ['fp32']}, - }, - 'detector/yolo-v3/Conv_22/Conv2D': { - 'activation': {'dtype': ['fp32']}, - } - } - -evaluation: # optional. used to config evaluation process. - accuracy: - metric: - COCOmAP: - map_key: 'DetectionBoxes_Precision/mAP@.50IOU' - dataloader: - batch_size: 1 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ParseDecodeCoco: {} - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - constant_value: 128 - performance: - iteration: 100 - configs: - cores_per_instance: 28 - num_of_instance: 1 - kmp_blocktime: 1 - dataloader: - batch_size: 10 - dataset: - COCORecord: - root: /path/to/evaluation/dataset - transform: - ParseDecodeCoco: - ResizeWithRatio: - min_dim: 416 - max_dim: 416 - padding: True - constant_value: 128 - -tuning: - accuracy_criterion: - relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. - exit_policy: - timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit. - random_seed: 9527 # optional. random seed for deterministic tuning. 
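The two YAML files removed above drove the yolo_v3 example through the old YAML-based API: named inputs/outputs, a calibration sampling size of 2, FP32 fallback for three detector convolutions, and a 1% relative accuracy criterion. A minimal sketch of how those same settings could be expressed with the `PostTrainingQuantConfig` API used throughout this patch series is shown below; the exact `op_name_list` schema and the criterion objects are assumptions based on the new API, not code taken from these patches.

```python
# Hypothetical new-API equivalent of the removed yolo_v3.yaml / yolo_v3_itex.yaml;
# parameter schemas are assumptions for illustration, not part of this patch series.
from neural_compressor.config import (AccuracyCriterion, PostTrainingQuantConfig,
                                      TuningCriterion)
from neural_compressor.quantization import fit
from neural_compressor.utils.utility import set_random_seed

set_random_seed(9527)

# Keep the three detector convolutions in FP32, as the deleted YAML did via op_wise.
fp32_ops = {
    'detector/yolo-v3/Conv_6/Conv2D': {'activation': {'dtype': ['fp32']}},
    'detector/yolo-v3/Conv_14/Conv2D': {'activation': {'dtype': ['fp32']}},
    'detector/yolo-v3/Conv_22/Conv2D': {'activation': {'dtype': ['fp32']}},
}

config = PostTrainingQuantConfig(
    device="cpu",                 # the itex variant would set device="gpu" and the ITEX backend
    inputs=["inputs"],
    outputs=["output_boxes"],
    calibration_sampling_size=[2],
    op_name_list=fp32_ops,
    tuning_criterion=TuningCriterion(timeout=0),
    accuracy_criterion=AccuracyCriterion(criterion="relative", tolerable_loss=0.01),
)

# q_model = fit(model="/path/to/yolov3_fp32.pb", conf=config,
#               calib_dataloader=calib_dataloader, eval_func=eval_func)
```

The COCORecord calibration and evaluation dataloaders that the YAML declared would instead be built in code, as the other examples touched by this series do with `neural_compressor.data`.
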
From 3c91f7717f2ef9b8e861be722b2a1ed99aa3205c Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Fri, 9 Dec 2022 17:50:31 +0800 Subject: [PATCH 07/14] add oldapi example link Signed-off-by: Lv, Liang1 --- examples/README.md | 88 +++++++++---------- .../quantization/ptq/tf_benchmark.py | 5 +- .../quantization/ptq/inference.py | 6 +- .../quantization/ptq/run_accuracy.py | 7 +- 4 files changed, 54 insertions(+), 52 deletions(-) diff --git a/examples/README.md b/examples/README.md index 03cfc8a8f59..ec676b7ea6b 100644 --- a/examples/README.md +++ b/examples/README.md @@ -37,175 +37,175 @@ Intel® Neural Compressor validated examples with multiple compression technique ResNet50 V1.0 Image Recognition Post-Training Static Quantization - pb + pb ResNet50 V1.5 Image Recognition Post-Training Static Quantization - pb + pb ResNet101 Image Recognition Post-Training Static Quantization - pb + pb MobileNet V1 Image Recognition Post-Training Static Quantization - pb + pb MobileNet V2 Image Recognition Post-Training Static Quantization - pb / keras + pb / keras MobileNet V3 Image Recognition Post-Training Static Quantization - pb + pb Inception V1 Image Recognition Post-Training Static Quantization - pb + pb Inception V2 Image Recognition Post-Training Static Quantization - pb + pb Inception V3 Image Recognition Post-Training Static Quantization - pb + pb Inception V4 Image Recognition Post-Training Static Quantization - pb + pb Inception ResNet V2 Image Recognition Post-Training Static Quantization - pb + pb VGG16 Image Recognition Post-Training Static Quantization - pb / keras + pb / keras VGG19 Image Recognition Post-Training Static Quantization - pb / keras + pb / keras ResNet V2 50 Image Recognition Post-Training Static Quantization - pb / keras + pb / keras ResNet V2 101 Image Recognition Post-Training Static Quantization - pb / keras + pb / keras ResNet V2 152 Image Recognition Post-Training Static Quantization - pb + pb DenseNet121 Image Recognition Post-Training Static Quantization - pb + pb DenseNet161 Image Recognition Post-Training Static Quantization - pb + pb DenseNet169 Image Recognition Post-Training Static Quantization - pb + pb EfficientNet B0 Image Recognition Post-Training Static Quantization - ckpt + ckpt MNIST Image Recognition Quantization-Aware Training - keras + keras ResNet50 Image Recognition Post-Training Static Quantization - keras + keras ResNet50 Fashion Image Recognition Post-Training Static Quantization - keras + keras ResNet101 Image Recognition Post-Training Static Quantization - keras + keras Inception V3 Image Recognition Post-Training Static Quantization - keras + keras Inception Resnet V2 Image Recognition Post-Training Static Quantization - keras + keras Xception Image Recognition Post-Training Static Quantization - keras + keras ResNet V2 Image Recognition Quantization-Aware Training - keras + keras EfficientNet V2 B0 Image Recognition Post-Training Static Quantization - SavedModel + SavedModel BERT base MRPC @@ -217,13 +217,13 @@ Intel® Neural Compressor validated examples with multiple compression technique BERT large SQuAD (Model Zoo) Natural Language Processing Post-Training Static Quantization - pb + pb BERT large SQuAD Natural Language Processing Post-Training Static Quantization - pb + pb DistilBERT base @@ -247,49 +247,49 @@ Intel® Neural Compressor validated examples with multiple compression technique SSD ResNet50 V1 Object Detection Post-Training Static Quantization - pb / ckpt + pb / ckpt SSD MobileNet V1 Object Detection Post-Training Static 
Quantization - pb / ckpt + pb / ckpt Faster R-CNN Inception ResNet V2 Object Detection Post-Training Static Quantization - pb / SavedModel + pb / SavedModel Faster R-CNN ResNet101 Object Detection Post-Training Static Quantization - pb / SavedModel + pb / SavedModel Faster R-CNN ResNet50 Object Detection Post-Training Static Quantization - pb + pb Mask R-CNN Inception V2 Object Detection Post-Training Static Quantization - pb / ckpt + pb / ckpt SSD ResNet34 Object Detection Post-Training Static Quantization - pb + pb YOLOv3 Object Detection Post-Training Static Quantization - pb + pb Wide & Deep @@ -301,7 +301,7 @@ Intel® Neural Compressor validated examples with multiple compression technique Arbitrary Style Transfer Style Transfer Post-Training Static Quantization - ckpt + ckpt @@ -323,21 +323,21 @@ Intel® Neural Compressor validated examples with multiple compression technique Image Recognition Unstructured Magnitude - pb + pb ResNet V2 Image Recognition Unstructured Magnitude - pb + pb ViT Image Recognition Unstructured Magnitude - ckpt + ckpt @@ -359,7 +359,7 @@ Intel® Neural Compressor validated examples with multiple compression technique DenseNet201 Image Recognition Knowledge Distillation - pb + pb diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py index fea77e5d60f..00b8ccd07c1 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py @@ -334,7 +334,8 @@ def __iter__(self): from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.utils.utility import set_random_seed set_random_seed(9527) config = PostTrainingQuantConfig( @@ -376,7 +377,7 @@ def __iter__(self): if model_detail.get('model_name')!='DLRM' \ else oob_dlrm_collate_func) q_model = fit( - model=common.Model(args.model_path), + model=args.model_path, conf=config, calib_dataloader=calib_dataloader) q_model.save(args.output_path) diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py index a048f4e5237..63444ea727e 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/inference.py @@ -181,9 +181,9 @@ def auto_tune(self): Returns: graph: it will return a quantized pb """ - from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.utils.utility import set_random_seed infer_graph = load_graph(self.args.input_graph) set_random_seed(9527) @@ -199,7 +199,7 @@ def auto_tune(self): if self.args.calib_data: q_model = fit( - model=common.Model(infer_graph), + model=infer_graph, conf=config, calib_dataloader=Dataloader(self.args.calib_data, self.args.batch_size), eval_func=self.eval_inference) diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index 
3472c152598..fbed7e1f876 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -126,7 +126,7 @@ def eval_func(graph): file_name = preprocessed_files[test_data_index] with open(os.path.join(preprocessed_data_dir, "{:}.pkl".format(file_name)), "rb") as f: data = pickle.load(f)[0] - if args.mode == 'benchmark' and i < args.iters: + if args.mode == 'performance' and i < args.iters: time_start = time.time() predictions[i] = sess.run(output_tensor, feed_dict={input_tensor: data[np.newaxis, ...]})[0].astype(np.float32) duration = time.time() - time_start @@ -205,12 +205,13 @@ def __len__(self): if args.mode == 'tune': from neural_compressor.experimental import common from neural_compressor.quantization import fit - from neural_compressor.config import PostTrainingQuantConfig, set_random_seed + from neural_compressor.config import PostTrainingQuantConfig + from neural_compressor.utils.utility import set_random_seed set_random_seed(9527) config = PostTrainingQuantConfig(calibration_sampling_size=[40]) q_model = fit( - model=common.Model(graph), + model=graph, conf=config, calib_dataloader=common.DataLoader(CalibrationDL()), eval_dataloader=common.DataLoader(CalibrationDL()), From 524f8d8bc36ebb6e3c11c8644cc16df290be476a Mon Sep 17 00:00:00 2001 From: chensuyue Date: Mon, 12 Dec 2022 00:41:52 +0800 Subject: [PATCH 08/14] support removed model test Signed-off-by: chensuyue --- .azure-pipelines/scripts/models/env_setup.sh | 45 +++++++++++++------ .../models/run_model_trigger_common.sh | 5 ++- .../models/run_mxnet_models_trigger.sh | 2 +- .../models/run_onnxrt_models_trigger.sh | 3 +- .../models/run_pytorch_models_trigger.sh | 3 +- .../models/run_tensorflow_models_trigger.sh | 2 + 6 files changed, 42 insertions(+), 18 deletions(-) diff --git a/.azure-pipelines/scripts/models/env_setup.sh b/.azure-pipelines/scripts/models/env_setup.sh index be3092bea75..e715976ae1b 100644 --- a/.azure-pipelines/scripts/models/env_setup.sh +++ b/.azure-pipelines/scripts/models/env_setup.sh @@ -37,6 +37,9 @@ for i in "$@"; do --new_benchmark=*) new_benchmark=$(echo $i | sed "s/${PATTERN}//") ;; + --inc_new_api=*) + inc_new_api=$(echo $i | sed "s/${PATTERN}//") + ;; *) echo "Parameter $i not recognized." exit 1 @@ -61,7 +64,19 @@ fi $BOLD_YELLOW && echo "====== install requirements ======" && $RESET /bin/bash /neural-compressor/.azure-pipelines/scripts/install_nc.sh -cd ${WORK_SOURCE_DIR}/${model_src_dir} +cd ${WORK_SOURCE_DIR} +if [[ "${inc_new_api}" == "false" ]]; then + echo "copy old api examples to workspace..." + git clone -b old_api_examples https://github.com/intel/neural-compressor.git old-lpot-models + cd old-lpot-models + git branch + cd - + rm -rf ${model_src_dir} + mkdir -p ${model_src_dir} + cp -r old-lpot-models/examples/${framework}/${model_src_dir} ${WORK_SOURCE_DIR}/${model_src_dir}/../ +fi + +cd ${model_src_dir} pip install ruamel_yaml pip install psutil pip install protobuf==3.20.1 @@ -109,16 +124,18 @@ else $BOLD_RED && echo "Not found requirements.txt file." && $RESET fi -$BOLD_YELLOW && echo "======== update yaml config ========" && $RESET -$BOLD_YELLOW && echo -e "\nPrint origin yaml..." 
&& $RESET -cat ${yaml} -python ${SCRIPTS_PATH}/update_yaml_config.py \ - --yaml=${yaml} \ - --framework=${framework} \ - --dataset_location=${dataset_location} \ - --batch_size=${batch_size} \ - --strategy=${strategy} \ - --new_benchmark=${new_benchmark} \ - --multi_instance='true' -$BOLD_YELLOW && echo -e "\nPrint updated yaml... " && $RESET -cat ${yaml} +if [[ "${inc_new_api}" == "false" ]]; then + $BOLD_YELLOW && echo "======== update yaml config ========" && $RESET + $BOLD_YELLOW && echo -e "\nPrint origin yaml..." && $RESET + cat ${yaml} + python ${SCRIPTS_PATH}/update_yaml_config.py \ + --yaml=${yaml} \ + --framework=${framework} \ + --dataset_location=${dataset_location} \ + --batch_size=${batch_size} \ + --strategy=${strategy} \ + --new_benchmark=${new_benchmark} \ + --multi_instance='true' + $BOLD_YELLOW && echo -e "\nPrint updated yaml... " && $RESET + cat ${yaml} +fi diff --git a/.azure-pipelines/scripts/models/run_model_trigger_common.sh b/.azure-pipelines/scripts/models/run_model_trigger_common.sh index a1e6719c583..88b20ba258c 100644 --- a/.azure-pipelines/scripts/models/run_model_trigger_common.sh +++ b/.azure-pipelines/scripts/models/run_model_trigger_common.sh @@ -29,6 +29,8 @@ do strategy=`echo $i | sed "s/${PATTERN}//"`;; --new_benchmark=*) new_benchmark=`echo $i | sed "s/${PATTERN}//"`;; + --inc_new_api=*) + inc_new_api=`echo $i | sed "s/${PATTERN}//"`;; --tuning_cmd=*) tuning_cmd=`echo $i | sed "s/${PATTERN}//"`;; --benchmark_cmd=*) @@ -62,7 +64,8 @@ if [ "${mode}" == "env_setup" ]; then --dataset_location=${dataset_location} \ --batch_size=${batch_size} \ --strategy=${strategy} \ - --new_benchmark=${new_benchmark} + --new_benchmark=${new_benchmark} \ + --inc_new_api="${inc_new_api}" elif [ "${mode}" == "tuning" ]; then cd ${WORK_SOURCE_DIR}/${model_src_dir} $BOLD_YELLOW && echo "workspace ${WORK_SOURCE_DIR}/${model_src_dir}" && $RESET diff --git a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh b/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh index 7d97a35d95e..907326041b6 100644 --- a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh @@ -24,7 +24,7 @@ done FRAMEWORK="mxnet" FRAMEWORK_VERSION="1.9.1" - +inc_new_api=false # ======== set up config for mxnet models ======== if [ "${model}" == "resnet50v1" ]; then model_src_dir="image_recognition/cnn_models/quantization/ptq" diff --git a/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh b/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh index 112888f07db..1ac594168f6 100644 --- a/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh @@ -24,7 +24,7 @@ done FRAMEWORK="onnxrt" FRAMEWORK_VERSION="1.13.1" - +inc_new_api=false # ======== set up config for onnxrt models ======== if [ "${model}" == "resnet50-v1-12" ]; then model_src_dir="image_recognition/onnx_model_zoo/resnet50/quantization/ptq" @@ -82,6 +82,7 @@ fi --new_benchmark=${new_benchmark} \ --tuning_cmd="${tuning_cmd}" \ --benchmark_cmd="${benchmark_cmd}" \ + --inc_new_api="${inc_new_api}" \ --mode=${mode} \ --USE_TUNE_ACC=${USE_TUNE_ACC} \ --PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \ diff --git a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh index 30b5a0c8344..1953cd872db 100644 --- a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh +++ 
b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh @@ -25,7 +25,7 @@ FRAMEWORK="pytorch" FRAMEWORK_VERSION="1.12.0+cpu" TORCH_VISION_VERSION="0.13.0+cpu" - +inc_new_api=false # ======== set up config for pytorch models ======== if [ "${model}" == "resnet18" ]; then model_src_dir="image_recognition/torchvision_models/quantization/ptq/cpu/eager" @@ -64,6 +64,7 @@ fi --new_benchmark=${new_benchmark} \ --tuning_cmd="${tuning_cmd}" \ --benchmark_cmd="${benchmark_cmd}" \ + --inc_new_api="${inc_new_api}" \ --mode=${mode} \ --USE_TUNE_ACC=${USE_TUNE_ACC} \ --PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \ diff --git a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh index 92d6735eae6..18165c10161 100644 --- a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh @@ -24,6 +24,7 @@ done FRAMEWORK="tensorflow" FRAMEWORK_VERSION="2.10.0" +inc_new_api=false # ======== set up config for tensorflow models ======== if [ "${model}" == "resnet50v1.5" ]; then model_src_dir="image_recognition/tensorflow_models/quantization/ptq" @@ -131,6 +132,7 @@ fi --new_benchmark=${new_benchmark} \ --tuning_cmd="${tuning_cmd}" \ --benchmark_cmd="${benchmark_cmd}" \ + --inc_new_api="${inc_new_api}" \ --mode=${mode} \ --USE_TUNE_ACC=${USE_TUNE_ACC} \ --PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \ From 353d606f56a49596f5304c3b551ee88a26688285 Mon Sep 17 00:00:00 2001 From: chensuyue Date: Mon, 12 Dec 2022 01:20:19 +0800 Subject: [PATCH 09/14] update tensorflow CI models with newAPI Signed-off-by: chensuyue --- .azure-pipelines/model-test.yml | 1 + .../models/run_tensorflow_models_trigger.sh | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.azure-pipelines/model-test.yml b/.azure-pipelines/model-test.yml index 270a245bb65..8e7fd29ac28 100644 --- a/.azure-pipelines/model-test.yml +++ b/.azure-pipelines/model-test.yml @@ -10,6 +10,7 @@ pr: include: - neural_compressor - setup.py + - examples/tensorflow/oob_models/quantization/ptq exclude: - neural_compressor/ux diff --git a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh index 18165c10161..1e38888a768 100644 --- a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh @@ -84,8 +84,9 @@ elif [ "${model}" == "darknet19" ]; then strategy="basic" batch_size=1 new_benchmark=false - tuning_cmd="bash run_tuning.sh --topology=${model} --dataset_location= --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --dataset_location= --mode=benchmark --batch_size=1 --iters=500" + inc_new_api=true + tuning_cmd="bash run_tuning.sh --topology=${model} --input_model=${input_model}" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" elif [ "${model}" == "densenet-121" ]; then model_src_dir="oob_models/quantization/ptq" dataset_location="" @@ -94,8 +95,9 @@ elif [ "${model}" == "densenet-121" ]; then strategy="basic" batch_size=1 new_benchmark=false - tuning_cmd="bash run_tuning.sh --topology=${model} --dataset_location= --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --dataset_location= --mode=benchmark --batch_size=1 --iters=500" + inc_new_api=true + tuning_cmd="bash run_tuning.sh --topology=${model} 
--input_model=${input_model}" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" elif [ "${model}" == "resnet-101" ]; then model_src_dir="oob_models/quantization/ptq" dataset_location="" @@ -104,8 +106,9 @@ elif [ "${model}" == "resnet-101" ]; then strategy="basic" batch_size=1 new_benchmark=false - tuning_cmd="bash run_tuning.sh --topology=${model} --dataset_location= --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --dataset_location= --mode=benchmark --batch_size=1 --iters=500" + inc_new_api=true + tuning_cmd="bash run_tuning.sh --topology=${model} --input_model=${input_model}" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" elif [ "${model}" == "resnet50_fashion" ]; then model_src_dir="image_recognition/keras_models/resnet50_fashion/quantization/ptq" dataset_location="/tf_dataset2/datasets/mnist/FashionMNIST_small" From 1887fb3f727060706061b0f9bee8be18dcf02d92 Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Mon, 12 Dec 2022 17:59:59 +0800 Subject: [PATCH 10/14] refine for PR 244 Signed-off-by: Lv, Liang1 --- .../quantization/ptq/run_benchmark.sh | 6 ++-- .../quantization/ptq/tf_benchmark.py | 30 ++++++++++--------- .../quantization/ptq/run_accuracy.py | 6 ++-- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh b/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh index 7a4d02b2a09..f549d719b41 100755 --- a/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/oob_models/quantization/ptq/run_benchmark.sh @@ -32,9 +32,8 @@ function init_params { --batch_size=*) batch_size=$(echo $var |cut -f2 -d=) ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 + --iters=*) + iters=$(echo ${var} |cut -f2 -d=) ;; esac done @@ -42,7 +41,6 @@ function init_params { } function define_mode { - if [[ ${mode} == "accuracy" ]]; then echo "For TF OOB models, there is only benchmark mode!, num iter is: ${iters}" exit 1 diff --git a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py index 00b8ccd07c1..54b74fc9e76 100644 --- a/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py +++ b/examples/tensorflow/oob_models/quantization/ptq/tf_benchmark.py @@ -332,7 +332,7 @@ def __iter__(self): inputs = model_detail['input'] outputs = model_detail['output'] - from neural_compressor.experimental import common + from neural_compressor.data.dataloaders.dataloader import DataLoader from neural_compressor.quantization import fit from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.utils.utility import set_random_seed @@ -350,18 +350,19 @@ def __iter__(self): for i in range(1, len(sparse_input_names)): sparse_input_seq += sparse_input_names[i] input_dense_shape = [tuple(list(i.values())[0]) for i in model_detail['sparse_d_shape'].values()] - from neural_compressor.data import DATASETS - dataset = DATASETS('tensorflow')['sparse_dummy_v2']( + from neural_compressor.data import Datasets + dataset = Datasets('tensorflow')['sparse_dummy_v2']( dense_shape=input_dense_shape, label_shape=[[1] for _ in range(len(input_dense_shape))], sparse_ratio=[1-1/np.multiply(*i) for i in input_dense_shape]) seq_idxs = [sparse_input_seq.index(i) for i in inputs.keys()] - calib_dataloader = common.DataLoader(dataset=dataset, - batch_size=1, - 
collate_fn=oob_collate_sparse_func) + calib_dataloader = DataLoader(framework='tensorflow', + dataset=dataset, + batch_size=1, + collate_fn=oob_collate_sparse_func) else: - from neural_compressor.data import DATASETS - dataset = DATASETS('tensorflow')['dummy']( + from neural_compressor.data import Datasets + dataset = Datasets('tensorflow')['dummy']( shape=inputs_shape, low=low, high=high, dtype=inputs_dtype, @@ -370,12 +371,13 @@ def __iter__(self): if args.model_name and args.model_name in dataloader_dict.keys(): Dataloader = dataloader_dict[args.model_name] else: - Dataloader = common.DataLoader - calib_dataloader = Dataloader(dataset=dataset, - batch_size=args.batch_size, - collate_fn=oob_collate_data_func \ - if model_detail.get('model_name')!='DLRM' \ - else oob_dlrm_collate_func) + Dataloader = DataLoader + calib_dataloader = Dataloader(framework='tensorflow', + dataset=dataset, + batch_size=args.batch_size, + collate_fn=oob_collate_data_func \ + if model_detail.get('model_name')!='DLRM' \ + else oob_dlrm_collate_func) q_model = fit( model=args.model_path, conf=config, diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index fbed7e1f876..d0e2e761ecd 100644 --- a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -203,7 +203,7 @@ def __len__(self): print(args) graph = load_graph(args.input_model) if args.mode == 'tune': - from neural_compressor.experimental import common + from neural_compressor.data.dataloaders.dataloader import DataLoader from neural_compressor.quantization import fit from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.utils.utility import set_random_seed @@ -213,8 +213,8 @@ def __len__(self): q_model = fit( model=graph, conf=config, - calib_dataloader=common.DataLoader(CalibrationDL()), - eval_dataloader=common.DataLoader(CalibrationDL()), + calib_dataloader=DataLoader(framework='tensorflow', dataset=CalibrationDL()), + eval_dataloader=DataLoader(framework='tensorflow', dataset=CalibrationDL()), eval_func=eval_func) try: q_model.save(args.output_model) From 596ab0b0e0b22e0c9925d9c03357dce588a5bcf6 Mon Sep 17 00:00:00 2001 From: "chen, suyue" Date: Tue, 13 Dec 2022 09:43:12 +0800 Subject: [PATCH 11/14] Update run_tensorflow_models_trigger.sh --- .../scripts/models/run_tensorflow_models_trigger.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh index 1e38888a768..adaba1b739d 100644 --- a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh @@ -86,7 +86,7 @@ elif [ "${model}" == "darknet19" ]; then new_benchmark=false inc_new_api=true tuning_cmd="bash run_tuning.sh --topology=${model} --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500" elif [ "${model}" == "densenet-121" ]; then model_src_dir="oob_models/quantization/ptq" dataset_location="" @@ -97,7 +97,7 @@ elif [ "${model}" == "densenet-121" ]; then 
new_benchmark=false inc_new_api=true tuning_cmd="bash run_tuning.sh --topology=${model} --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500" elif [ "${model}" == "resnet-101" ]; then model_src_dir="oob_models/quantization/ptq" dataset_location="" @@ -108,7 +108,7 @@ elif [ "${model}" == "resnet-101" ]; then new_benchmark=false inc_new_api=true tuning_cmd="bash run_tuning.sh --topology=${model} --input_model=${input_model}" - benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=benchmark --batch_size=1 --iters=500" + benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500" elif [ "${model}" == "resnet50_fashion" ]; then model_src_dir="image_recognition/keras_models/resnet50_fashion/quantization/ptq" dataset_location="/tf_dataset2/datasets/mnist/FashionMNIST_small" From f8e8f5d2924a98d91f5fdb937abf155a3e32b7be Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Fri, 16 Dec 2022 15:39:35 +0800 Subject: [PATCH 12/14] update wide_deep_large_ds params Signed-off-by: Lv, Liang1 --- examples/.config/model_params_tensorflow.json | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/examples/.config/model_params_tensorflow.json b/examples/.config/model_params_tensorflow.json index 43da4e4633e..6d1470c74e9 100644 --- a/examples/.config/model_params_tensorflow.json +++ b/examples/.config/model_params_tensorflow.json @@ -535,10 +535,8 @@ "model_src_dir": "recommendation/wide_deep_large_ds/quantization/ptq", "dataset_location": "/tf_dataset/tensorflow/wide_deep_large_ds/dataset", "input_model": "/tf_dataset/tensorflow/wide_deep_large_ds/fp32_optimized_graph.pb", - "yaml": "wide_deep_large_ds.yaml", - "strategy": "basic", - "batch_size": 256, - "new_benchmark": false + "main_script": "inference.py", + "batch_size": 256 }, "style_transfer": { "model_src_dir": "style_transfer/arbitrary_style_transfer/quantization/ptq", From af34a5bdd1d7b1d7af8e89490e5396761f34b06d Mon Sep 17 00:00:00 2001 From: "chen, suyue" Date: Sat, 17 Dec 2022 16:14:09 +0800 Subject: [PATCH 13/14] Update run_benchmark.sh --- .../wide_deep_large_ds/quantization/ptq/run_benchmark.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh index f50f01b4935..045d3a261aa 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/quantization/ptq/run_benchmark.sh @@ -27,10 +27,6 @@ function init_params { --batch_size=*) batch_size=$(echo $var |cut -f2 -d=) ;; - *) - echo "Error: No such parameter: ${var}" - exit 1 - ;; esac done From ceafcaa2835b8972c0fc58b4e2fa41f11688fd6d Mon Sep 17 00:00:00 2001 From: "Lv, Liang1" Date: Mon, 19 Dec 2022 22:25:59 +0800 Subject: [PATCH 14/14] refine benchmark for 3dunet Signed-off-by: Lv, Liang1 --- .../3dunet-mlperf/quantization/ptq/run_accuracy.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py index d0e2e761ecd..43136535cf7 100644 --- 
a/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py +++ b/examples/tensorflow/semantic_image_segmentation/3dunet-mlperf/quantization/ptq/run_accuracy.py @@ -221,4 +221,10 @@ def __len__(self): except Exception as e: print("Failed to save model due to {}".format(str(e))) else: - eval_func(graph) + from neural_compressor.data.dataloaders.dataloader import DataLoader + from neural_compressor.benchmark import fit + from neural_compressor.config import BenchmarkConfig + conf = BenchmarkConfig(cores_per_instance=4, num_of_instance=7) + fit(graph, conf, + b_dataloader=DataLoader(framework='tensorflow', dataset=CalibrationDL()), + b_func=eval_func)
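The final patch routes the 3dunet-mlperf performance path through `neural_compressor.benchmark.fit` instead of calling `eval_func(graph)` directly, so instance placement (cores per instance x number of instances) is handled by the benchmark API rather than by the per-example YAML `performance`/`configs` sections removed elsewhere in this series (e.g., the yolo_v3 YAML above). Below is a self-contained sketch of that pattern on a generic frozen graph; the model path, dummy input shape, and iteration counts are placeholders, not values from these patches.

```python
# Hypothetical sketch of the benchmark.fit pattern introduced in the last patch;
# the model path and dummy shape below are placeholders for illustration.
from neural_compressor.benchmark import fit
from neural_compressor.config import BenchmarkConfig
from neural_compressor.data import Datasets
from neural_compressor.data.dataloaders.dataloader import DataLoader

# Random data stands in for a real preprocessed dataset; the first dim is the sample count.
dataset = Datasets('tensorflow')['dummy'](shape=(100, 224, 224, 3), dtype='float32')
b_dataloader = DataLoader(framework='tensorflow', dataset=dataset, batch_size=1)

# 4 cores x 7 instances mirrors the BenchmarkConfig used in run_accuracy.py above.
conf = BenchmarkConfig(warmup=5, iteration=100, cores_per_instance=4, num_of_instance=7)
fit(model='/path/to/frozen_graph.pb', conf=conf, b_dataloader=b_dataloader)
```
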