Skip to content

Commit

Permalink
Add hawq_v2 tuning strategy (#230)
Browse files Browse the repository at this point in the history
Signed-off-by: yiliu30 <yi4.liu@intel.com>
Co-authored-by: lvliang-intel <liang1.lv@intel.com>
Co-authored-by: chen, suyue <suyue.chen@intel.com>
Co-authored-by: xinhe <xin3.he@intel.com>
Co-authored-by: Ray <106061964+yiliu30@users.noreply.github.com>
  • Loading branch information
5 people committed Dec 9, 2022
1 parent 8b652cd commit 83018ef
Show file tree
Hide file tree
Showing 9 changed files with 876 additions and 9 deletions.
18 changes: 18 additions & 0 deletions examples/.config/model_params_pytorch.json
Expand Up @@ -9,6 +9,24 @@
"batch_size": 100,
"new_benchmark": false
},
"efficientnet_b0_fx": {
"model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
"dataset_location": "/tf_dataset/pytorch/ImageNet/raw",
"input_model": "",
"yaml": "conf.yaml",
"strategy": "hawq_v2",
"batch_size": 100,
"new_benchmark": false
},
"efficientnet_b3_fx": {
"model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
"dataset_location": "/tf_dataset/pytorch/ImageNet/raw",
"input_model": "",
"yaml": "conf.yaml",
"strategy": "hawq_v2",
"batch_size": 100,
"new_benchmark": false
},
"resnet18_fx": {
"model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
"dataset_location": "/tf_dataset/pytorch/ImageNet/raw",
Expand Down
Expand Up @@ -77,4 +77,4 @@ tuning:
relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%.
exit_policy:
timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit.
random_seed: 9527 # optional. random seed for deterministic tuning.
random_seed: 9527 # optional. random seed for deterministic tuning.
29 changes: 28 additions & 1 deletion neural_compressor/adaptor/pytorch.py
Expand Up @@ -30,7 +30,6 @@
from .query import QueryBackendCapability
from ..experimental.data.dataloaders.base_dataloader import BaseDataLoader


torch = LazyImport("torch")
json = LazyImport("json")
hvd = LazyImport("horovod.torch")
Expand Down Expand Up @@ -1094,6 +1093,34 @@ def is_fused_module(self, module):
return True
else:
return False

def calculate_hessian_trace(self,
                            fp32_model,
                            dataloader,
                            q_model,
                            criterion,
                            enable_act=False):
    """Calculate the Hessian trace of each operator in the model.

    Used by the hawq_v2 tuning strategy to rank operators by their
    quantization sensitivity.

    Args:
        fp32_model: The original fp32 model.
        dataloader: The dataloader used to compute the gradients.
        q_model: The quantized (INT8) model.
        criterion: The loss function used to compute the Hessian trace,
            e.g. ``loss = criterion(output, target)``.
        enable_act: Whether to include activation quantization error.
            Defaults to False.

    Returns:
        Dict[Tuple, float]: key is ``(op_name, op_type)``; value is the
            Hessian trace of that op.
    """
    # Imported lazily so the hawq metric's dependencies are only pulled
    # in when the hawq_v2 strategy is actually used.
    from .torch_utils.hawq_metric import hawq_top
    op_to_traces = hawq_top(fp32_model=fp32_model,
                            dataloader=dataloader,
                            q_model=q_model,
                            criterion=criterion,
                            enable_act=enable_act)
    return op_to_traces


unify_op_type_mapping = {
Expand Down

0 comments on commit 83018ef

Please sign in to comment.