Commit 57abd39

BiaoFangAIA, lvliang-intel, chensuyue, xin3he, and yiliu30 authored and committed

Add hawq_v2 tuning strategy (#230)

Signed-off-by: yiliu30 <[email protected]>
Co-authored-by: lvliang-intel <[email protected]>
Co-authored-by: chen, suyue <[email protected]>
Co-authored-by: xinhe <[email protected]>
Co-authored-by: Ray <[email protected]>
Signed-off-by: zehao-intel <[email protected]>
1 parent ce8d724 commit 57abd39

File tree: 9 files changed, +876 −9 lines changed


examples/.config/model_params_pytorch.json

Lines changed: 18 additions & 0 deletions

@@ -9,6 +9,24 @@
       "batch_size": 100,
       "new_benchmark": false
     },
+    "efficientnet_b0_fx": {
+      "model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
+      "dataset_location": "/tf_dataset/pytorch/ImageNet/raw",
+      "input_model": "",
+      "yaml": "conf.yaml",
+      "strategy": "hawq_v2",
+      "batch_size": 100,
+      "new_benchmark": false
+    },
+    "efficientnet_b3_fx": {
+      "model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
+      "dataset_location": "/tf_dataset/pytorch/ImageNet/raw",
+      "input_model": "",
+      "yaml": "conf.yaml",
+      "strategy": "hawq_v2",
+      "batch_size": 100,
+      "new_benchmark": false
+    },
     "resnet18_fx": {
       "model_src_dir": "image_recognition/torchvision_models/quantization/ptq/cpu/fx/",
       "dataset_location": "/tf_dataset/pytorch/ImageNet/raw",

examples/pytorch/image_recognition/torchvision_models/quantization/ptq/cpu/fx/conf.yaml

Lines changed: 1 addition & 1 deletion

@@ -77,4 +77,4 @@ tuning:
     relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%.
   exit_policy:
     timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit.
-  random_seed: 9527 # optional. random seed for deterministic tuning.
+  random_seed: 9527 # optional. random seed for deterministic tuning.

(The removed and added lines are textually identical; the change is most likely whitespace-only, e.g. trailing spaces or the newline at end of file.)

neural_compressor/adaptor/pytorch.py

Lines changed: 28 additions & 1 deletion

@@ -30,7 +30,6 @@
 from .query import QueryBackendCapability
 from ..experimental.data.dataloaders.base_dataloader import BaseDataLoader

-
 torch = LazyImport("torch")
 json = LazyImport("json")
 hvd = LazyImport("horovod.torch")

@@ -1094,6 +1093,34 @@ def is_fused_module(self, module):
             return True
         else:
             return False
+
+    def calculate_hessian_trace(self,
+                                fp32_model,
+                                dataloader,
+                                q_model,
+                                criterion,
+                                enable_act=False
+                                ):
+        """Calculate the Hessian trace.
+
+        Args:
+            fp32_model: The original fp32 model.
+            criterion: The loss function used to compute the Hessian trace.  # loss = criterion(output, target)
+            dataloader: The dataloader used to compute the gradients.
+            q_model: The INT8 AMAP model.
+            enable_act: Whether to enable the activation quantization error or not.
+
+        Return:
+            hessian_trace (Dict[Tuple, float]): key: (op_name, op_type); value: Hessian trace.
+        """
+        from .torch_utils.hawq_metric import hawq_top
+        op_to_traces = hawq_top(fp32_model=fp32_model,
+                                dataloader=dataloader,
+                                q_model=q_model,
+                                criterion=criterion,
+                                enable_act=enable_act)
+        return op_to_traces
+        pass


 unify_op_type_mapping = {
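
The new adaptor entry point delegates to hawq_top in torch_utils.hawq_metric and returns per-op Hessian traces that the hawq_v2 strategy can use to rank quantization sensitivity. As background only, the sketch below shows one standard way such traces can be estimated, Hutchinson's method (trace(H) ≈ E[vᵀHv] for random ±1 vectors v), using plain PyTorch autograd Hessian-vector products. It is a simplified illustration under those assumptions, not the repository's hawq_metric implementation; the helper name estimate_hessian_traces and the single-batch setup are hypothetical.

    import torch

    def estimate_hessian_traces(model, dataloader, criterion, n_samples=16):
        """Hypothetical helper: per-parameter Hessian-trace estimates via Hutchinson's method."""
        model.eval()
        inputs, targets = next(iter(dataloader))                      # one calibration batch
        loss = criterion(model(inputs), targets)                      # loss = criterion(output, target)
        names, params = zip(*[(n, p) for n, p in model.named_parameters() if p.requires_grad])
        grads = torch.autograd.grad(loss, params, create_graph=True)  # keep graph for Hv products
        traces = {name: 0.0 for name in names}
        for _ in range(n_samples):
            vs = [torch.randint_like(p, 2) * 2.0 - 1.0 for p in params]   # Rademacher +/-1 vectors
            hvs = torch.autograd.grad(grads, params, grad_outputs=vs, retain_graph=True)
            for name, v, hv in zip(names, vs, hvs):
                traces[name] += torch.sum(v * hv).item() / n_samples  # running average of v^T H v
        return traces

Against the adaptor API added above, a call would look like op_to_traces = adaptor.calculate_hessian_trace(fp32_model, dataloader, q_model, criterion=torch.nn.CrossEntropyLoss()), with enable_act left at its default of False; the returned (op_name, op_type) → trace mapping is what the hawq_v2 strategy consumes.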
