Merged
143 commits
2eabc87
rewrite INC config class
violetch24 Mar 27, 2023
d7e77f3
edit configs for quantization
violetch24 Mar 27, 2023
e456cf8
minor fix
violetch24 Mar 27, 2023
6d582b9
Merge branch 'master' into zixuan/new_config
violetch24 Mar 28, 2023
35f699c
fix quantcfg and add dict
violetch24 Mar 28, 2023
842a6e8
Merge branch 'master' into zixuan/new_config
violetch24 Mar 28, 2023
1c3e240
Adjust strategy layer for new API (#766)
yiliu30 Mar 29, 2023
53dc10d
remove some comments
yiliu30 Mar 29, 2023
19a8f03
Merge branch 'master' into zixuan/new_config
violetch24 Mar 29, 2023
57da118
Merge branch 'master' into zixuan/new_config
violetch24 Mar 29, 2023
9ec6ef4
rewrite configs for config classes
violetch24 Mar 29, 2023
eae0001
add deploy config
yiliu30 Mar 29, 2023
a886025
adjust other strategies
yiliu30 Mar 30, 2023
3e11863
replace conf.config with config
yiliu30 Mar 30, 2023
4629174
update strategy uts
yiliu30 Mar 30, 2023
7243bd2
update for mixed precision config
violetch24 Mar 30, 2023
410a793
fixed mix precision
yiliu30 Mar 30, 2023
1643038
update strategies
yiliu30 Mar 30, 2023
6997b7d
update benchmark config
violetch24 Mar 30, 2023
a5590c5
Merge branch 'master' into zixuan/new_config
violetch24 Mar 30, 2023
3a0dd74
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Mar 30, 2023
459d323
update tpe and sigopt
yiliu30 Mar 30, 2023
e922b3f
update tpe ut
yiliu30 Mar 30, 2023
772be16
keep old ut
yiliu30 Mar 30, 2023
3820c45
fix ut
yiliu30 Mar 30, 2023
84e69a0
update strategy UTs
yiliu30 Mar 31, 2023
410cbe9
update callbacks for qat config
violetch24 Mar 31, 2023
60f4256
fix format
violetch24 Mar 31, 2023
435ba6d
update log
yiliu30 Mar 31, 2023
bb2026c
add dump config
yiliu30 Mar 31, 2023
f35d53a
add ut for metric + dataloader
yiliu30 Mar 31, 2023
9e42dd5
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Mar 31, 2023
c00bbd7
fixed metric
yiliu30 Mar 31, 2023
a235d22
Merge branch 'master' into zixuan/new_config
violetch24 Apr 3, 2023
bb36abb
fix for pydocstyle and pylint scan
violetch24 Apr 3, 2023
203db11
minor fix
violetch24 Apr 3, 2023
464fe1f
fix for distillation and pruning 2.x UT
violetch24 Apr 3, 2023
0d662d8
adjust diagnosis
yiliu30 Apr 3, 2023
f89d037
Merge branch 'master' into zixuan/new_config
violetch24 Apr 4, 2023
de1242f
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 4, 2023
7a688f9
split old config and new
violetch24 Apr 4, 2023
be1f526
fixed metric setting
yiliu30 Apr 4, 2023
3d250c0
minor fix
violetch24 Apr 4, 2023
7c56af1
minor fix
violetch24 Apr 4, 2023
debc64c
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Apr 4, 2023
972cf39
update tpe ut
yiliu30 Apr 4, 2023
8f66bc8
config refine
violetch24 Apr 4, 2023
36fc40a
minor fix
violetch24 Apr 4, 2023
9555640
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 4, 2023
d80437d
resolve conflict
yiliu30 Apr 4, 2023
535376c
fix for ut
violetch24 Apr 4, 2023
6ef6d36
update obj
yiliu30 Apr 6, 2023
6e5cb51
fix for ut
violetch24 Apr 6, 2023
414ae30
fixed sigopt and tpe
yiliu30 Apr 6, 2023
ebaab08
fixed fake strategy
yiliu30 Apr 6, 2023
019ba9c
fixed obj
yiliu30 Apr 6, 2023
21b3de6
fixed obj
yiliu30 Apr 6, 2023
4f29ea6
fixed ut
yiliu30 Apr 6, 2023
11a23b5
fixed circular import
yiliu30 Apr 6, 2023
12a153e
Merge branch 'master' into conf
yiliu30 Apr 6, 2023
0bfa7bc
merge with master
yiliu30 Apr 6, 2023
5346588
add contrib
yiliu30 Apr 6, 2023
b01ae68
fix for ut
violetch24 Apr 7, 2023
1cfb3e9
update conf compare
yiliu30 Apr 7, 2023
4e5017c
Merge branch 'master' into zixuan/new_config
violetch24 Apr 7, 2023
a2c5620
fix ut
yiliu30 Apr 7, 2023
e387d79
refine the log hints
yiliu30 Apr 7, 2023
ce763db
add docstring
yiliu30 Apr 7, 2023
3b572af
update conf compare
yiliu30 Apr 7, 2023
6925168
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 7, 2023
d9193ac
update the condition for not tuning
yiliu30 Apr 7, 2023
4a0b396
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 7, 2023
34d8926
fix for ut
violetch24 Apr 7, 2023
d782c9e
minor fix
violetch24 Apr 7, 2023
41ce7f1
fix for ut
violetch24 Apr 7, 2023
cb81d69
fixed tpe
yiliu30 Apr 9, 2023
894c326
fixed resnet50 example
yiliu30 Apr 9, 2023
22e161d
fixed mxnet query
yiliu30 Apr 9, 2023
08d3390
fixed ipex ut
yiliu30 Apr 9, 2023
0704714
Merge branch 'master' into zixuan/new_config
violetch24 Apr 10, 2023
7b1542b
fix for 1.x distillation
violetch24 Apr 10, 2023
5688795
fix for 1.x config ut
violetch24 Apr 10, 2023
c389cb4
add more ut for strategy
yiliu30 Apr 10, 2023
282c3fc
update cfg initialization
yiliu30 Apr 10, 2023
b396b79
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Apr 10, 2023
5e597c9
Merge branch 'master' of https://github.com/intel/neural-compressor i…
lvliang-intel Apr 11, 2023
15cb16d
add metric for tf examples
lvliang-intel Apr 11, 2023
15aa91c
Merge branch 'master' of https://github.com/intel/neural-compressor i…
lvliang-intel Apr 11, 2023
faf1ce4
fix for test
violetch24 Apr 11, 2023
f4b8dce
fix dict
violetch24 Apr 11, 2023
6f138e8
Update inc_dict.txt
violetch24 Apr 14, 2023
4438c62
Merge branch 'master' into zixuan/new_config
violetch24 Apr 14, 2023
61bda8d
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 14, 2023
01be432
fix for merge
violetch24 Apr 14, 2023
5dac605
Merge branch 'master' into zixuan/new_config
violetch24 Apr 14, 2023
cd19d36
Merge branch 'master' into zixuan/new_config
violetch24 Apr 17, 2023
6d01298
fix for ipex recipes
violetch24 Apr 17, 2023
32132c3
Merge branch 'master' into zixuan/new_config
violetch24 Apr 19, 2023
022282c
add config 2.x ut
violetch24 Apr 19, 2023
f025a2e
improve code coverage
yiliu30 Apr 20, 2023
98b724f
fixed pylint check
yiliu30 Apr 20, 2023
62cc581
Merge branch 'master' into zixuan/new_config
violetch24 Apr 20, 2023
5f504a9
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 20, 2023
a486a29
improve code coverage
yiliu30 Apr 20, 2023
8769171
edit ut for coverage
violetch24 Apr 20, 2023
25392dd
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Apr 20, 2023
fdc5669
enhance ut
yiliu30 Apr 20, 2023
d2d19e5
enhance ut
yiliu30 Apr 20, 2023
8c0c5e7
fixed ut
yiliu30 Apr 20, 2023
6ddd67a
Merge branch 'master' into zixuan/new_config
violetch24 Apr 21, 2023
b2f639a
enhance ut coverage
violetch24 Apr 21, 2023
828a1ba
Change global options and _Config
PenghuiCheng Apr 21, 2023
f73760d
Update Code
PenghuiCheng Apr 21, 2023
8c41121
Merge master branch
PenghuiCheng Apr 21, 2023
48da0a3
Fixed UT error
PenghuiCheng Apr 22, 2023
2b104d4
Support obtaining built-in metrics via the Metric class
PenghuiCheng Apr 22, 2023
5e0fe92
Fixed pylint error
PenghuiCheng Apr 22, 2023
a2d6933
Fixed typo
PenghuiCheng Apr 23, 2023
5e298d6
add mixed_precision config
PenghuiCheng Apr 23, 2023
bcf0fbc
distinguish mix precision and quantization
yiliu30 Apr 23, 2023
03e10ad
trim trailing space
yiliu30 Apr 23, 2023
e566721
fixed the mixed precision config
yiliu30 Apr 23, 2023
3cdf18d
Merge branch 'master' into zixuan/new_config
violetch24 Apr 24, 2023
a7b4dd6
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
violetch24 Apr 24, 2023
c0eba52
fix for code scan and enhance coverage
violetch24 Apr 24, 2023
dc8fb6b
Merge branch 'master' into zixuan/new_config
violetch24 Apr 24, 2023
13305aa
fix for ut
violetch24 Apr 24, 2023
5202cf7
fixed format config
yiliu30 Apr 25, 2023
0b94beb
Merge branch 'master' into zixuan/new_config
violetch24 Apr 25, 2023
c056c3a
fix benchmark default value
violetch24 Apr 25, 2023
971659e
minor fix
violetch24 Apr 25, 2023
65258c0
Merge branch 'zixuan/new_config' of https://github.com/intel/neural-c…
yiliu30 Apr 25, 2023
3935732
clean code
yiliu30 Apr 25, 2023
149a557
enhance auto strategy
yiliu30 Apr 26, 2023
510bb32
merge with master
yiliu30 Apr 26, 2023
ffd1543
fixed docstring format issue
yiliu30 Apr 26, 2023
d611d4e
fixed docstring format
yiliu30 Apr 26, 2023
ad61374
fixed coverage
yiliu30 Apr 26, 2023
e73aa34
disable some pylint check
yiliu30 May 4, 2023
d078d45
Merge branch 'master' into enhance_auto
yiliu30 May 4, 2023
849f637
enhance distributed log
yiliu30 May 5, 2023
c6b8b6a
update the initialization method
yiliu30 May 6, 2023
d57de40
Merge branch 'master' into enhance_auto
yiliu30 May 7, 2023
18 changes: 9 additions & 9 deletions neural_compressor/adaptor/torch_utils/hawq_metric.py
@@ -52,9 +52,9 @@ def remove(self):
class HessianTrace:
"""HessianTrace Class.

Please refer to Yao, Zhewei, et al. "Pyhessian: Neural networks through the lens of the hessian."
Please refer to Yao, Zhewei, et al. "Pyhessian: Neural networks through the lens of the hessian."
2020 IEEE international conference on big data (Big data). IEEE, 2020.
Dong, Zhen, et al. "Hawq-v2: Hessian aware trace-weighted quantization of neural networks."
Dong, Zhen, et al. "Hawq-v2: Hessian aware trace-weighted quantization of neural networks."
Advances in neural information processing systems 33 (2020): 18518-18529.
https://github.com/openvinotoolkit/nncf/blob/develop/nncf/torch/quantization/hessian_trace.py
"""
@@ -173,7 +173,7 @@ def act_grad_hook(model, grad_input, grad_output):
def _get_enable_act_grad_hook(self, name):
def enable_act_grad_hook(model, inputs, outputs):
input = inputs[0]
if input.requires_grad is False:
if input.requires_grad is False: #
input.requires_grad = True
self.layer_acts[name] = input

@@ -251,13 +251,13 @@ def _sample_rademacher(self, params):
r.masked_fill_(r == 0, -1)
samples.append(r)
return samples

def _sample_rademacher_like_params(self):
def sample(parameter):
r = torch.randint_like(parameter, high=2, device=self.device)
return r.masked_fill_(r == 0, -1)
return [sample(p) for p in self.params]

def _sample_normal_like_params(self):
return [torch.randn(p.size(), device=self.device) for p in self.params]

@@ -391,7 +391,7 @@ def _insert_hook(self, model, target_module_list):
for layer, module in model.named_modules():
for target_module in target_module_list:
# print("layer:",layer)
# print("target_model:",target_module)
# print("target_model:",target_module)
if layer == target_module:
logging.debug("Collect: %s" % (module))
# print("Collect: %s" % (module))
@@ -408,7 +408,7 @@ def _insert_hook_quantize(self, model, target_module_list):
# print("layer:",layer)
length = len("_model.")
new_key = layer[length:]
# print("target_model:",target_module)
# print("target_model:",target_module)
if new_key == target_module:
logging.debug("Collect: %s" % (module))
# print("Collect: %s" % (module))
@@ -521,7 +521,7 @@ def compare_weights(
float_dict: Dict[str, Any], quantized_dict: Dict[str, Any]
) -> Dict[str, Dict[str, torch.Tensor]]:
r"""Compare the weights of the float module with its corresponding quantized module.

Returns a dict with key corresponding to module names and each entry being
a dictionary with two keys 'float' and 'quantized', containing the float and
quantized weights. This dict can be used to compare and compute the quantization
@@ -608,7 +608,7 @@ def hawq_top(fp32_model, q_model, dataloader, criterion, enable_act):
op_qnt_tensor = weight_quant_loss[key]['quantized'].dequantize()
diff_l2 = (torch.norm(op_float_tensor - op_qnt_tensor, p=2) ** 2)
pertur_lst[key] = diff_l2

if enable_act:
act_to_traces = traces['activation']
for trace_i, pertur_i, act_i in zip(op_to_traces.keys(), pertur_lst.keys(), act_to_traces.keys()):
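The hawq_metric.py hunks above revolve around two ingredients: Rademacher probe vectors for a Hutchinson-style estimate of the Hessian trace, and the squared L2 perturbation between float and dequantized weights computed in hawq_top. A minimal standalone sketch of how those pieces combine into a per-op HAWQ-v2 score is shown below; the helper names and inputs are illustrative, not the file's actual API.

```python
# Minimal sketch, not the neural_compressor implementation: Hutchinson trace
# estimation with Rademacher probes plus a HAWQ-v2 style per-op score.
import torch

def rademacher_like(p: torch.Tensor) -> torch.Tensor:
    """Return a +/-1 probe tensor shaped like p (mirrors _sample_rademacher)."""
    r = torch.randint_like(p, high=2)      # values in {0, 1}
    return r.masked_fill_(r == 0, -1)      # map 0 -> -1

def hutchinson_trace(loss: torch.Tensor, param: torch.Tensor, n_probes: int = 30) -> float:
    """Estimate tr(H) of d^2 loss / d param^2 via E[v^T H v] over Rademacher v."""
    grad = torch.autograd.grad(loss, param, create_graph=True)[0]
    total = 0.0
    for _ in range(n_probes):
        v = rademacher_like(param)
        hv = torch.autograd.grad(grad, param, grad_outputs=v, retain_graph=True)[0]
        total += torch.sum(v * hv).item()
    return total / n_probes

def hawq_score(avg_trace: float, w_fp32: torch.Tensor, w_quant: torch.Tensor) -> float:
    """HAWQ-v2 sensitivity: average Hessian trace times ||W - Q(W)||_2^2.

    w_quant is assumed to be a torch quantized tensor, hence the dequantize()
    call, matching the hawq_top hunk above.
    """
    perturbation = torch.norm(w_fp32 - w_quant.dequantize(), p=2) ** 2
    return avg_trace * perturbation.item()
```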
2 changes: 1 addition & 1 deletion neural_compressor/contrib/strategy/sigopt.py
@@ -194,7 +194,7 @@ def traverse(self):

This is SigOpt version of traverse -- with additional constraints setting to HPO.
"""
self._eval_baseline()
self._prepare_tuning()

baseline_msg = '[Accuracy: {:.4f}'.format(self.baseline[0]) + \
''.join([', {}: {:.4f}'.format(x,y) for x,y in zip( \
1 change: 1 addition & 0 deletions neural_compressor/contrib/strategy/tpe.py
@@ -191,6 +191,7 @@ def _configure_hpopt_search_space_and_params(self, search_space):
def traverse(self):
"""Tpe traverse logic."""
logger.info("Start to run tpe strategy.")
self._prepare_tuning()
# prepare log file
trials_file = os.path.join(os.path.dirname(self.history_path), 'tpe_trials.csv')
best_result_file = os.path.join(os.path.dirname(self.history_path), 'tpe_best_result.csv')
9 changes: 3 additions & 6 deletions neural_compressor/strategy/auto.py
@@ -79,13 +79,10 @@ def sequential_traverse(self):
eval_dataloader=self.eval_dataloader,
eval_metric=self.eval_metric,
resume=self._resume,
q_hooks=self.q_hooks)
q_hooks=self.q_hooks,
pre_strategy = pre_strategy
)

if pre_strategy:
#TODO add tuning history from the previous stage to current stage.
strategy.baseline = deepcopy(pre_strategy.baseline)
strategy.trials_count = pre_strategy.trials_count
strategy.objectives.baseline = deepcopy(pre_strategy.baseline)
pre_strategy = strategy
strategy.traverse()
self.best_qmodel = strategy.best_qmodel
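The auto.py hunk moves the hand-off between tuning stages out of post-construction attribute copying and into the constructor via a new pre_strategy argument. A hedged sketch of that pattern, with illustrative names rather than the real strategy classes:

```python
# Illustrative sketch of the constructor-based hand-off; not the real strategy class.
from copy import deepcopy

class SketchStrategy:
    def __init__(self, pre_strategy=None):
        if pre_strategy is not None:
            # Inherit the previous stage's baseline and trial counter in one place,
            # instead of patching the attributes after construction.
            self.baseline = deepcopy(pre_strategy.baseline)
            self.trials_count = pre_strategy.trials_count
        else:
            self.baseline = None
            self.trials_count = 0

# Sequential traversal then becomes a simple chain (pseudocode):
#   pre_strategy = None
#   for build in strategy_builders:
#       strategy = build(pre_strategy=pre_strategy)
#       strategy.traverse()
#       pre_strategy = strategy
```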
3 changes: 1 addition & 2 deletions neural_compressor/strategy/auto_mixed_precision.py
@@ -128,8 +128,7 @@ def next_tune_cfg(self):

def traverse(self):
"""Traverse the tuning space according to auto-mixed precision strategy."""
# get fp32 model baseline
self._eval_baseline()
self._prepare_tuning()

for op_tuning_cfg in self.next_tune_cfg():
# add tune_cfg here as quantize use tune_cfg
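The sigopt.py, tpe.py, and auto_mixed_precision.py hunks all make the same change: traverse() now begins with a single _prepare_tuning() call instead of a bare _eval_baseline(). A minimal sketch of that shared skeleton, assuming _prepare_tuning folds baseline evaluation into the common setup (the helper bodies here are placeholders, not the library's code):

```python
# Minimal sketch of the common traverse() skeleton; helper bodies are placeholders.
class SketchTuneStrategy:
    def __init__(self):
        self.baseline = None
        self.trials_count = 0

    def _evaluate(self, tune_cfg):
        return 0.0  # placeholder for evaluating one candidate configuration

    def _prepare_tuning(self):
        # Centralized setup: evaluate the FP32 baseline once, reset counters, etc.
        self.baseline = self._evaluate(tune_cfg=None)

    def next_tune_cfg(self):
        yield {}    # placeholder candidate generator

    def traverse(self):
        self._prepare_tuning()          # replaces the per-strategy _eval_baseline()
        for tune_cfg in self.next_tune_cfg():
            self.trials_count += 1
            self._evaluate(tune_cfg)
```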