
Commit a435389

Added optimization level for new API & support sub-parameter setting for strategy (#222)
Signed-off-by: yiliu30 <[email protected]>
1 parent 3a5afba commit a435389
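
This commit extends the new (pythonic) configuration API in two ways: TuningCriterion gains a strategy_kwargs argument so that strategy-specific sub-parameters (SigOpt credentials, TPE accuracy/latency weights) can be passed through to the tuning configuration, and the quantization configs gain an optimization_level knob (default 1). A minimal usage sketch, adapted from the tests added below; the SigOpt token and project id are placeholder values:

from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion

# Strategy sub-parameters travel in a plain dict via strategy_kwargs.
strategy_kwargs = {'sigopt_api_token': 'placeholder-token',
                   'sigopt_project_id': 'placeholder-project',
                   'sigopt_experiment_name': 'nc-tune'}
tuning_criterion = TuningCriterion(strategy='sigopt',
                                   strategy_kwargs=strategy_kwargs,
                                   max_trials=3)

# optimization_level defaults to 1; the YAML schema accepts 0 or 1.
conf = PostTrainingQuantConfig(approach='static',
                               backend='tensorflow',
                               optimization_level=1,
                               tuning_criterion=tuning_criterion)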

File tree: 5 files changed (+113 lines, -11 lines)

neural_compressor/conf/config.py

Lines changed: 11 additions & 0 deletions

@@ -841,6 +841,7 @@ def percent_to_float(data):
             },
         },
         Optional('use_bf16', default=True): bool,
+        Optional('optimization_level', default=1): And(int, lambda level: level in [0, 1]),
         Optional('graph_optimization'): graph_optimization_schema,
         Optional('mixed_precision'): mixed_precision_schema,

@@ -1111,6 +1112,7 @@ def percent_to_float(data):
                             'activation': {}},
         }): dict,
         Optional('use_bf16', default=False): bool,
+        Optional('optimization_level', default=1): int,
         Optional('tuning', default={
             'strategy': {'name': 'basic'},
             'accuracy_criterion': {'relative': 0.01, 'higher_is_better': True},

@@ -1346,8 +1348,17 @@ def map_pyconfig_to_cfg(self, pythonic_config):
                 'tuning.exit_policy.max_trials': pythonic_config.quantization.max_trials,
                 'tuning.exit_policy.performance_only': pythonic_config.quantization.performance_only,
                 'use_bf16': pythonic_config.quantization.use_bf16,
+                'quantization.optimization_level': pythonic_config.quantization.optimization_level,
                 'reduce_range': pythonic_config.quantization.reduce_range
             })
+            if pythonic_config.quantization.strategy_kwargs:
+                st_kwargs = pythonic_config.quantization.strategy_kwargs
+                for st_key in ['sigopt_api_token', 'sigopt_project_id', 'sigopt_experiment_name', \
+                               'accuracy_weight', 'latency_weight']:
+                    if st_key in st_kwargs:
+                        st_val = st_kwargs[st_key]
+                        mapping.update({'tuning.strategy.' + st_key: st_val})
+
             if pythonic_config.distillation is not None:
                 mapping.update({
                     'distillation.train.criterion': pythonic_config.distillation.criterion,

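The whitelist loop above flattens recognized strategy_kwargs keys into dotted config paths under tuning.strategy. A minimal standalone sketch of the transformation (illustrative values; keys outside the whitelist are silently skipped):

# Standalone illustration of the flattening performed by map_pyconfig_to_cfg above.
strategy_kwargs = {'sigopt_experiment_name': 'nc-tune',
                   'accuracy_weight': 1.0,
                   'not_whitelisted': 42}   # not in the whitelist, so it is dropped

mapping = {}
for st_key in ['sigopt_api_token', 'sigopt_project_id', 'sigopt_experiment_name',
               'accuracy_weight', 'latency_weight']:
    if st_key in strategy_kwargs:
        mapping['tuning.strategy.' + st_key] = strategy_kwargs[st_key]

# mapping == {'tuning.strategy.sigopt_experiment_name': 'nc-tune',
#             'tuning.strategy.accuracy_weight': 1.0}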
neural_compressor/conf/pythonic_config.py

Lines changed: 19 additions & 3 deletions

@@ -34,17 +34,33 @@ def __init__(self,
                  op_type_list=None,
                  op_name_list=None,
                  strategy='basic',
+                 strategy_kwargs=None,
                  objective='performance',
                  timeout=0,
                  max_trials=100,
                  performance_only=False,
                  reduce_range=None,
                  use_bf16=True,
+                 optimization_level=1,
                  accuracy_criterion=accuracy_criterion):
         extra_precisions = ["bf16"] if use_bf16 else []
-        super().__init__(inputs, outputs, backend, device, calibration_sampling_size, op_type_list,
-                         op_name_list, strategy, objective, timeout, max_trials, performance_only,
-                         reduce_range, extra_precisions, accuracy_criterion)
+        super().__init__(inputs=inputs,
+                         outputs=outputs,
+                         backend=backend,
+                         device=device,
+                         calibration_sampling_size=calibration_sampling_size,
+                         op_type_list=op_type_list,
+                         op_name_list=op_name_list,
+                         strategy=strategy,
+                         strategy_kwargs=strategy_kwargs,
+                         objective=objective,
+                         timeout=timeout,
+                         max_trials=max_trials,
+                         performance_only=performance_only,
+                         reduce_range=reduce_range,
+                         extra_precisions=extra_precisions,
+                         optimization_level=optimization_level,
+                         accuracy_criterion=accuracy_criterion)
         self._approach = approach

     @property

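For completeness, a sketch of passing the new arguments through this pythonic layer directly; the import path and the assumption that the patched class is QuantizationConfig are inferred from the __init__ above, not stated in this diff:

# Assumed class/module names; only the parameters are confirmed by the diff above.
from neural_compressor.conf.pythonic_config import QuantizationConfig

q_conf = QuantizationConfig(strategy='sigopt',
                            strategy_kwargs={'sigopt_experiment_name': 'nc-tune'},
                            optimization_level=1,
                            use_bf16=True)   # use_bf16=True becomes extra_precisions=["bf16"]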
neural_compressor/config.py

Lines changed: 39 additions & 6 deletions

@@ -307,12 +307,14 @@ def __init__(self,
                  op_type_list=None,
                  op_name_list=None,
                  strategy="basic",
+                 strategy_kwargs=None,
                  objective="performance",
                  timeout=0,
                  max_trials=100,
                  performance_only=False,
                  reduce_range=None,
                  extra_precisions=["bf16"],
+                 optimization_level=1,
                  accuracy_criterion=accuracy_criterion):
         self._inputs = inputs
         self._outputs = outputs

@@ -321,13 +323,15 @@ def __init__(self,
         self._op_type_list = op_type_list
         self._op_name_list = op_name_list
         self._strategy = strategy
+        self._strategy_kwargs = strategy_kwargs
         self._objective = objective
         self._timeout = timeout
         self._max_trials = max_trials
         self._performance_only = performance_only
         self._reduce_range = reduce_range
         self._extra_precisions = extra_precisions \
             if isinstance(extra_precisions, List) else [extra_precisions]
+        self._optimization_level = optimization_level
         self.use_bf16 = "bf16" in self._extra_precisions
         self._accuracy_criterion = accuracy_criterion
         self._calibration_sampling_size = calibration_sampling_size

@@ -346,6 +350,14 @@ def extra_precisions(self, extra_precisions):
         self._extra_precisions = extra_precisions
         self._use_bf16 = "bf16" in extra_precisions

+    @property
+    def optimization_level(self):
+        return self._optimization_level
+
+    @optimization_level.setter
+    def optimization_level(self, optimization_level):
+        self._optimization_level = optimization_level
+
     @property
     def reduce_range(self):
         return self._reduce_range

@@ -399,9 +411,17 @@ def strategy(self):
     @strategy.setter
     def strategy(self, strategy):
         if check_value('strategy', strategy, str,
-                       ['basic', 'mse', 'bayesian', 'random', 'exhaustive']):
+                       ['basic', 'mse', 'bayesian', 'random', 'exhaustive', 'sigopt', 'tpe']):
             self._strategy = strategy

+    @property
+    def strategy_kwargs(self):
+        return self._strategy_kwargs
+
+    @strategy_kwargs.setter
+    def strategy_kwargs(self, strategy_kwargs):
+        self._strategy_kwargs = strategy_kwargs
+
     @property
     def op_name_list(self):
         return self._op_name_list

@@ -480,11 +500,12 @@ def inputs(self, inputs):


 class TuningCriterion:
-    def __init__(self, strategy="basic", timeout=0, max_trials=100, objective="performance"):
+    def __init__(self, strategy="basic", strategy_kwargs=None, timeout=0, max_trials=100, objective="performance"):
         self._strategy = strategy
         self._timeout = timeout
         self._max_trials = max_trials
         self._objective = objective
+        self._strategy_kwargs = strategy_kwargs

     @property
     def max_trials(self):

@@ -521,9 +542,16 @@ def strategy(self):
     @strategy.setter
     def strategy(self, strategy):
         if check_value('strategy', strategy, str,
-                       ['basic', 'mse', 'bayesian', 'random', 'exhaustive']):
+                       ['basic', 'mse', 'bayesian', 'random', 'exhaustive', 'sigopt', 'tpe']):
             self._strategy = strategy
-
+
+    @property
+    def strategy_kwargs(self):
+        return self._strategy_kwargs
+
+    @strategy_kwargs.setter
+    def strategy_kwargs(self, strategy_kwargs):
+        self._strategy_kwargs = strategy_kwargs

 tuning_criterion = TuningCriterion()

@@ -540,6 +568,7 @@ def __init__(self,
                  op_name_list=None,
                  reduce_range=None,
                  extra_precisions = ["bf16"],
+                 optimization_level=1,
                  tuning_criterion=tuning_criterion,
                  accuracy_criterion=accuracy_criterion,
                  ):

@@ -551,11 +580,13 @@ def __init__(self,
                          op_type_list=op_type_list,
                          op_name_list=op_name_list,
                          strategy=tuning_criterion.strategy,
+                         strategy_kwargs=tuning_criterion.strategy_kwargs,
                          objective=tuning_criterion.objective,
                          timeout=tuning_criterion.timeout,
                          max_trials=tuning_criterion.max_trials,
                          reduce_range=reduce_range,
                          extra_precisions=extra_precisions,
+                         optimization_level=optimization_level,
                          accuracy_criterion=accuracy_criterion)
         self.approach = approach

@@ -578,10 +609,12 @@ def __init__(self,
                  op_type_list=None,
                  op_name_list=None,
                  reduce_range=None,
-                 extra_precisions=["bf16"]):
+                 extra_precisions=["bf16"],
+                 optimization_level=1):
         super().__init__(inputs=inputs, outputs=outputs, device=device, backend=backend,
                          op_type_list=op_type_list, op_name_list=op_name_list,
-                         reduce_range=reduce_range, extra_precisions=extra_precisions)
+                         reduce_range=reduce_range, extra_precisions=extra_precisions,
+                         optimization_level=optimization_level)
         self._approach = 'quant_aware_training'

     @property

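Both knobs are also exposed as read/write properties, so they can be adjusted after construction. A small sketch using the TPE strategy (the weight values are illustrative; both keys are in the mapping whitelist):

from neural_compressor.config import PostTrainingQuantConfig, TuningCriterion

tuning_criterion = TuningCriterion(strategy='basic')
tuning_criterion.strategy = 'tpe'    # 'sigopt' and 'tpe' now pass the setter's check_value
tuning_criterion.strategy_kwargs = {'accuracy_weight': 1.0, 'latency_weight': 1.0}

conf = PostTrainingQuantConfig(tuning_criterion=tuning_criterion,
                               optimization_level=1)
conf.optimization_level = 0          # adjustable later via the new property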
test/strategy/test_basic.py

Lines changed: 20 additions & 1 deletion

@@ -155,7 +155,7 @@ def build_fake_model():
             tf.import_graph_def(graph_def, name='')
     return graph

-class TestQuantization(unittest.TestCase):
+class TestBasicTuningStrategy(unittest.TestCase):

     @classmethod
     def setUpClass(self):

@@ -217,6 +217,25 @@ def test_run_basic_max_trials_multimetric_weight(self):
         quantizer.model = self.constant_graph
         quantizer.fit()

+
+    def test_run_basic_one_trial_new_api(self):
+        from neural_compressor.quantization import fit
+        from neural_compressor.config import AccuracyCriterion, AccuracyLoss, PostTrainingQuantConfig, TuningCriterion
+        from neural_compressor.data import DATASETS, DATALOADERS
+
+        # dataset and dataloader
+        dataset = DATASETS("tensorflow")["dummy"](((100, 3, 3, 1)))
+        dataloader = DATALOADERS["tensorflow"](dataset)
+
+        # tuning and accuracy criterion
+        tolerable_loss = AccuracyLoss(0.01)
+        accuracy_criterion = AccuracyCriterion(criterion='relative', tolerable_loss=tolerable_loss)
+        tuning_criterion = TuningCriterion(strategy='basic')
+        conf = PostTrainingQuantConfig(approach="static", backend="tensorflow",
+                                       tuning_criterion=tuning_criterion,
+                                       accuracy_criterion=accuracy_criterion)
+        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_dataloader=dataloader)
+        self.assertIsNotNone(q_model)

 if __name__ == "__main__":
     unittest.main()

test/strategy/test_sigopt.py

Lines changed: 24 additions & 1 deletion

@@ -104,7 +104,7 @@ def build_fake_model():
     return graph

 @unittest.skipIf(CONDITION, "missing the env variables 'SIGOPT_API_TOKEN' or 'SIGOPT_PROJECT_ID'")
-class TestQuantization(unittest.TestCase):
+class TestSigoptTuningStrategy(unittest.TestCase):

     @classmethod
     def setUpClass(self):

@@ -140,6 +140,29 @@ def test_run_basic_max_trials(self):
         quantizer.eval_dataloader = common.DataLoader(dataset)
         quantizer.model = self.constant_graph
         quantizer.fit()
+
+    def test_run_sigopt_one_trial_new_api(self):
+        from neural_compressor.quantization import fit
+        from neural_compressor.config import AccuracyCriterion, AccuracyLoss, PostTrainingQuantConfig, TuningCriterion
+        from neural_compressor.data import DATASETS, DATALOADERS
+
+        # dataset and dataloader
+        dataset = DATASETS("tensorflow")["dummy"](((100, 3, 3, 1)))
+        dataloader = DATALOADERS["tensorflow"](dataset)
+
+        # tuning and accuracy criterion
+        tolerable_loss = AccuracyLoss(0.01)
+        accuracy_criterion = AccuracyCriterion(criterion='relative', tolerable_loss=tolerable_loss)
+        strategy_kwargs = {'sigopt_api_token': 'sigopt_api_token_test',
+                           'sigopt_project_id': 'sigopt_project_id_test',
+                           'sigopt_experiment_name': 'nc-tune'}
+        tuning_criterion = TuningCriterion(strategy='sigopt', strategy_kwargs=strategy_kwargs, max_trials=3)
+        conf = PostTrainingQuantConfig(approach="static", backend="tensorflow",
+                                       tuning_criterion=tuning_criterion,
+                                       accuracy_criterion=accuracy_criterion)
+        q_model = fit(model=self.constant_graph, conf=conf, calib_dataloader=dataloader, eval_dataloader=dataloader)
+        self.assertIsNotNone(q_model)


 if __name__ == "__main__":
     unittest.main()
