18 | 18 | """The auto-mixed precision strategy.""" |
19 | 19 |
20 | 20 | import copy |
21 | | -import numpy as np |
22 | | -from collections import OrderedDict |
| 21 | +from collections import OrderedDict, defaultdict |
| 22 | +from itertools import groupby |
23 | 23 | from .strategy import strategy_registry, TuneStrategy |
24 | 24 | from ..utils import logger |
25 | | -from .utils.tuning_sampler import OpTypeWiseTuningSampler, FallbackTuningSampler |
| 25 | +from .utils.tuning_sampler import FallbackTuningSampler |
26 | 26 | from .utils.tuning_structs import OpTuningConfig |
27 | 27 | from neural_compressor.adaptor.torch_utils.mixed_precision import ipex_mixed_precision |
28 | 28 |
@@ -50,6 +50,7 @@ def _initialize_config(self, conf): |
50 | 50 | config.domain = getattr(config, 'domain', None) |
51 | 51 | config.reduce_range = getattr(config, 'reduce_range', None) |
52 | 52 | config.example_inputs = getattr(config, 'example_inputs', None) |
| 53 | + config.quant_level = getattr(config, "quant_level", "auto") |
53 | 54 | return config |
54 | 55 |
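
The new `quant_level` knob (default `"auto"`) decides which fallback passes run in step 2 of `next_tune_cfg` below. A minimal sketch of that gating, distilled from the dispatch code in this patch; the helper name `fallback_stages` and the stage labels are illustrative only, not library API:

```python
# Sketch (not library code): how quant_level gates the two fallback passes.
# "auto" runs the coarse op-type-wise pass first, then the fine op-wise pass.
def fallback_stages(quant_level):
    stages = []
    if quant_level in ("auto", 0):  # coarse: fall back whole op types at once
        stages.append("op_type_wise")
    if quant_level in ("auto", 1):  # fine: fall back one op at a time
        stages.append("op_wise")
    return stages

assert fallback_stages("auto") == ["op_type_wise", "op_wise"]
assert fallback_stages(0) == ["op_type_wise"]
assert fallback_stages(1) == ["op_wise"]
```
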
55 | 56 | def next_tune_cfg(self): |
@@ -79,54 +80,116 @@ def next_tune_cfg(self): |
79 | 80 | if not target_dtypes: |
80 | 81 | target_dtypes = ['bf16'] |
81 | 82 | # step 1. target_dtype AMAP (as much as possible): collect the ops that support target_dtype
82 | | - bf16_items_name = [] |
| 83 | + lower_precision_items_name = [] |
83 | 84 | op_tuning_cfg = {} |
84 | 85 | for idx, target_dtype in enumerate(target_dtypes): |
85 | | - bf16_items = tuning_space.query_items_by_quant_mode(target_dtype) |
86 | | - if len(bf16_items) == 0 and \ |
87 | | - not (idx == len(target_dtypes) - 1 and len(bf16_items_name) == 0): |
| 86 | + lower_precision_items = tuning_space.query_items_by_quant_mode(target_dtype) |
| 87 | + if len(lower_precision_items) == 0 and \ |
| 88 | + not (idx == len(target_dtypes) - 1 and len(lower_precision_items_name) == 0): |
88 | 89 | continue |
89 | | - bf16_items_name = [item.name for item in bf16_items] |
| 90 | + lower_precision_items_name = [item.name for item in lower_precision_items] |
90 | 91 | op_tuning_cfg = deepcopy(initial_op_tuning_cfg) |
91 | | - for op_name_type in bf16_items_name: |
| 92 | + for op_name_type in lower_precision_items_name: |
92 | 93 | op_tuning_cfg[op_name_type] = \ |
93 | 94 | OpTuningConfig(op_name_type[0], op_name_type[1], target_dtype, tuning_space) |
94 | 95 | calib_sampling_size = 1 |
95 | 96 | op_tuning_cfg['calib_sampling_size'] = calib_sampling_size |
96 | 97 | yield op_tuning_cfg |
97 | 98 |
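
The continue-condition in the loop above is easy to misread: a target dtype with no supported ops is skipped, unless it is the last dtype and no earlier dtype matched anything, in which case the loop still falls through and yields a config so tuning can proceed. A standalone paraphrase of that predicate (`skip_dtype` is my name, not part of the patch):

```python
def skip_dtype(idx, num_dtypes, items_for_dtype, items_found_so_far):
    # Mirrors: len(lower_precision_items) == 0 and
    #     not (idx == len(target_dtypes) - 1 and len(lower_precision_items_name) == 0)
    is_last = idx == num_dtypes - 1
    nothing_found_yet = len(items_found_so_far) == 0
    return len(items_for_dtype) == 0 and not (is_last and nothing_found_yet)

# No bf16 ops, but another dtype is still pending: skip bf16.
assert skip_dtype(0, 2, [], []) is True
# Last dtype and nothing matched at all: do not skip; yield a config anyway.
assert skip_dtype(1, 2, [], []) is False
```
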
98 | | - # step2. fallback |
99 | | - target_dtype = 'fp32' |
100 | | - fallback_items_name_lst = bf16_items_name[::-1] |
| 99 | + # step 2. fall back ops to fp32
| 100 | + # quant_level:
| 101 | + # auto: op-type-wise -> op-wise
| 102 | + # 0: op-type-wise
| 103 | + # 1: op-wise
| 104 | +
| 105 | + # if quant_level is auto or 0, do op-type-wise fallback
| 106 | + target_dtype = "fp32" |
| 107 | + fallback_items_name_lst = lower_precision_items_name[::-1] |
101 | 108 | if fallback_items_name_lst: |
102 | | - logger.info(f"Start to fallback op to {target_dtype} one by one.") |
103 | | - self._fallback_started() |
104 | | - op_dtypes = OrderedDict(zip(fallback_items_name_lst, [target_dtype] * len(fallback_items_name_lst))) |
| 109 | + logger.info("[Strategy] Start to fall back ops to fp32.")
105 | 110 | initial_op_tuning_cfg = deepcopy(op_tuning_cfg) |
| 111 | + if self.config.quant_level in ["auto", 0]: |
| 112 | + logger.info("[Strategy] Fall back ops to fp32 op-type-wise, "
| 113 | + f"as quant_level is {self.config.quant_level}.")
| 114 | + for op_tuning_cfg in self.fallback_in_op_type_wise(tuning_space, fallback_items_name_lst,
| 115 | + deepcopy(initial_op_tuning_cfg), target_dtype):
| 116 | + yield op_tuning_cfg |
| 117 | + |
| 118 | + # if quant_level is auto or 1, do op-wise fallback
| 119 | + if self.config.quant_level in ["auto", 1]: |
| 120 | + logger.info("[Strategy] Fall back ops to fp32 op-wise, "
| 121 | + f"as quant_level is {self.config.quant_level}.")
| 122 | + for op_tuning_cfg in self.fallback_in_op_wise(tuning_space, fallback_items_name_lst,
| 123 | + deepcopy(initial_op_tuning_cfg), target_dtype):
| 124 | + yield op_tuning_cfg |
| 125 | + |
| 126 | + def fallback_in_op_type_wise(self, tuning_space, fallback_items_name_lst, initial_op_tuning_cfg, target_dtype): |
| 127 | + """Fallback op in op type wise. |
| 128 | +
| 129 | + Args: |
| 130 | + tuning_space: tuning space |
| 131 | + fallback_items_name_lst: the list of (op_name, op_type) items to fall back
| 132 | + initial_op_tuning_cfg: initial tuning config |
| 133 | + target_dtype: target data type, such as fp32 |
| 134 | +
| 135 | + Yields: |
| 136 | + tuning config with the next op type fallen back
| 137 | + """ |
| 138 | + fallback_items_name_lst.sort(key=lambda x: x[1]) |
| 139 | + op_type_groups = groupby(fallback_items_name_lst, key=lambda x: x[1]) |
| 140 | + # key: ((op1_name, op_type1),(op2_name, op_type1), (op3_name, op_type1), ...) |
| 141 | + # value: target dtype |
| 142 | + ops_dtypes = OrderedDict() |
| 143 | + for op_type, op_lst in op_type_groups: |
| 144 | + ops_dtypes[tuple(op_lst)] = target_dtype |
106 | 145 | fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], |
107 | | - initial_op_tuning_cfg=initial_op_tuning_cfg, |
108 | | - op_dtypes=op_dtypes, accumulate=False) |
| 146 | + initial_op_tuning_cfg=initial_op_tuning_cfg, |
| 147 | + op_dtypes=ops_dtypes, accumulate=False) |
109 | 148 | op_fallback_acc_impact = OrderedDict() |
110 | 149 | for op_index, op_tuning_cfg in enumerate(fallback_sampler): |
111 | | - op_tuning_cfg['calib_sampling_size'] = calib_sampling_size |
| 150 | + op_tuning_cfg['calib_sampling_size'] = -1 |
| 151 | + yield op_tuning_cfg |
| 152 | + acc, _ = self.last_tune_result |
| 153 | + op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc |
| 154 | +
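
`fallback_in_op_type_wise` groups the `(op_name, op_type)` items by type before handing them to the sampler, so each tuning trial falls back a whole op type at once. Note that `itertools.groupby` only merges adjacent equal keys, which is why the list is sorted by op type first. A small demonstration with made-up op names:

```python
from collections import OrderedDict
from itertools import groupby

items = [("conv1", "Conv2d"), ("fc1", "Linear"), ("conv2", "Conv2d")]
items.sort(key=lambda x: x[1])  # groupby only merges *adjacent* equal keys
ops_dtypes = OrderedDict(
    (tuple(op_lst), "fp32") for _, op_lst in groupby(items, key=lambda x: x[1])
)
assert list(ops_dtypes) == [
    (("conv1", "Conv2d"), ("conv2", "Conv2d")),  # all Conv2d ops fall back together
    (("fc1", "Linear"),),
]
```
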
| 155 | + def fallback_in_op_wise(self, tuning_space, fallback_items_name_lst, initial_op_tuning_cfg, target_dtype): |
| 156 | + """Fallback op in op wise. |
| 157 | +
| 158 | + Args: |
| 159 | + tuning_space: tuning space |
| 160 | + fallback_items_name_lst: the list of (op_name, op_type) items to fall back
| 161 | + initial_op_tuning_cfg: initial tuning config |
| 162 | + target_dtype: target data type, such as fp32 |
| 163 | +
| 164 | + Yields: |
| 165 | + tuning config with the next op fallen back
| 166 | + """ |
| 167 | + op_dtypes = OrderedDict(zip(fallback_items_name_lst, [target_dtype] * len(fallback_items_name_lst))) |
| 168 | + fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], |
| 169 | + initial_op_tuning_cfg=initial_op_tuning_cfg, |
| 170 | + op_dtypes=op_dtypes, accumulate=False) |
| 171 | + op_fallback_acc_impact = OrderedDict() |
| 172 | + for op_index, op_tuning_cfg in enumerate(fallback_sampler): |
| 173 | + op_tuning_cfg['calib_sampling_size'] = -1 |
112 | 174 | yield op_tuning_cfg |
113 | 175 | acc, _ = self.last_tune_result |
114 | 176 | op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc |
115 | 177 |
116 | 178 | # do accumulated fallback according to the order in the previous stage |
117 | 179 | if len(op_fallback_acc_impact) > 0: |
118 | | - ordered_ops = sorted(op_fallback_acc_impact.keys(), key=lambda key: op_fallback_acc_impact[key], |
119 | | - reverse=self.higher_is_better) |
| 180 | + ordered_ops = sorted(op_fallback_acc_impact.keys(), key=lambda key: op_fallback_acc_impact[key],
| 181 | + reverse=self.higher_is_better)
120 | 182 | op_dtypes = OrderedDict(zip(ordered_ops, [target_dtype] * len(fallback_items_name_lst))) |
121 | 183 | logger.info(f"Start to accumulate fallback to {target_dtype}.")
122 | | - initial_op_tuning_cfg = deepcopy(op_tuning_cfg) |
| 184 | + initial_op_tuning_cfg = copy.deepcopy(op_tuning_cfg) |
123 | 185 | fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[], |
124 | | - initial_op_tuning_cfg=initial_op_tuning_cfg, |
125 | | - op_dtypes=op_dtypes, accumulate=True) |
| 186 | + initial_op_tuning_cfg=initial_op_tuning_cfg, |
| 187 | + op_dtypes=op_dtypes, accumulate=True) |
126 | 188 | for op_tuning_cfg in fallback_sampler: |
127 | | - op_tuning_cfg['calib_sampling_size'] = calib_sampling_size |
| 189 | + op_tuning_cfg['calib_sampling_size'] = -1 |
128 | 190 | yield op_tuning_cfg |
129 | 191 |
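
The accumulated stage reorders ops by the accuracy recorded when each one was fallen back alone, so the most accuracy-preserving fallbacks come first (with `accumulate=True`, earlier fallbacks presumably stay in place as later ones are added). A toy illustration of the ordering; the op names and accuracy values are invented:

```python
from collections import OrderedDict

# Hypothetical accuracies from the one-by-one fallback stage above.
op_fallback_acc_impact = OrderedDict([
    (("conv1", "Conv2d"), 0.72),
    (("fc1", "Linear"), 0.75),  # falling back fc1 alone preserved the most accuracy
])
higher_is_better = True
ordered_ops = sorted(op_fallback_acc_impact, key=op_fallback_acc_impact.get,
                     reverse=higher_is_better)
op_dtypes = OrderedDict(zip(ordered_ops, ["fp32"] * len(ordered_ops)))
assert ordered_ops == [("fc1", "Linear"), ("conv1", "Conv2d")]
```
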
| 192 | + |
130 | 193 | def traverse(self): |
131 | 194 | """Traverse the tuning space according to auto-mixed precision strategy.""" |
132 | 195 | if self.config.backend == "ipex": |