#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The auto-mixed precision strategy."""

import copy
import numpy as np
from collections import OrderedDict
from .strategy import strategy_registry, TuneStrategy
from ...utils import logger

from .utils.tuning_sampler import OpTypeWiseTuningSampler, FallbackTuningSampler
from .utils.tuning_structs import OpTuningConfig


@strategy_registry
class AutoMixedPrecisionTuneStrategy(TuneStrategy):
    """Tuning strategy for auto mixed precision."""

    def next_tune_cfg(self):
        """Generate the next tuning config.

        Tuning configurations are generated according to the following rules:
        1. First, it tries to convert as many ops as possible into the target data type.
        2. If the accuracy does not meet the requirement, it starts the fallback stage,
           which converts ops back into higher precision one by one.

        Yields:
            tune_config (dict): A dict containing the tuning configuration.
        """
        from copy import deepcopy

        # Filter the quantization dtypes.
        # TODO: align with the old mixed-precision path.
        target_dtypes = self.cfg.graph_optimization.precisions if self.cfg.graph_optimization \
            else self.cfg.mixed_precision.precisions
        target_dtypes = list(set(target_dtypes) - set(['fp32']))
        tuning_space = self.tuning_space
        initial_op_tuning_cfg = {}
        for item in tuning_space.root_item.options:
            if item.item_type == 'op':
                op_name, op_type = item.name
                initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, 'fp32', tuning_space)

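        # Default to bf16 when no target precision is configured.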
        if not target_dtypes:
            target_dtypes = ['bf16']
        # Step 1. Convert ops to the target dtype as many as possible (AMAP):
        # collect the ops that support the target dtype.
        bf16_items_name = []
        op_tuning_cfg = {}
        for idx, target_dtype in enumerate(target_dtypes):
            bf16_items = tuning_space.query_items_by_quant_mode(target_dtype)
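            # Skip a dtype that no op supports, unless this is the last candidate and
            # nothing has been collected yet, so that at least one config is still yielded.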
            if len(bf16_items) == 0 and \
                    not (idx == len(target_dtypes) - 1 and len(bf16_items_name) == 0):
                continue
            bf16_items_name = [item.name for item in bf16_items]
            op_tuning_cfg = deepcopy(initial_op_tuning_cfg)
            for op_name_type in bf16_items_name:
                op_tuning_cfg[op_name_type] = \
                    OpTuningConfig(op_name_type[0], op_name_type[1], target_dtype, tuning_space)
            calib_sampling_size = 1
            op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
            yield op_tuning_cfg

        # Step 2. Fallback
        target_dtype = 'fp32'
        fallback_items_name_lst = bf16_items_name[::-1]
        if fallback_items_name_lst:
            logger.info(f"Start to fall back ops to {target_dtype} one by one.")
            self._fallback_started()
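        # Map every op converted in step 1 to fp32; each candidate is tried on its own
        # (accumulate=False) to measure its individual accuracy impact.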
        op_dtypes = OrderedDict(zip(fallback_items_name_lst, [target_dtype] * len(fallback_items_name_lst)))
        initial_op_tuning_cfg = deepcopy(op_tuning_cfg)
        fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[],
                                                 initial_op_tuning_cfg=initial_op_tuning_cfg,
                                                 op_dtypes=op_dtypes, accumulate=False)
        op_fallback_acc_impact = OrderedDict()
        for op_index, op_tuning_cfg in enumerate(fallback_sampler):
            op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
            yield op_tuning_cfg
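            # traverse() evaluates the yielded config before resuming the generator,
            # so last_tune_result now holds the accuracy of this single-op fallback.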
            acc, _ = self.last_tune_result
            op_fallback_acc_impact[fallback_items_name_lst[op_index]] = acc

        # Do accumulated fallback according to the order determined in the previous stage.
        if len(op_fallback_acc_impact) > 0:
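            # Revert first the ops whose individual fallback recovered the most accuracy.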
            ordered_ops = sorted(op_fallback_acc_impact.keys(), key=lambda key: op_fallback_acc_impact[key],
                                 reverse=self.higher_is_better)
            op_dtypes = OrderedDict(zip(ordered_ops, [target_dtype] * len(fallback_items_name_lst)))
            logger.info(f"Start to accumulate fallback to {target_dtype}.")
            initial_op_tuning_cfg = deepcopy(op_tuning_cfg)
            fallback_sampler = FallbackTuningSampler(tuning_space, tuning_order_lst=[],
                                                     initial_op_tuning_cfg=initial_op_tuning_cfg,
                                                     op_dtypes=op_dtypes, accumulate=True)
            for op_tuning_cfg in fallback_sampler:
                op_tuning_cfg['calib_sampling_size'] = calib_sampling_size
                yield op_tuning_cfg

    def traverse(self):
        """Traverse the tuning space according to the auto-mixed precision strategy."""
        # Get the fp32 model baseline.
        self._eval_baseline()

        trials_count = 0
        for op_tuning_cfg in self.next_tune_cfg():
            # Convert op_tuning_cfg into tune_cfg, since quantize() consumes tune_cfg.
            tune_cfg = self._tune_cfg_converter(op_tuning_cfg)
            trials_count += 1
            tuning_history = self._find_tuning_history(tune_cfg)
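            # Reuse the recorded result when this config was already evaluated,
            # as long as the trial budget has not been exhausted.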
            if tuning_history and trials_count < self.cfg.tuning.exit_policy.max_trials:
                self.last_tune_result = tuning_history['last_tune_result']
                self.best_tune_result = tuning_history['best_tune_result']
                logger.warn("Found an already evaluated tuning config, skip it.")
                continue

            logger.debug("Dump current mixed precision configuration:")
            logger.debug(tune_cfg)
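            # Build the mixed-precision model by applying tune_cfg through the adaptor.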
            self.last_qmodel = self.adaptor.quantize(
                tune_cfg, self.model, self.calib_dataloader, self.q_func)
            assert self.last_qmodel
            # Return the last quantized model as the result if performance_only is set.
            if self.cfg.tuning.exit_policy.performance_only:
                self.best_qmodel = self.last_qmodel
                self._add_tuning_history(copy.deepcopy(tune_cfg), (-1, [0]), q_config=self.last_qmodel.q_config)
                return
            self.last_tune_cfg = copy.deepcopy(tune_cfg)
            if self.eval_dataloader or self.eval_func:
                q_config = copy.deepcopy(self.last_qmodel.q_config)
                self.last_tune_result = self._evaluate(self.last_qmodel)
                self.cur_best_acc, self.cur_best_tuning_cfg = self.update_best_op_tuning_cfg(op_tuning_cfg)
                need_stop = self.stop(self.cfg.tuning.exit_policy.timeout, trials_count)
                # Record the tuning history.
                saved_tune_cfg = copy.deepcopy(tune_cfg)
                saved_last_tune_result = copy.deepcopy(self.last_tune_result)
                self._add_tuning_history(saved_tune_cfg, saved_last_tune_result, q_config=q_config)
            else:
                # If no eval_dataloader was specified in the config yaml file,
                # only convert the model with the customized precisions and stop.
                self.best_qmodel = self.last_qmodel
                need_stop = True

            if need_stop:
                break
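
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the strategy API): the block below mimics the
# two-stage search implemented by next_tune_cfg() using plain dicts and a fake
# evaluator, so the control flow can be read in isolation. All names used here
# (SUPPORTED_BF16_OPS, fake_eval, generate_cfgs) are hypothetical; the real
# strategy drives the adaptor, tuning space, and sampler objects instead.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from collections import OrderedDict as _OrderedDict

    SUPPORTED_BF16_OPS = ['conv1', 'conv2', 'matmul1']  # hypothetical op names

    def fake_eval(cfg):
        """Pretend accuracy drops a little for every op kept in bf16."""
        return 1.0 - 0.01 * sum(dtype == 'bf16' for dtype in cfg.values())

    def generate_cfgs():
        # Step 1: convert every supported op to bf16 and try that config first.
        cfg = {op: 'bf16' for op in SUPPORTED_BF16_OPS}
        yield cfg

        # Step 2: fall back ops to fp32 one by one and record the accuracy impact.
        impact = _OrderedDict()
        for op in reversed(SUPPORTED_BF16_OPS):
            trial = dict(cfg)
            trial[op] = 'fp32'
            yield trial
            impact[op] = fake_eval(trial)

        # Step 3: accumulate fallback, reverting the most accuracy-sensitive ops first.
        ordered = sorted(impact, key=impact.get, reverse=True)
        acc_cfg = dict(cfg)
        for op in ordered:
            acc_cfg[op] = 'fp32'
            yield dict(acc_cfg)

    for i, candidate in enumerate(generate_cfgs()):
        print(f"trial {i}: {candidate} -> acc {fake_eval(candidate):.3f}")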