diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh
index 01c1a0b4a91..0aaedcf169b 100644
--- a/.azure-pipelines/scripts/ut/env_setup.sh
+++ b/.azure-pipelines/scripts/ut/env_setup.sh
@@ -20,7 +20,7 @@ echo "mxnet version is $mxnet_version"
 if [[ "${tensorflow_version}" == *"-official" ]]; then
     pip install tensorflow==${tensorflow_version%-official}
 elif [[ "${tensorflow_version}" == "spr-base" ]]; then
-    pip install /tf_dataset/tf_binary/221204/tensorflow*.whl
+    pip install /tf_dataset/tf_binary/221212/tensorflow*.whl
     if [[ $? -ne 0 ]]; then
         exit 1
     fi
diff --git a/neural_compressor/adaptor/tensorflow.py b/neural_compressor/adaptor/tensorflow.py
index 22ee3c88fe4..5a594fbacde 100644
--- a/neural_compressor/adaptor/tensorflow.py
+++ b/neural_compressor/adaptor/tensorflow.py
@@ -83,7 +83,7 @@ def __init__(self, framework_specific_info):
 
         from pkg_resources import parse_version
         import tensorflow as tf
-        self.new_api = parse_version(tf.version.VERSION) == parse_version('2.11.0202242')
+        self.new_api = tf.version.VERSION in ('2.11.0202242', '2.11.0202250')
         self.qdq_enabled = self.itex_mode or self.format == 'QDQ' or self.new_api
         self.op_wise_sequences = self.query_handler.get_eightbit_patterns(self.qdq_enabled)
         self.optimization = self.query_handler.get_grappler_optimization_cfg()
diff --git a/neural_compressor/adaptor/tensorflow.yaml b/neural_compressor/adaptor/tensorflow.yaml
index 188b5ce00e6..cbe91e7d016 100644
--- a/neural_compressor/adaptor/tensorflow.yaml
+++ b/neural_compressor/adaptor/tensorflow.yaml
@@ -16,7 +16,7 @@
 ---
 -
   version:
-    name: ['2.11.0202242']
+    name: ['2.11.0202242', '2.11.0202250']
 
   precisions:
     names: int8, uint8, bf16, fp32
diff --git a/neural_compressor/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py
index caa9afe033d..1f359b41034 100644
--- a/neural_compressor/adaptor/tf_utils/graph_converter.py
+++ b/neural_compressor/adaptor/tf_utils/graph_converter.py
@@ -36,6 +36,7 @@
 from .transform_graph.bias_correction import BiasCorrection
 from .util import generate_feed_dict, iterator_sess_run,version1_gt_version2,version1_eq_version2
 from .util import version1_gte_version2,version1_lte_version2,version1_lt_version2
+from .util import TF_SPR_BASE_VERSIONS
 from .quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel
 from .quantize_graph_common import QuantizeGraphHelper
 from .quantize_graph.qdq.optimize_qdq import OptimizeQDQGraph
@@ -268,7 +269,7 @@ def _check_tf_version(self):
 
             if version1_eq_version2(tf.version.VERSION, '1.15.0-up3'):
                 is_supported_version = True
 
-            if version1_eq_version2(tf.version.VERSION, '2.11.0202242'):
+            if tf.version.VERSION in TF_SPR_BASE_VERSIONS:
                 is_supported_version = True
                 is_sprbase_version = True
diff --git a/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py b/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py
index f7006b2edbf..a795d5b19b4 100644
--- a/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py
+++ b/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py
@@ -44,7 +44,8 @@
 from .graph_rewriter.int8.post_quantized_op_cse import PostCseOptimizer
 from .graph_rewriter.int8.meta_op_optimizer import MetaInfoChangingMemOpOptimizer
 from .graph_rewriter.int8.rnn_convert import QuantizedRNNConverter
-from .util import version1_gte_version2,version1_gt_version2,version1_eq_version2, version1_lt_version2
+from .util import version1_gte_version2,version1_gt_version2,version1_eq_version2,version1_lt_version2
+from .util import TF_SPR_BASE_VERSIONS
 
 TF_SUPPORTED_MAX_VERSION = '2.11.0'
 TF_SUPPORTED_MIN_VERSION = '1.14.0'
@@ -118,8 +119,8 @@ def _check_tf_version(self):
 
             if version1_eq_version2(tf.version.VERSION, '1.15.0-up3'):
                 is_supported_version = True
-
-            if version1_eq_version2(tf.version.VERSION, '2.11.0202242'):
+
+            if tf.version.VERSION in TF_SPR_BASE_VERSIONS:
                 is_supported_version = True
                 is_sprbase_version = True
 
diff --git a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py
index 1141674c276..99d529fec97 100644
--- a/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py
+++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py
@@ -20,7 +20,7 @@
 from neural_compressor.adaptor.tf_utils.graph_util import GraphAnalyzer
 from neural_compressor.adaptor.tf_utils.graph_util import GraphRewriterHelper as Helper
 from tensorflow.python.framework import dtypes
-from neural_compressor.adaptor.tf_utils.util import version1_eq_version2
+from neural_compressor.adaptor.tf_utils.util import TF_SPR_BASE_VERSIONS
 
 
 class FuseGeluOptimizer(GraphRewriterBase):  # pragma: no cover
@@ -29,7 +29,7 @@ class FuseGeluOptimizer(GraphRewriterBase):  # pragma: no cover
 
     def do_transformation(self):
         if not (tf.version.VERSION in ('1.15.0-up2','1.15.0-up3') or \
-            version1_eq_version2(tf.version.VERSION, '2.11.0202242')):
+            tf.version.VERSION in TF_SPR_BASE_VERSIONS):
             return self.model
 
         cur_graph = GraphAnalyzer()
diff --git a/neural_compressor/adaptor/tf_utils/util.py b/neural_compressor/adaptor/tf_utils/util.py
index 8a4ff70beb8..750900c4ab8 100644
--- a/neural_compressor/adaptor/tf_utils/util.py
+++ b/neural_compressor/adaptor/tf_utils/util.py
@@ -30,6 +30,8 @@
 from .graph_util import GraphRewriterHelper
 from pkg_resources import parse_version
 
+TF_SPR_BASE_VERSIONS = ('2.11.0202242', '2.11.0202250')
+
 def version1_lt_version2(version1, version2):
     return parse_version(version1) < parse_version(version2)
 
diff --git a/test/itex/test_tensorflow_itex_basic.py b/test/itex/test_tensorflow_itex_basic.py
index 407a47e6a6e..34e4aafb34d 100644
--- a/test/itex/test_tensorflow_itex_basic.py
+++ b/test/itex/test_tensorflow_itex_basic.py
@@ -6,6 +6,7 @@
 import shutil
 import yaml
 import platform
+from tensorflow.python.platform import gfile
 from neural_compressor.adaptor.tf_utils.util import disable_random
 from neural_compressor.experimental import Quantization, Benchmark, common
 from neural_compressor.adaptor.tf_utils.util import version1_lt_version2, version1_gte_version2
@@ -239,6 +240,9 @@ def test_itex_benchmark_gpu(self):
         relu = tf.nn.relu(add)
         relu6 = tf.nn.relu6(relu, name='op_to_store')
         out_name = relu6.name.split(':')[0]
+        num_of_instance = 1
+        cores_per_instance = 1
+        log_file = ''
         with tf.compat.v1.Session() as sess:
             sess.run(tf.compat.v1.global_variables_initializer())
             output_graph_def = graph_util.convert_variables_to_constants(
@@ -257,11 +261,13 @@ def test_itex_benchmark_gpu(self):
             evaluator.b_dataloader = common.DataLoader(dataset)
             num_of_instance = evaluator.conf.usr_cfg.evaluation.performance.configs.num_of_instance
             cores_per_instance = evaluator.conf.usr_cfg.evaluation.performance.configs.cores_per_instance
+            log_file = '{}_{}_{}.log'.format(num_of_instance, cores_per_instance, 0)
+            if gfile.Exists(log_file):
+                os.remove(log_file)
             evaluator.model = output_graph
             evaluator('performance')
 
             found_multi_instance_log = False
-            log_file = '{}_{}_{}.log'.format(num_of_instance, cores_per_instance, 0)
             for file_name in os.listdir(os.getcwd()):
                 if file_name == log_file:
                     found_multi_instance_log = True
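
Below is a minimal, illustrative sketch (not part of the patch) of the version-gating pattern the changes above adopt: the supported Intel SPR-base TensorFlow build strings live in a single tuple, TF_SPR_BASE_VERSIONS (added to neural_compressor/adaptor/tf_utils/util.py), and callers test membership instead of comparing against one hard-coded version string. The helper name is_spr_base is hypothetical and exists only for this example.

# Sketch of the membership-based version gate; assumes TensorFlow is installed.
# TF_SPR_BASE_VERSIONS mirrors the tuple added in tf_utils/util.py;
# is_spr_base() is a hypothetical convenience wrapper, not part of the patch.
TF_SPR_BASE_VERSIONS = ('2.11.0202242', '2.11.0202250')

def is_spr_base(version: str) -> bool:
    """Return True when `version` is one of the known SPR-base builds."""
    return version in TF_SPR_BASE_VERSIONS

import tensorflow as tf
print(is_spr_base(tf.version.VERSION))  # True only on an SPR-base build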