From 82e32144276d0e6556d4fe9694b92852820a0dbb Mon Sep 17 00:00:00 2001 From: chensuyue Date: Wed, 13 Dec 2023 23:04:52 +0800 Subject: [PATCH 01/14] version update reuse Signed-off-by: chensuyue --- .../scripts/{ut/ut_fwk_version.sh => fwk_version.sh} | 9 ++------- .../scripts/models/run_mxnet_models_trigger.sh | 4 +++- .../scripts/models/run_onnxrt_models_trigger.sh | 4 +++- .../scripts/models/run_pytorch_models_trigger.sh | 6 ++++-- .../scripts/models/run_tensorflow_models_trigger.sh | 4 +++- .azure-pipelines/scripts/ut/env_setup.sh | 6 ++++++ .azure-pipelines/scripts/ut/run_basic_adaptor.sh | 2 +- .azure-pipelines/scripts/ut/run_basic_api.sh | 2 +- .azure-pipelines/scripts/ut/run_basic_others.sh | 2 +- 9 files changed, 24 insertions(+), 15 deletions(-) rename .azure-pipelines/scripts/{ut/ut_fwk_version.sh => fwk_version.sh} (53%) diff --git a/.azure-pipelines/scripts/ut/ut_fwk_version.sh b/.azure-pipelines/scripts/fwk_version.sh similarity index 53% rename from .azure-pipelines/scripts/ut/ut_fwk_version.sh rename to .azure-pipelines/scripts/fwk_version.sh index e89b9fd4cee..509506966af 100644 --- a/.azure-pipelines/scripts/ut/ut_fwk_version.sh +++ b/.azure-pipelines/scripts/fwk_version.sh @@ -1,9 +1,9 @@ #!/bin/bash -echo "export UT fwk version..." +echo "export FWs version..." test_mode=$1 -if [ "$test_mode" == "coverage" ]; then +if [ "$test_mode" == "coverage" ] || [ "$test_mode" == "latest" ]; then export tensorflow_version='2.13.0' export pytorch_version='2.0.1+cpu' export torchvision_version='0.15.2+cpu' @@ -21,11 +21,6 @@ else export mxnet_version='1.9.1' fi -# import torch before import tensorflow -cd /neural-compressor/test || exit 1 -find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' -find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g' -find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g' diff --git a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh b/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh index e6ac987d72f..21be4c96031 100644 --- a/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_mxnet_models_trigger.sh @@ -21,8 +21,10 @@ do esac done +echo "specify FWs version..." +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' FRAMEWORK="mxnet" -FRAMEWORK_VERSION="1.9.1" +FRAMEWORK_VERSION=${mxnet_version} inc_new_api=false # ======== set up config for mxnet models ======== diff --git a/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh b/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh index 267dc4dedb7..d48a115bea5 100644 --- a/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_onnxrt_models_trigger.sh @@ -21,8 +21,10 @@ do esac done +echo "specify FWs version..." +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' FRAMEWORK="onnxrt" -FRAMEWORK_VERSION="1.15.1" +FRAMEWORK_VERSION=${onnxruntime_version} inc_new_api=false # ======== set up config for onnxrt models ======== diff --git a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh index 5b4a8afab64..8d61509f011 100644 --- a/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_pytorch_models_trigger.sh @@ -21,9 +21,11 @@ do esac done +echo "specify FWs version..." +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' FRAMEWORK="pytorch" -FRAMEWORK_VERSION="2.0.1+cpu" -TORCH_VISION_VERSION="0.15.2+cpu" +FRAMEWORK_VERSION=${pytorch_version} +TORCH_VISION_VERSION=${torchvision_version} inc_new_api=false # ======== set up config for pytorch models ======== diff --git a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh index 504c54d16ca..505fad4a6e5 100644 --- a/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh +++ b/.azure-pipelines/scripts/models/run_tensorflow_models_trigger.sh @@ -21,8 +21,10 @@ do esac done +echo "specify FWs version..." +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest' FRAMEWORK="tensorflow" -FRAMEWORK_VERSION="2.13.0" +FRAMEWORK_VERSION=${tensorflow_version} inc_new_api=false # ======== set up config for tensorflow models ======== diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index 19f01b8a63c..c5e6990bfcb 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -106,3 +106,9 @@ pip list echo "[DEBUG] list pipdeptree..." pip install pipdeptree pipdeptree + +# import torch before import tensorflow +cd /neural-compressor/test || exit 1 +find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' +find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g' +find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g' diff --git a/.azure-pipelines/scripts/ut/run_basic_adaptor.sh b/.azure-pipelines/scripts/ut/run_basic_adaptor.sh index 8ed2f8c644b..fb578e4c83d 100644 --- a/.azure-pipelines/scripts/ut/run_basic_adaptor.sh +++ b/.azure-pipelines/scripts/ut/run_basic_adaptor.sh @@ -4,7 +4,7 @@ test_case="run basic adaptor" echo "${test_case}" echo "specify fwk version..." -source /neural-compressor/.azure-pipelines/scripts/ut/ut_fwk_version.sh $1 +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1 echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" diff --git a/.azure-pipelines/scripts/ut/run_basic_api.sh b/.azure-pipelines/scripts/ut/run_basic_api.sh index 6312a13ea31..c31cd8f9b96 100644 --- a/.azure-pipelines/scripts/ut/run_basic_api.sh +++ b/.azure-pipelines/scripts/ut/run_basic_api.sh @@ -4,7 +4,7 @@ test_case="run basic quantization/benchmark/export/mixed_precision/distillation/ echo "${test_case}" echo "specify fwk version..." -source /neural-compressor/.azure-pipelines/scripts/ut/ut_fwk_version.sh $1 +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1 echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" diff --git a/.azure-pipelines/scripts/ut/run_basic_others.sh b/.azure-pipelines/scripts/ut/run_basic_others.sh index 02d77660590..8b8bdd5209c 100644 --- a/.azure-pipelines/scripts/ut/run_basic_others.sh +++ b/.azure-pipelines/scripts/ut/run_basic_others.sh @@ -4,7 +4,7 @@ test_case="run basic others" echo "${test_case}" echo "specify fwk version..." -source /neural-compressor/.azure-pipelines/scripts/ut/ut_fwk_version.sh $1 +source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1 echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" From eade9564f239cdb4bd6ac55ab25d610bab655dce Mon Sep 17 00:00:00 2001 From: chensuyue Date: Wed, 13 Dec 2023 23:24:45 +0800 Subject: [PATCH 02/14] update FWs version for CI test Signed-off-by: chensuyue --- .azure-pipelines/model-test.yml | 4 ++-- .azure-pipelines/scripts/fwk_version.sh | 16 ++++++++-------- .azure-pipelines/scripts/ut/env_setup.sh | 12 ++++-------- .azure-pipelines/scripts/ut/run_basic_itex.sh | 8 ++++---- .../scripts/ut/run_basic_pt_pruning.sh | 6 +++--- .../scripts/ut/run_basic_tf_pruning.sh | 2 +- .azure-pipelines/template/docker-template.yml | 2 +- .azure-pipelines/template/model-template.yml | 2 +- .azure-pipelines/template/ut-template.yml | 2 +- .azure-pipelines/ut-3x-pt.yml | 2 +- .azure-pipelines/ut-3x-tf.yml | 2 +- .azure-pipelines/ut-basic-no-cover.yml | 19 ++----------------- .azure-pipelines/ut-basic.yml | 2 +- 13 files changed, 30 insertions(+), 49 deletions(-) diff --git a/.azure-pipelines/model-test.yml b/.azure-pipelines/model-test.yml index 55c5216b87a..ebca39ba8f2 100644 --- a/.azure-pipelines/model-test.yml +++ b/.azure-pipelines/model-test.yml @@ -155,9 +155,9 @@ stages: patterns: "**/*_tuning_info.log" path: $(OUT_SCRIPT_PATH) - task: UsePythonVersion@0 - displayName: "Use Python 3.8" + displayName: "Use Python 3.10" inputs: - versionSpec: "3.8" + versionSpec: "3.10" - script: | cd ${OUT_SCRIPT_PATH} mkdir generated diff --git a/.azure-pipelines/scripts/fwk_version.sh b/.azure-pipelines/scripts/fwk_version.sh index 509506966af..eb4ea2e1afc 100644 --- a/.azure-pipelines/scripts/fwk_version.sh +++ b/.azure-pipelines/scripts/fwk_version.sh @@ -4,6 +4,14 @@ echo "export FWs version..." test_mode=$1 if [ "$test_mode" == "coverage" ] || [ "$test_mode" == "latest" ]; then + export tensorflow_version='2.14.0' + export pytorch_version='2.1.0+cpu' + export torchvision_version='0.16.0+cpu' + export ipex_version='2.1.0+cpu' + export onnx_version='1.15.0' + export onnxruntime_version='1.16.3' + export mxnet_version='1.9.1' +else export tensorflow_version='2.13.0' export pytorch_version='2.0.1+cpu' export torchvision_version='0.15.2+cpu' @@ -11,14 +19,6 @@ if [ "$test_mode" == "coverage" ] || [ "$test_mode" == "latest" ]; then export onnx_version='1.14.1' export onnxruntime_version='1.15.1' export mxnet_version='1.9.1' -else - export tensorflow_version='2.12.0' - export pytorch_version='1.13.0+cpu' - export torchvision_version='0.14.0+cpu' - export ipex_version='1.13.0+cpu' - export onnx_version='1.13.1' - export onnxruntime_version='1.14.1' - export mxnet_version='1.9.1' fi diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index c5e6990bfcb..ca9d4494587 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -48,19 +48,15 @@ if [[ "${torchvision_version}" != "" ]]; then pip install torchvision==${torchvision_version} -f https://download.pytorch.org/whl/torch_stable.html fi -if [[ "${ipex_version}" == "1.13.0+cpu" ]]; then - ipex_whl="https://github.com/intel/intel-extension-for-pytorch/releases/download/v1.13.0%2Bcpu/intel_extension_for_pytorch-1.13.0-cp310-cp310-manylinux2014_x86_64.whl" - pip install $ipex_whl -elif [[ "${ipex_version}" == "2.0.0+cpu" ]]; then +if [[ "${ipex_version}" == "2.0.0+cpu" ]]; then ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.0.0%2Bcpu-cp310-cp310-linux_x86_64.whl" pip install $ipex_whl elif [[ "${ipex_version}" == "2.0.1+cpu" ]]; then ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.0.100%2Bcpu-cp310-cp310-linux_x86_64.whl" pip install $ipex_whl -elif [[ "${ipex_version}" == "2.1.0" ]]; then - pip install /tf_dataset/pt_binary/ww32/torch-*.whl - pip install /tf_dataset/pt_binary/ww32/torchvision-*.whl - pip install /tf_dataset/pt_binary/ww32/intel_extension_for_pytorch-*.whl +elif [[ "${ipex_version}" == "2.1.0+cpu" ]]; then + ipex_whl="https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/cpu/intel_extension_for_pytorch-2.1.0%2Bcpu-cp310-cp310-linux_x86_64.whl" + pip install $ipex_whl fi if [[ "${onnx_version}" != "" ]]; then diff --git a/.azure-pipelines/scripts/ut/run_basic_itex.sh b/.azure-pipelines/scripts/ut/run_basic_itex.sh index 7ec4936e182..b908fd65c78 100644 --- a/.azure-pipelines/scripts/ut/run_basic_itex.sh +++ b/.azure-pipelines/scripts/ut/run_basic_itex.sh @@ -4,10 +4,10 @@ test_case="run basic itex" echo "${test_case}" echo "specify fwk version..." -export itex_version='1.1.0' -export tensorflow_version='2.11.0-official' -export onnx_version='1.13.0' -export onnxruntime_version='1.13.1' +export itex_version='2.14.0.1' +export tensorflow_version='2.14.0-official' +export onnx_version='1.15.0' +export onnxruntime_version='1.16.3' echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" diff --git a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh index 659a7e63522..436639fd556 100644 --- a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh +++ b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh @@ -4,9 +4,9 @@ test_case="run basic pt pruning" echo "${test_case}" echo "specify fwk version..." -export pytorch_version='2.0.0+cpu' -export torchvision_version='0.15.1+cpu' -export ipex_version='2.0.0+cpu' +export pytorch_version='2.1.0+cpu' +export torchvision_version='0.16.0+cpu' +export ipex_version='2.1.0+cpu' echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" diff --git a/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh b/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh index 10f3f56140b..fc92bde1ad8 100644 --- a/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh +++ b/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh @@ -4,7 +4,7 @@ test_case="run basic tf pruning" echo "${test_case}" echo "specify fwk version..." -export tensorflow_version='2.12.0' +export tensorflow_version='2.14.0' echo "set up UT env..." bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" diff --git a/.azure-pipelines/template/docker-template.yml b/.azure-pipelines/template/docker-template.yml index 4944d04b860..bf0ad790ca6 100644 --- a/.azure-pipelines/template/docker-template.yml +++ b/.azure-pipelines/template/docker-template.yml @@ -7,7 +7,7 @@ parameters: default: "neural-compressor" - name: repoTag type: string - default: "py38" + default: "py310" - name: dockerFileName type: string default: "Dockerfile" diff --git a/.azure-pipelines/template/model-template.yml b/.azure-pipelines/template/model-template.yml index a9b2098cc7b..874dd88aa9c 100644 --- a/.azure-pipelines/template/model-template.yml +++ b/.azure-pipelines/template/model-template.yml @@ -15,7 +15,7 @@ steps: parameters: dockerConfigName: "commonDockerConfig" repoName: "neural-compressor" - repoTag: "py38" + repoTag: "py310" dockerFileName: "Dockerfile" containerName: ${{ parameters.modelContainerName }} diff --git a/.azure-pipelines/template/ut-template.yml b/.azure-pipelines/template/ut-template.yml index e78d3514831..7b771a53161 100644 --- a/.azure-pipelines/template/ut-template.yml +++ b/.azure-pipelines/template/ut-template.yml @@ -23,7 +23,7 @@ steps: parameters: dockerConfigName: ${{ parameters.dockerConfigName }} repoName: "neural-compressor" - repoTag: "py38" + repoTag: "py310" dockerFileName: "Dockerfile" containerName: ${{ parameters.utContainerName }} repo: ${{ parameters.repo }} diff --git a/.azure-pipelines/ut-3x-pt.yml b/.azure-pipelines/ut-3x-pt.yml index 6002187543b..3a83c1e7d09 100644 --- a/.azure-pipelines/ut-3x-pt.yml +++ b/.azure-pipelines/ut-3x-pt.yml @@ -18,7 +18,7 @@ pool: ICX-16C variables: IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py38" + IMAGE_TAG: "py310" UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir ARTIFACT_NAME: "UT_coverage_report_3x_pt" diff --git a/.azure-pipelines/ut-3x-tf.yml b/.azure-pipelines/ut-3x-tf.yml index e76d831746b..1824e350786 100644 --- a/.azure-pipelines/ut-3x-tf.yml +++ b/.azure-pipelines/ut-3x-tf.yml @@ -18,7 +18,7 @@ pool: ICX-16C variables: IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py38" + IMAGE_TAG: "py310" UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir ARTIFACT_NAME: "UT_coverage_report_3x_tf" diff --git a/.azure-pipelines/ut-basic-no-cover.yml b/.azure-pipelines/ut-basic-no-cover.yml index 9fecd40740b..875c03887b8 100644 --- a/.azure-pipelines/ut-basic-no-cover.yml +++ b/.azure-pipelines/ut-basic-no-cover.yml @@ -23,7 +23,7 @@ pool: ICX-16C variables: IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py38" + IMAGE_TAG: "py310" UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir ARTIFACT_NAME: "UT_report" @@ -103,19 +103,4 @@ stages: uploadPath: $(UPLOAD_PATH) utArtifact: "ut-others" utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" - -# - stage: IPEX -# displayName: Unit Test IPEX 2.1 for SQ algo -# dependsOn: [] -# jobs: -# - job: -# steps: -# - template: template/ut-template.yml -# parameters: -# dockerConfigName: "commonDockerConfig" -# utScriptFileName: "run_basic_v2.1_ipex" -# uploadPath: $(UPLOAD_PATH) -# utArtifact: "ut-ipex_v2.1" -# utTestMode: "no-coverage" -# utContainerName: "utTest-no-coverage" + utContainerName: "utTest-no-coverage" \ No newline at end of file diff --git a/.azure-pipelines/ut-basic.yml b/.azure-pipelines/ut-basic.yml index afc383ce42e..18c8769f868 100644 --- a/.azure-pipelines/ut-basic.yml +++ b/.azure-pipelines/ut-basic.yml @@ -23,7 +23,7 @@ pool: ICX-16C variables: IMAGE_NAME: "neural-compressor" - IMAGE_TAG: "py38" + IMAGE_TAG: "py310" UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir ARTIFACT_NAME: "UT_coverage_report" From 579fdd8ffff9c6e0ff82c268237fa567c5698fca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 15:28:32 +0000 Subject: [PATCH 03/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .azure-pipelines/ut-basic-no-cover.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/ut-basic-no-cover.yml b/.azure-pipelines/ut-basic-no-cover.yml index 875c03887b8..06d3d418f03 100644 --- a/.azure-pipelines/ut-basic-no-cover.yml +++ b/.azure-pipelines/ut-basic-no-cover.yml @@ -103,4 +103,4 @@ stages: uploadPath: $(UPLOAD_PATH) utArtifact: "ut-others" utTestMode: "no-coverage" - utContainerName: "utTest-no-coverage" \ No newline at end of file + utContainerName: "utTest-no-coverage" From 3ab9321272995f336df294651a2feb6ad18b4561 Mon Sep 17 00:00:00 2001 From: chensuyue Date: Thu, 14 Dec 2023 00:34:49 +0800 Subject: [PATCH 04/14] trigger ut test Signed-off-by: chensuyue --- .azure-pipelines/ut-basic-no-cover.yml | 1 + .azure-pipelines/ut-basic.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.azure-pipelines/ut-basic-no-cover.yml b/.azure-pipelines/ut-basic-no-cover.yml index 875c03887b8..7ec0a80a767 100644 --- a/.azure-pipelines/ut-basic-no-cover.yml +++ b/.azure-pipelines/ut-basic-no-cover.yml @@ -12,6 +12,7 @@ pr: - test - setup.py - requirements.txt + - .azure-pipelines/scripts/ut exclude: - test/neural_coder - test/3x diff --git a/.azure-pipelines/ut-basic.yml b/.azure-pipelines/ut-basic.yml index 18c8769f868..98aec0732ab 100644 --- a/.azure-pipelines/ut-basic.yml +++ b/.azure-pipelines/ut-basic.yml @@ -12,6 +12,7 @@ pr: - test - setup.py - requirements.txt + - .azure-pipelines/scripts/ut exclude: - test/neural_coder - test/3x From 3ae345a4f3262ec2cec00df099055d821b24504b Mon Sep 17 00:00:00 2001 From: chensuyue Date: Thu, 14 Dec 2023 09:54:29 +0800 Subject: [PATCH 05/14] update ut test Signed-off-by: chensuyue --- .azure-pipelines/scripts/fwk_version.sh | 2 +- .azure-pipelines/scripts/ut/env_setup.sh | 11 +++++++---- .azure-pipelines/scripts/ut/run_basic_api.sh | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.azure-pipelines/scripts/fwk_version.sh b/.azure-pipelines/scripts/fwk_version.sh index eb4ea2e1afc..cf8530d6b9c 100644 --- a/.azure-pipelines/scripts/fwk_version.sh +++ b/.azure-pipelines/scripts/fwk_version.sh @@ -8,7 +8,7 @@ if [ "$test_mode" == "coverage" ] || [ "$test_mode" == "latest" ]; then export pytorch_version='2.1.0+cpu' export torchvision_version='0.16.0+cpu' export ipex_version='2.1.0+cpu' - export onnx_version='1.15.0' + export onnx_version='1.14.1' export onnxruntime_version='1.16.3' export mxnet_version='1.9.1' else diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index ca9d4494587..859a7d32117 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -104,7 +104,10 @@ pip install pipdeptree pipdeptree # import torch before import tensorflow -cd /neural-compressor/test || exit 1 -find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' -find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g' -find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g' +if [[ $(echo "${test_case}" | grep -c "api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "others") != 0 ]] || [[ $(echo "${test_case}" | grep -c "adaptor") != 0 ]]; then + cd /neural-compressor/test || exit 1 + find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' + find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g' + find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g' +fi + diff --git a/.azure-pipelines/scripts/ut/run_basic_api.sh b/.azure-pipelines/scripts/ut/run_basic_api.sh index c31cd8f9b96..4a014a2eecf 100644 --- a/.azure-pipelines/scripts/ut/run_basic_api.sh +++ b/.azure-pipelines/scripts/ut/run_basic_api.sh @@ -1,6 +1,6 @@ #!/bin/bash python -c "import neural_compressor as nc;print(nc.version.__version__)" -test_case="run basic quantization/benchmark/export/mixed_precision/distillation/scheduler/nas" +test_case="run basic api quantization/benchmark/export/mixed_precision/distillation/scheduler/nas" echo "${test_case}" echo "specify fwk version..." From a917baa4556edffdd85e58f6db2f96ccdd4c6dc9 Mon Sep 17 00:00:00 2001 From: chensuyue Date: Mon, 18 Dec 2023 17:32:21 +0800 Subject: [PATCH 06/14] update fw version Signed-off-by: chensuyue --- .azure-pipelines/scripts/ut/run_basic_adaptor_tfnewapi.sh | 2 +- .azure-pipelines/scripts/ut/run_basic_itex.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/scripts/ut/run_basic_adaptor_tfnewapi.sh b/.azure-pipelines/scripts/ut/run_basic_adaptor_tfnewapi.sh index 094ce9b38d0..9373631f5f7 100644 --- a/.azure-pipelines/scripts/ut/run_basic_adaptor_tfnewapi.sh +++ b/.azure-pipelines/scripts/ut/run_basic_adaptor_tfnewapi.sh @@ -1,6 +1,6 @@ #!/bin/bash python -c "import neural_compressor as nc;print(nc.version.__version__)" -test_case="run basic adaptor tfnewapi" +test_case="run basic tfnewapi" echo "${test_case}" echo "specify fwk version..." diff --git a/.azure-pipelines/scripts/ut/run_basic_itex.sh b/.azure-pipelines/scripts/ut/run_basic_itex.sh index b908fd65c78..1c227d5e095 100644 --- a/.azure-pipelines/scripts/ut/run_basic_itex.sh +++ b/.azure-pipelines/scripts/ut/run_basic_itex.sh @@ -6,7 +6,7 @@ echo "${test_case}" echo "specify fwk version..." export itex_version='2.14.0.1' export tensorflow_version='2.14.0-official' -export onnx_version='1.15.0' +export onnx_version='1.14.1' export onnxruntime_version='1.16.3' echo "set up UT env..." From 59849156e14e790f0c89e5c1463df53a0aa9428f Mon Sep 17 00:00:00 2001 From: chensuyue Date: Mon, 18 Dec 2023 21:38:19 +0800 Subject: [PATCH 07/14] fix modify scope Signed-off-by: chensuyue --- .azure-pipelines/scripts/ut/env_setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index 859a7d32117..acd678c0505 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -104,7 +104,7 @@ pip install pipdeptree pipdeptree # import torch before import tensorflow -if [[ $(echo "${test_case}" | grep -c "api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "others") != 0 ]] || [[ $(echo "${test_case}" | grep -c "adaptor") != 0 ]]; then +if [[ $(echo "${test_case}" | grep -c "run basic api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "run basic others") != 0 ]] || [[ $(echo "${test_case}" | grep -c "run basic adaptor") != 0 ]]; then cd /neural-compressor/test || exit 1 find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g' find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g' From 9721dc763d10a79e3a01d14daa36728a2a8cd638 Mon Sep 17 00:00:00 2001 From: xinhe Date: Tue, 19 Dec 2023 10:07:08 +0800 Subject: [PATCH 08/14] [bug fix] Torch/IPEX 2.1.0][CI] UT issue (#1477) Signed-off-by: Xin He --- neural_compressor/utils/pytorch.py | 3 +++ test/model/test_model_pytorch.py | 28 ++++++++++++++-------------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/neural_compressor/utils/pytorch.py b/neural_compressor/utils/pytorch.py index 98d79570c6a..ad2d090b0a8 100644 --- a/neural_compressor/utils/pytorch.py +++ b/neural_compressor/utils/pytorch.py @@ -507,6 +507,9 @@ def recover_model_from_json(model, json_file_path, example_inputs): if isinstance(example_inputs, dict): model(**example_inputs) model(**example_inputs) + elif isinstance(example_inputs, tuple) or isinstance(example_inputs, list): + model(*example_inputs) + model(*example_inputs) else: model(example_inputs) model(example_inputs) diff --git a/test/model/test_model_pytorch.py b/test/model/test_model_pytorch.py index f0990b6558c..34ac9f51596 100644 --- a/test/model/test_model_pytorch.py +++ b/test/model/test_model_pytorch.py @@ -114,19 +114,19 @@ def test_WeightOnlyLinear(self): for dtype in compression_dtype: new_model = Model() inc_model = INCModel(new_model) - inc_model.export_compressed_model( + compressed_model = inc_model.export_compressed_model( qweight_config_path="saved/qconfig.json", compression_dtype=dtype, scale_dtype=torch.float32, use_optimum_format=False, ) out2 = q_model(input) - torch.save(inc_model.state_dict(), "saved/tmp.pt") + torch.save(compressed_model.state_dict(), "saved/tmp.pt") model_size2 = os.path.getsize("saved/tmp.pt") / 1024 print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2)) - self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear)) - self.assertTrue(inc_model.model.fc1.qweight.dtype == dtype) - self.assertTrue(inc_model.model.fc1.scales.dtype == torch.float32) + self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear)) + self.assertTrue(compressed_model.fc1.qweight.dtype == dtype) + self.assertTrue(compressed_model.fc1.scales.dtype == torch.float32) self.assertTrue(model_size1 / model_size2 > 2) self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1))) @@ -135,35 +135,35 @@ def test_WeightOnlyLinear(self): for dim in compress_dims: new_model = Model() inc_model = INCModel(new_model) - inc_model.export_compressed_model( + compressed_model = inc_model.export_compressed_model( qweight_config_path="saved/qconfig.json", compression_dim=dim, use_optimum_format=False, ) out2 = q_model(input) - torch.save(inc_model.state_dict(), "saved/tmp.pt") + torch.save(compressed_model.state_dict(), "saved/tmp.pt") model_size2 = os.path.getsize("saved/tmp.pt") / 1024 print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2)) - self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear)) + self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear)) if dim == 1: - self.assertTrue(inc_model.model.fc1.qweight.shape[0] == inc_model.model.fc1.out_features) + self.assertTrue(compressed_model.fc1.qweight.shape[1] != compressed_model.fc1.in_features) else: - self.assertTrue(inc_model.model.fc1.qweight.shape[1] == inc_model.model.fc1.in_features) + self.assertTrue(compressed_model.fc1.qweight.shape[0] != compressed_model.fc1.out_features) self.assertTrue(model_size1 / model_size2 > 2) self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1))) # test half dtype new_model = Model() inc_model = INCModel(new_model) - inc_model.export_compressed_model( + compressed_model = inc_model.export_compressed_model( qweight_config_path="saved/qconfig.json", ) out2 = q_model(input) - torch.save(inc_model.state_dict(), "saved/tmp.pt") + torch.save(compressed_model.state_dict(), "saved/tmp.pt") model_size2 = os.path.getsize("saved/tmp.pt") / 1024 print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2)) - self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear)) - self.assertTrue(inc_model.model.fc1.scales.dtype == torch.float16) + self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear)) + self.assertTrue(compressed_model.fc1.scales.dtype == torch.float16) self.assertTrue(model_size1 / model_size2 > 2) self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1))) From 3fe8bd48f3c4f62addad6f0ff35e2ff5d4b84f9f Mon Sep 17 00:00:00 2001 From: chensuyue Date: Wed, 20 Dec 2023 20:54:10 +0800 Subject: [PATCH 09/14] fix horovod install issue in CI Signed-off-by: chensuyue --- .azure-pipelines/scripts/ut/env_setup.sh | 7 +- .../scripts/ut/run_basic_others.sh | 1 + .../scripts/ut/run_basic_pt_pruning.sh | 1 + .../scripts/ut/run_basic_tf_pruning.sh | 1 + test/distributed/test_distributed_metrics.py | 996 ------------------ test/distributed/test_distributed_pt_train.py | 8 - 6 files changed, 9 insertions(+), 1005 deletions(-) delete mode 100644 test/distributed/test_distributed_metrics.py diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index acd678c0505..699b307153d 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -84,7 +84,6 @@ fi # install special test env requirements # common deps pip install cmake -pip install horovod pip install transformers if [[ $(echo "${test_case}" | grep -c "others") != 0 ]];then @@ -93,6 +92,12 @@ elif [[ $(echo "${test_case}" | grep -c "nas") != 0 ]]; then pip install dynast==1.6.0rc1 elif [[ $(echo "${test_case}" | grep -c "tf pruning") != 0 ]]; then pip install tensorflow-addons + # Workaround + # horovod can't be install in the env with TF and PT together + # so test distribute cases in the env with single fw installed + pip install horovod +elif [[ $(echo "${test_case}" | grep -c "pt pruning") != 0 ]]; then + pip install horovod fi # test deps pip install coverage diff --git a/.azure-pipelines/scripts/ut/run_basic_others.sh b/.azure-pipelines/scripts/ut/run_basic_others.sh index 8b8bdd5209c..e5abf4a293f 100644 --- a/.azure-pipelines/scripts/ut/run_basic_others.sh +++ b/.azure-pipelines/scripts/ut/run_basic_others.sh @@ -26,6 +26,7 @@ sed -i '/ distillation\//d' run.sh sed -i '/ scheduler\//d' run.sh sed -i '/ nas\//d' run.sh sed -i '/ 3x\//d' run.sh +sed -i '/ distributed\//d' run.sh echo "copy model for dynas..." mkdir -p .torch/ofa_nets || true diff --git a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh index 436639fd556..590967409e6 100644 --- a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh +++ b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh @@ -14,6 +14,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.f lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))') cd /neural-compressor/test || exit 1 find ./pruning_with_pt -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh +find ./distributed -name "test_distributed_pt_train.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} diff --git a/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh b/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh index fc92bde1ad8..60d126865c4 100644 --- a/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh +++ b/.azure-pipelines/scripts/ut/run_basic_tf_pruning.sh @@ -12,6 +12,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.f lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))') cd /neural-compressor/test || exit 1 find ./pruning_with_tf -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh +find ./distributed -name "test_distributed_tf_dataloader.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} diff --git a/test/distributed/test_distributed_metrics.py b/test/distributed/test_distributed_metrics.py deleted file mode 100644 index b42b194985a..00000000000 --- a/test/distributed/test_distributed_metrics.py +++ /dev/null @@ -1,996 +0,0 @@ -"""Tests for the distributed metrics.""" -import os -import re -import shutil -import signal -import subprocess -import sys -import unittest - -import cpuinfo -import tensorflow as tf - -from neural_compressor.adaptor.tf_utils.util import version1_gte_version2, version1_lt_version2 -from neural_compressor.utils import logger - - -def build_fake_ut(): - fake_ut = """ -import numpy as np -import unittest -import horovod.tensorflow as hvd -import os -import sys -import cpuinfo -import json -import tensorflow as tf -from neural_compressor.metric import METRICS -from neural_compressor.experimental.metric.f1 import evaluate -from neural_compressor.experimental.metric.evaluate_squad import evaluate as evaluate_squad -from neural_compressor.experimental.metric import bleu -from neural_compressor.utils import logger - -tf.compat.v1.enable_eager_execution() - -class TestMetrics(unittest.TestCase): - @classmethod - def setUpClass(cls): - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - hvd.init() - if hvd.rank() == 0: - if os.path.exists('anno_0.yaml'): - os.remove('anno_0.yaml') - if os.path.exists('anno_1.yaml'): - os.remove('anno_1.yaml') - if os.path.exists('anno_2.yaml'): - os.remove('anno_2.yaml') - while hvd.rank() == 1: - if not os.path.exists('anno_0.yaml') \\ - and not os.path.exists('anno_1.yaml') \\ - and not os.path.exists('anno_2.yaml'): - break - - @classmethod - def tearDownClass(cls): - if hvd.rank() == 1: - if os.path.exists('anno_0.yaml'): - os.remove('anno_0.yaml') - if os.path.exists('anno_1.yaml'): - os.remove('anno_1.yaml') - if os.path.exists('anno_2.yaml'): - os.remove('anno_2.yaml') - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\\n") - - def test_mIOU(self): - metrics = METRICS('tensorflow') - miou = metrics['mIOU']() - miou.hvd = hvd - if hvd.rank() == 0: - preds = np.array([0]) - labels = np.array([0]) - else: - preds = np.array([0, 1, 1]) - labels = np.array([1, 0, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.33333334) - - miou.reset() - if hvd.rank() == 0: - preds = np.array([0, 0]) - labels = np.array([0, 1]) - else: - preds = np.array([1, 1]) - labels = np.array([1, 1]) - miou.update(preds, labels) - self.assertAlmostEqual(miou.result(), 0.58333333) - - def test_onnxrt_GLUE(self): - metrics = METRICS('onnxrt_qlinearops') - glue = metrics['GLUE']('mrpc') - glue.hvd = hvd - preds = [np.array( - [[-3.2443411, 3.0909934], - [2.0500996, -2.3100944], - [1.870293 , -2.0741048], - [-2.8377204, 2.617834], - [2.008347 , -2.0215416], - [-2.9693947, 2.7782154], - [-2.9949608, 2.7887983], - [-3.0623112, 2.8748074]]) - ] - labels = [np.array([1, 0, 0, 1, 0, 1, 0, 1])] - self.assertRaises(NotImplementedError, glue.update, preds, labels) - preds_2 = [np.array( - [[-3.1296735, 2.8356276], - [-3.172515 , 2.9173899], - [-3.220131 , 3.0916846], - [2.1452675, -1.9398905], - [1.5475761, -1.9101546], - [-2.9797182, 2.721741], - [-3.2052834, 2.9934788], - [-2.7451005, 2.622343]]) - ] - labels_2 = [np.array([1, 1, 1, 0, 0, 1, 1, 1])] - self.assertRaises(NotImplementedError, glue.update, preds_2, labels_2) - glue.reset() - self.assertRaises(NotImplementedError, glue.update, preds, labels) - - def test_tensorflow_F1(self): - metrics = METRICS('tensorflow') - F1 = metrics['F1']() - F1.hvd = hvd - if hvd.rank() == 0: - preds = [1, 1, 1, 1] - labels = [0, 1, 1, 1] - else: - preds = [1, 1, 1, 1, 1, 1] - labels = [1, 1, 1, 1, 1, 1] - - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.9) - - def test_squad_evaluate(self): - evaluate.hvd = hvd - label = [{'paragraphs':\\ - [{'qas':[{'answers': [{'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}], \\ - 'question': 'Which NFL team represented the AFC at Super Bowl 50?', \\ - 'id': '56be4db0acb8001400a502ec'}]}]}] - preds = {'56be4db0acb8001400a502ec': 'Denver Broncos'} - f1 = evaluate(preds, label) - self.assertEqual(f1, 100.) - dataset = [{'paragraphs':\\ - [{'qas':[{'answers': [{'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}, \\ - {'answer_start': 177, 'text': 'Denver Broncos'}], \\ - 'question': 'Which NFL team represented the AFC at Super Bowl 50?', \\ - 'id': '56be4db0acb8001400a502ec'}]}]}] - predictions = {'56be4db0acb8001400a502ec': 'Denver Broncos'} - f1_squad = evaluate_squad(dataset,predictions) - self.assertEqual(f1_squad['f1'], 100.) - self.assertEqual(f1_squad['exact_match'], 100.) - - def test_pytorch_F1(self): - metrics = METRICS('pytorch') - F1 = metrics['F1']() - F1.hvd = hvd - F1.reset() - if hvd.rank() == 0: - preds = [1] - labels = [2] - else: - preds = [1] - labels = [1, 1] - F1.update(preds, labels) - self.assertEqual(F1.result(), 0.8) - - def test_tensorflow_topk(self): - metrics = METRICS('tensorflow') - top1 = metrics['topk']() - top1.reset() - self.assertEqual(top1.result(), 0) - top2 = metrics['topk'](k=2) - top3 = metrics['topk'](k=3) - top1.hvd = hvd - top2.hvd = hvd - top3.hvd = hvd - - if hvd.rank() == 0: - predicts = [[0, 0.2, 0.9, 0.3]] - labels = [[0, 1, 0, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - sparse_labels = [2] - single_label = 2 - else: - predicts = [[0, 0.9, 0.8, 0]] - labels = [[0, 0, 1, 0]] - single_predict = [0, 0.2, 0.9, 0.3] - sparse_labels = [2] - single_label = 2 - - # test functionality of one-hot label - top1.update(predicts, labels) - top2.update(predicts, labels) - top3.update(predicts, labels) - self.assertEqual(top1.result(), 0.0) - self.assertEqual(top2.result(), 0.5) - self.assertEqual(top3.result(), 1) - - # test functionality of sparse label - top1.reset() - top2.reset() - top3.reset() - top1.update(predicts, sparse_labels) - top2.update(predicts, sparse_labels) - top3.update(predicts, sparse_labels) - self.assertEqual(top1.result(), 0.5) - self.assertEqual(top2.result(), 1) - self.assertEqual(top3.result(), 1) - - # test functionality of single label - top1.reset() - top2.reset() - top3.reset() - top1.update(single_predict, single_label) - top2.update(single_predict, single_label) - top3.update(single_predict, single_label) - self.assertEqual(top1.result(), 1) - self.assertEqual(top2.result(), 1) - self.assertEqual(top3.result(), 1) - - def test_tensorflow_mAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_0.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_0.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['mAP']('anno_0.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 1.]]) - ] - ground_truth = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[b'dog', b'dog']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - mAP.result() - self.assertEqual(format(mAP.result(), '.5f'), - '0.00000') - - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. , 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - mAP = metrics['mAP']() - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.20347') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test_tensorflow_VOCmAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_1.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_1.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['VOCmAP']('anno_1.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.iou_thrs, 0.5) - self.assertEqual(mAP.map_points, 0) - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - - mAP = metrics['VOCmAP']() - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. , 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.20347') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.18182') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test_tensorflow_COCOmAP(self): - metrics = METRICS('tensorflow') - fake_dict = 'dog: 1' - if hvd.rank() == 0: - with open('anno_2.yaml', 'w', encoding = "utf-8") as f: - f.write(fake_dict) - while True: - file_exists = hvd.allgather_object(os.path.exists('anno_2.yaml')) - if file_exists == [True, True]: - break - mAP = metrics['COCOmAP']('anno_2.yaml') - mAP.hvd = hvd - self.assertEqual(mAP.category_map_reverse['dog'], 1) - detection = [ - np.array([[5]]), - np.array([[5]]), - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([['a', 'b']]), - np.array([[]]), - np.array([b'000000397133.jpg']) - ] - self.assertRaises(NotImplementedError, mAP.update, detection, ground_truth) - mAP = metrics['COCOmAP']() - detection = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762], - [0.40032804, 0.01218696, 0.6924763 , 0.30341768], - [0.62706745, 0.35748824, 0.6892729 , 0.41513762]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - detection_2 = [ - np.array([[8]]), - np.array([[[0.82776225, 0.5865939 , 0.8927653 , 0.6302338 ], - [0.8375764 , 0.6424138 , 0.9055594 , 0.6921875 ], - [0.57902956, 0.39394334, 0.8342961 , 0.5577197 ], - [0.7949219 , 0.6513021 , 0.8472295 , 0.68427753], - [0.809729 , 0.5947042 , 0.8539927 , 0.62916476], - [0.7258591 , 0.08907133, 1. , 0.86224866], - [0.43100086, 0.37782395, 0.8384069 , 0.5616918 ], - [0.32005906, 0.84334356, 1. , 1. ]]]), - np.array([[0.86698544, 0.7562499 , 0.66414887, 0.64498234,\\ - 0.63083494,0.46618757, 0.3914739 , 0.3094324 ]]), - np.array([[55., 55., 79., 55., 55., 67., 79., 82.]]) - ] - ground_truth = [ - np.array([[[0.5633255 , 0.34003124, 0.69857144, 0.4009531 ], - [0.56262296, 0.0015625 , 1. , 0.5431719 ], - [0.16374707, 0.60728127, 0.813911 , 0.77823436], - [0.5841452 , 0.21182813, 0.65156907, 0.24670312], - [0.8056206 , 0.048875 , 0.90124124, 0.1553125 ], - [0.6729742 , 0.09317187, 0.7696956 , 0.21203125], - [0.3848478 , 0.002125 , 0.61522245, 0.303 ], - [0.61548007, 0. , 0.7015925 , 0.097125 ], - [0.6381967 , 0.1865625 , 0.7184075 , 0.22534375], - [0.6274239 , 0.22104688, 0.71140516, 0.27134374], - [0.39566743, 0.24370313, 0.43578455, 0.284375 ], - [0.2673302 , 0.245625 , 0.3043794 , 0.27353126], - [0.7137705 , 0.15429688, 0.726815 , 0.17114063], - [0.6003747 , 0.25942189, 0.6438876 , 0.27320313], - [0.68845433, 0.13501562, 0.714637 , 0.17245312], - [0.69358313, 0.10959375, 0.7043091 , 0.12409375], - [0.493911 , 0. , 0.72571427, 0.299 ], - [0.69576114, 0.15107812, 0.70714283, 0.16332813], - [0.4763466 , 0.7769531 , 0.54334897, 0.9675937 ]]]), - np.array([[]]), - np.array([[44, 67, 1, 49, 51, 51, 79, 1, 47, 47, 51, 51,\\ - 56, 50, 56, 56, 79, 57, 81]]), - np.array([b'000000397133.jpg']) - ] - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.9358696 , 0.07528409, 0.99891305, 0.25 ], - [0.8242174 , 0.3309659 , 0.93508697, 0.47301137], - [0.77413046, 0.22599432, 0.9858696 , 0.8179261 ], - [0.32582608, 0.8575 , 0.98426086, 0.9984659 ], - [0.77795655, 0.6268466 , 0.89930433, 0.73434657], - [0.5396087 , 0.39053977, 0.8483913 , 0.5615057 ], - [0.58473915, 0.75661933, 0.5998261 , 0.83579546], - [0.80391306, 0.6129829 , 0.8733478 , 0.66201705], - [0.8737391 , 0.6579546 , 0.943 , 0.7053693 ], - [0.775 , 0.6549716 , 0.8227391 , 0.6882955 ], - [0.8130869 , 0.58292615, 0.90526086, 0.62551135], - [0.7844348 , 0.68735796, 0.98182607, 0.83329546], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62, 62, 67, 82, 52, 79, 81, 55, 55, 55, 55, 62, 55]]), - np.array([b'000000037777.jpg']) - ] - - self.assertEqual(mAP.result(), 0) - - mAP.update(detection, ground_truth) - - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.14149') - - mAP.update(detection_2, ground_truth_2) - self.assertEqual(format(mAP.result(), '.5f'), - '0.13366') - mAP.reset() - mAP.update(detection, ground_truth) - self.assertEqual(format(mAP.result(), '.5f'), - '0.14149') - - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[[64, 62]]]), - np.array([b'000000037777.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64]]), - np.array([b'000000037700.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection, ground_truth_2) - detection_1 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787 , 0.60418576, 0.35155892, 0.31158054]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - ground_truth_1 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000011.jpg']) - ] - self.assertRaises(ValueError, mAP.update, detection_1, ground_truth_1) - ground_truth_2 = [ - np.array([[[0.51508695, 0.2911648 , 0.5903478 , 0.31360796], - [0.872 , 0.6190057 , 0.9306522 , 0.6591761 ]]]), - np.array([[]]), - np.array([[64, 62]]), - np.array([b'000000012.jpg']) - ] - detection_2 = [ - np.array([[[0.16117382, 0.59801614, 0.81511605, 0.7858219 ], - [0.5589304 , 0. , 0.98301625, 0.520178 ]]]), - np.array([[0.9267181 , 0.8510787]]), - np.array([[ 1., 67., 51., 79., 47.]]) - ] - self.assertRaises(ValueError, mAP.update, detection_2, ground_truth_2) - - def test__accuracy(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [[0, 0]] - labels2 = [[0, 1]] - predicts3 = [[[0, 1], [0, 0], [0, 1]]] - labels3 = [[[0, 1], [0, 1], [1, 0]]] - predicts4 = [[0.2, 0.8]] - labels4 = [0] - else: - predicts1 = [0, 1, 1] - labels1 = [1, 1, 1] - predicts2 = [[0, 0]] - labels2 = [[1, 1]] - predicts3 = [[[0, 1], [0, 1], [0, 1]]] - labels3 = [[[1, 0], [1, 0], [1, 0]]] - predicts4 = [[0.1, 0.9], [0.3, 0.7], [0.4, 0.6]] - labels4 = [1, 0, 0] - - metrics = METRICS('tensorflow') - acc = metrics['Accuracy']() - acc.hvd = hvd - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(1, 1) - self.assertEqual(acc.result(), 1.0) - - wrong_predictions = [1, 0, 0] - wrong_labels = [[0, 1, 1]] - self.assertRaises(ValueError, acc.update, wrong_predictions, wrong_labels) - - metrics = METRICS('pytorch') - acc = metrics['Accuracy']() - acc.hvd = hvd - acc.update(predicts1, labels1) - acc_result = acc.result() - self.assertEqual(acc_result, 0.5) - acc.reset() - acc.update(predicts2, labels2) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts3, labels3) - self.assertEqual(acc.result(), 0.25) - acc.reset() - acc.update(predicts4, labels4) - self.assertEqual(acc.result(), 0.25) - - def test_mse(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 1] - labels2 = [0, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1, 1] - labels2 = [1, 0] - - metrics = METRICS('tensorflow') - mse = metrics['MSE'](compare_label=False) - mse.hvd = hvd - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - metrics = METRICS('pytorch') - mse = metrics['MSE']() - mse.hvd = hvd - mse.update(predicts1, labels1) - mse_result = mse.result() - self.assertEqual(mse_result, 0.75) - mse.update(predicts2, labels2) - mse_result = mse.result() - self.assertEqual(mse_result, 0.625) - - def test_mae(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 1] - labels2 = [1, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1, 1] - labels2 = [1, 0] - - metrics = METRICS('tensorflow') - mae = metrics['MAE']() - mae.hvd = hvd - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - if hvd.rank() == 1: - mae.update(0, 1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.8) - mae.reset() - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.25) - - metrics = METRICS('pytorch') - mae = metrics['MAE']() - mae.hvd = hvd - mae.update(predicts1, labels1) - mae_result = mae.result() - self.assertEqual(mae_result, 0.75) - mae.update(predicts2, labels2) - mae_result = mae.result() - self.assertEqual(mae_result, 0.5) - - self.assertRaises(AssertionError, mae.update, [1], [1, 2]) - self.assertRaises(AssertionError, mae.update, 1, [1,2]) - self.assertRaises(AssertionError, mae.update, [1, 2], [1]) - self.assertRaises(AssertionError, mae.update, 1, np.array([1,2])) - - def test_rmse(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [1] - predicts2 = [1, 1] - labels2 = [1, 0] - else: - predicts1 = [0, 0, 1] - labels1 = [0, 0, 0] - predicts2 = [1, 1] - labels2 = [0, 0] - - metrics = METRICS('tensorflow') - rmse = metrics['RMSE']() - rmse.hvd = hvd - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.reset() - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.75)) - - metrics = METRICS('pytorch') - rmse = metrics['RMSE']() - rmse.hvd = hvd - rmse.update(predicts1, labels1) - rmse_result = rmse.result() - self.assertEqual(rmse_result, 0.5) - rmse.update(predicts2, labels2) - rmse_result = rmse.result() - self.assertAlmostEqual(rmse_result, np.sqrt(0.5)) - - def test_loss(self): - if hvd.rank() == 0: - predicts1 = [1] - labels1 = [0] - predicts2 = [1, 0, 1] - labels2 = [1, 0, 0] - predicts3 = [1, 0] - labels3 = [0, 1] - else: - predicts1 = [0, 0, 1] - labels1 = [1, 0, 0] - predicts2 = [1] - labels2 = [0] - predicts3 = [0, 1] - labels3 = [0, 0] - - metrics = METRICS('tensorflow') - loss = metrics['Loss']() - loss.hvd = hvd - loss.update(predicts1, labels1) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - loss.update(predicts2, labels2) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - loss.update(predicts3, labels3) - self.assertEqual(loss.result(), 0.5) - - metrics = METRICS('pytorch') - loss = metrics['Loss']() - loss.hvd = hvd - loss.update(predicts1, labels1) - loss_result = loss.result() - self.assertEqual(loss_result, 0.5) - loss.update(predicts2, labels2) - loss_result = loss.result() - self.assertEqual(loss_result, 0.625) - loss.reset() - loss.update(predicts3, labels3) - self.assertEqual(loss.result(), 0.5) - - -if __name__ == "__main__": - unittest.main() - """ - - with open("fake_ut.py", "w", encoding="utf-8") as f: - f.write(fake_ut) - - -class TestDistributed(unittest.TestCase): - @classmethod - def setUpClass(cls): - build_fake_ut() - - @classmethod - def tearDownClass(cls): - os.remove("fake_ut.py") - shutil.rmtree("./saved", ignore_errors=True) - shutil.rmtree("runs", ignore_errors=True) - - def setUp(self): - logger.info(f"CPU: {cpuinfo.get_cpu_info()['brand_raw']}") - logger.info(f"Test: {sys.modules[__name__].__file__}-{self.__class__.__name__}-{self._testMethodName}") - - def tearDown(self): - logger.info(f"{self._testMethodName} done.\n") - - @unittest.skipIf( - version1_lt_version2(tf.version.VERSION, "2.10.0") or version1_gte_version2(tf.version.VERSION, "2.12.0"), - "Only test equal or above TF 2.10.0 and less than 2.12.0", - ) - def test_distributed(self): - distributed_cmd = "horovodrun -np 2 python fake_ut.py" - p = subprocess.Popen( - distributed_cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True - ) # nosec - try: - out, error = p.communicate() - matches = re.findall(r"FAILED", error.decode("utf-8")) - self.assertEqual(matches, []) - - matches = re.findall(r"OK", error.decode("utf-8")) - self.assertTrue(len(matches) > 0) - - except KeyboardInterrupt: - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - assert 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/distributed/test_distributed_pt_train.py b/test/distributed/test_distributed_pt_train.py index 6617ce27723..096089ec837 100644 --- a/test/distributed/test_distributed_pt_train.py +++ b/test/distributed/test_distributed_pt_train.py @@ -3,16 +3,8 @@ import signal import subprocess import unittest - -import horovod.torch as hvd -import torch -import torch.nn as nn import torchvision -from neural_compressor.data import Datasets -from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader - - def build_fake_py(): fake_py = """ import os From 49f37cf9f299d9800af1eb732842f20ee100afb9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 20 Dec 2023 12:56:59 +0000 Subject: [PATCH 10/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- test/distributed/test_distributed_pt_train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/distributed/test_distributed_pt_train.py b/test/distributed/test_distributed_pt_train.py index 096089ec837..598b7ffae8a 100644 --- a/test/distributed/test_distributed_pt_train.py +++ b/test/distributed/test_distributed_pt_train.py @@ -3,8 +3,10 @@ import signal import subprocess import unittest + import torchvision + def build_fake_py(): fake_py = """ import os From 0598a306e67bb79b33956790169b1f6197f37697 Mon Sep 17 00:00:00 2001 From: chensuyue Date: Thu, 21 Dec 2023 10:31:35 +0800 Subject: [PATCH 11/14] close horovod related test due to horovod install issue with torch>=2.1 Signed-off-by: chensuyue --- .azure-pipelines/scripts/ut/env_setup.sh | 2 -- .azure-pipelines/scripts/ut/run_basic_pt_pruning.sh | 2 +- test/data/test_dataloader.py | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.azure-pipelines/scripts/ut/env_setup.sh b/.azure-pipelines/scripts/ut/env_setup.sh index 699b307153d..2937c154300 100644 --- a/.azure-pipelines/scripts/ut/env_setup.sh +++ b/.azure-pipelines/scripts/ut/env_setup.sh @@ -96,8 +96,6 @@ elif [[ $(echo "${test_case}" | grep -c "tf pruning") != 0 ]]; then # horovod can't be install in the env with TF and PT together # so test distribute cases in the env with single fw installed pip install horovod -elif [[ $(echo "${test_case}" | grep -c "pt pruning") != 0 ]]; then - pip install horovod fi # test deps pip install coverage diff --git a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh index 590967409e6..828db2f6d96 100644 --- a/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh +++ b/.azure-pipelines/scripts/ut/run_basic_pt_pruning.sh @@ -14,7 +14,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.f lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))') cd /neural-compressor/test || exit 1 find ./pruning_with_pt -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh -find ./distributed -name "test_distributed_pt_train.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh +# find ./distributed -name "test_distributed_pt_train.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR} diff --git a/test/data/test_dataloader.py b/test/data/test_dataloader.py index 39672820e50..6a1eaff7214 100644 --- a/test/data/test_dataloader.py +++ b/test/data/test_dataloader.py @@ -87,7 +87,7 @@ def test_pytorch_dataset(self): "dataset": {"FashionMNIST": {"root": "./", "train": True, "download": True}}, "transform": {"Resize": {"size": 24}}, "filter": None, - "distributed": True, + "distributed": False, # close temperately due to horovod install issue with torch>=2.1 } dataloader = create_dataloader("pytorch", dataloader_args) self.assertEqual(dataloader.dataloader.sampler.__class__.__name__, "DistributedSampler") From 88789407af1f5fa2ea3fac0da13946622ed9ea6c Mon Sep 17 00:00:00 2001 From: chensuyue Date: Thu, 21 Dec 2023 10:31:55 +0800 Subject: [PATCH 12/14] remove invalid scripts Signed-off-by: chensuyue --- .../scripts/ut/run_basic_v2.1_ipex.sh | 33 ------------------- 1 file changed, 33 deletions(-) delete mode 100644 .azure-pipelines/scripts/ut/run_basic_v2.1_ipex.sh diff --git a/.azure-pipelines/scripts/ut/run_basic_v2.1_ipex.sh b/.azure-pipelines/scripts/ut/run_basic_v2.1_ipex.sh deleted file mode 100644 index 03c4d3c6979..00000000000 --- a/.azure-pipelines/scripts/ut/run_basic_v2.1_ipex.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -python -c "import neural_compressor as nc;print(nc.version.__version__)" -test_case="run basic ipex v2.1" -echo "${test_case}" - -echo "specify fwk version..." -export ipex_version='2.1.0' - -echo "set up UT env..." -bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}" -export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file -lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))') -cd /neural-compressor/test || exit 1 -find ./ipex -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh -find ./algorithm -name "test_smooth_quant.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh - -LOG_DIR=/neural-compressor/log_dir -mkdir -p ${LOG_DIR} -ut_log_name=${LOG_DIR}/ut_ipex_v2.1.log - -echo "cat run.sh..." -sort run.sh -o run.sh -cat run.sh | tee ${ut_log_name} -echo "------UT start-------" -bash -x run.sh 2>&1 | tee -a ${ut_log_name} -cp .coverage ${LOG_DIR}/.coverage.ipex -echo "------UT end -------" - -if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then - echo "Find errors in UT test, please check the output..." - exit 1 -fi -echo "UT finished successfully! " \ No newline at end of file From d5501481adc4904ac6d72797a30cbc8bfbf6082b Mon Sep 17 00:00:00 2001 From: chensuyue Date: Thu, 21 Dec 2023 20:19:12 +0800 Subject: [PATCH 13/14] fix the issue Signed-off-by: chensuyue --- test/data/test_dataloader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/data/test_dataloader.py b/test/data/test_dataloader.py index 6a1eaff7214..1880de40f67 100644 --- a/test/data/test_dataloader.py +++ b/test/data/test_dataloader.py @@ -90,7 +90,7 @@ def test_pytorch_dataset(self): "distributed": False, # close temperately due to horovod install issue with torch>=2.1 } dataloader = create_dataloader("pytorch", dataloader_args) - self.assertEqual(dataloader.dataloader.sampler.__class__.__name__, "DistributedSampler") + self.assertEqual(dataloader.dataloader.sampler.__class__.__name__, "SequentialSampler") for data in dataloader: self.assertEqual(len(data[0]), 2) self.assertEqual(data[0][0].shape, (24, 24)) From a482be61daa83f485cab5a1c8bd2b6ed476bce46 Mon Sep 17 00:00:00 2001 From: chensuyue Date: Fri, 22 Dec 2023 16:23:07 +0800 Subject: [PATCH 14/14] freeze transformers version for ITREX CI issue Signed-off-by: chensuyue --- .azure-pipelines/scripts/ut/run_itrex.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.azure-pipelines/scripts/ut/run_itrex.sh b/.azure-pipelines/scripts/ut/run_itrex.sh index d70da260694..9a5ce4ff6d1 100644 --- a/.azure-pipelines/scripts/ut/run_itrex.sh +++ b/.azure-pipelines/scripts/ut/run_itrex.sh @@ -11,6 +11,8 @@ bash /intel-extension-for-transformers/.github/workflows/script/prepare_env.sh bash /intel-extension-for-transformers/.github/workflows/script/install_binary.sh # prepare test env +# tmp install transformers for incompatible issue +pip install transformers==4.34.1 pip install -r /intel-extension-for-transformers/tests/requirements.txt LOG_DIR=/neural-compressor/log_dir mkdir -p ${LOG_DIR}