diff --git a/.azure-pipelines/scripts/ut/run_itrex.sh b/.azure-pipelines/scripts/ut/run_itrex.sh
index e2c4360b6b5..2bbbf958398 100644
--- a/.azure-pipelines/scripts/ut/run_itrex.sh
+++ b/.azure-pipelines/scripts/ut/run_itrex.sh
@@ -19,6 +19,8 @@ sed -i '/neural-compressor.git/d' /intel-extension-for-transformers/tests/requir
pip install -r /intel-extension-for-transformers/tests/requirements.txt
# workaround
pip install onnx==1.15.0
+echo "pip list itrex ut deps..."
+pip list
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_itrex.log
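The new `pip list` dump makes the resolved dependency set visible in the CI log, which is what you want when a pinned workaround like `onnx==1.15.0` silently drifts. A minimal sketch of the same idea taken one step further, assuming the `ut_log_name` variable from this script (not part of this PR):

```bash
# Sketch only: confirm the onnx==1.15.0 workaround actually resolved before the
# unit tests run, and leave a trace in the same log the pipeline already uploads.
installed=$(pip show onnx | awk '/^Version:/ {print $2}')
if [ "${installed}" != "1.15.0" ]; then
    echo "[WARN] expected onnx==1.15.0, got ${installed:-<not installed>}" | tee -a "${ut_log_name}"
fi
```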
diff --git a/.azure-pipelines/ut-3x-pt-fp8.yml b/.azure-pipelines/ut-3x-pt-fp8.yml
index e8a992b6e65..6f36ddecc64 100644
--- a/.azure-pipelines/ut-3x-pt-fp8.yml
+++ b/.azure-pipelines/ut-3x-pt-fp8.yml
@@ -41,7 +41,7 @@ stages:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "3x/run_3x_pt_fp8"
uploadPath: $(UPLOAD_PATH)
- utArtifact: "ut_3x_pt_fp8"
+ utArtifact: "ut_3x"
 - stage: Torch_habana_baseline
displayName: Torch 3x Habana FP8 baseline
@@ -56,7 +56,7 @@ stages:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "3x/run_3x_pt_fp8"
uploadPath: $(UPLOAD_PATH)
- utArtifact: "ut_3x_pt_fp8_baseline"
+ utArtifact: "ut_3x_baseline"
 - stage: Coverage
displayName: "Coverage Compare"
diff --git a/README.md b/README.md
index 349a45a9aa3..f4694e991e9 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
Intel® Neural Compressor
===========================
-An open-source Python library supporting popular model compression techniques on all mainstream deep learning frameworks (TensorFlow, PyTorch, ONNX Runtime, and MXNet)
+An open-source Python library supporting popular model compression techniques on all mainstream deep learning frameworks (TensorFlow, PyTorch, and ONNX Runtime)
[](https://github.com/intel/neural-compressor)
[](https://github.com/intel/neural-compressor/releases)
@@ -15,7 +15,7 @@ Intel® Neural Compressor
---
-Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks such as [TensorFlow](https://www.tensorflow.org/), [PyTorch](https://pytorch.org/), [ONNX Runtime](https://onnxruntime.ai/), and [MXNet](https://mxnet.apache.org/),
+Intel® Neural Compressor aims to provide popular model compression techniques such as quantization, pruning (sparsity), distillation, and neural architecture search on mainstream frameworks such as [TensorFlow](https://www.tensorflow.org/), [PyTorch](https://pytorch.org/), and [ONNX Runtime](https://onnxruntime.ai/),
as well as Intel extensions such as [Intel Extension for TensorFlow](https://github.com/intel/intel-extension-for-tensorflow) and [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch).
In particular, the tool provides the key features, typical examples, and open collaborations as below:
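For orientation, not part of this diff: the library installs from PyPI under the same name used in the repository paths above.

```bash
# Standard installation; the package name matches the repository name.
pip install neural-compressor
```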
diff --git a/docs/source/CONTRIBUTING.md b/docs/source/CONTRIBUTING.md
index 3378ba30e73..4decbfb6d0d 100644
--- a/docs/source/CONTRIBUTING.md
+++ b/docs/source/CONTRIBUTING.md
@@ -51,7 +51,7 @@ And generally use [Azure Cloud Instance](https://azure.microsoft.com/en-us/prici
| Code Scan | Bandit/CopyRight/DocStyle/SpellCheck | PASS |
| [DCO](https://github.com/apps/dco/) | Use `git commit -s` to sign off | PASS |
| Unit Test | Pytest scripts under [test](/test) | PASS (No failure, No core dump, No segmentation fault, No coverage drop) |
-| Model Test | Pytorch + TensorFlow + ONNX Runtime + MXNet | PASS (Functionality pass, FP32/INT8 No performance regression) |
+| Model Test | PyTorch + TensorFlow + ONNX Runtime | PASS (Functionality pass, FP32/INT8 No performance regression) |
## Support
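As the DCO row above notes, commits must be signed off for the check to pass:

```bash
# -s appends the Signed-off-by trailer that the DCO check looks for.
git commit -s -m "remove MXNet from the supported framework lists"
```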