diff --git a/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/README.md b/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/README.md index 3820d0e159..9ef8eba44d 100644 --- a/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/README.md +++ b/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/README.md @@ -36,7 +36,7 @@ performance to see the benefit of Intel® Neural Compressor. | Optimized for | Description |:--- |:--- -| OS | Linux* Ubuntu* 18.04 or later +| OS | Linux* Ubuntu* 18.04 or later, Windows 10* | Hardware | The Second Generation Intel® Xeon® Scalable processor family or newer Xeon® processors | Software | Intel® oneAPI AI Analytics Toolkit 2021.1 or later | What you will learn | How to use Intel® Neural Compressor tool to quantize the AI model based on TensorFlow* and speed up the inference on Intel® Xeon® CPUs @@ -92,6 +92,8 @@ git checkout 2021.1-beta10 ## Prepare Software Environment +### Linux (Ubuntu) + You can run this sample in a Jupyter notebook on your local computer or in the Intel® DevCloud. @@ -184,6 +186,19 @@ https://software.intel.com/content/www/us/en/develop/articles/installation-guide This step is optional if you plan to open the notebook on your local server. 
+### Windows 10 + +Set up the Conda environment **user_tensorflow** with the following commands: + +``` +conda deactivate +conda env remove -n user_tensorflow +conda create -n user_tensorflow python=3.9 -y +conda activate user_tensorflow +conda install -n user_tensorflow pycocotools -c esri -y +conda install -n user_tensorflow neural-compressor tensorflow -c conda-forge -c intel -y +conda install -n user_tensorflow jupyter runipy notebook -y +``` ## Run the Sample diff --git a/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/inc_sample_tensorflow.ipynb b/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/inc_sample_tensorflow.ipynb index c93a2bb151..9611ce8f53 100644 --- a/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/inc_sample_tensorflow.ipynb +++ b/AI-and-Analytics/Getting-Started-Samples/INC-Sample-for-Tensorflow/inc_sample_tensorflow.ipynb @@ -55,7 +55,9 @@ " print(\"iLiT version {}\".format(inc.__version__)) \n", "\n", "import matplotlib.pyplot as plt\n", - "import numpy as np" + "import numpy as np\n", + "\n", + "from IPython import display" ] }, { @@ -148,7 +150,7 @@ "metadata": {}, "outputs": [], "source": [ - "epochs = 3\n", + "epochs = 1\n", "\n", "alexnet.train_mod(model, data, epochs)" ] }, { @@ -194,7 +196,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -la fp32_frezon.pb" + "%ls -la fp32_frezon.pb" ] }, { @@ -257,7 +259,7 @@ "metadata": {}, "outputs": [], "source": [ - "!cat alexnet.py" + "display.Code('alexnet.py')" ] }, { @@ -280,7 +282,7 @@ "metadata": {}, "outputs": [], "source": [ - "!cat alexnet.yaml" + "display.Code('alexnet.yaml')" ] }, { @@ -299,7 +301,6 @@ }, "outputs": [], "source": [ - "\n", "def auto_tune(input_graph_path, yaml_config, batch_size): \n", " fp32_graph = alexnet.load_pb(input_graph_path)\n", " quan = inc.Quantization(yaml_config)\n", @@ -341,7 +342,7 @@ }, "outputs": [], "source": [ - "!cat inc_quantize_model.py" + "display.Code('inc_quantize_model.py')" ] }, { @@
-388,7 +389,7 @@ "metadata": {}, "outputs": [], "source": [ - "!cat profiling_inc.py" + "display.Code('profiling_inc.py')" ] }, { @@ -435,9 +436,9 @@ }, "outputs": [], "source": [ - "!cat 32.json\n", + "display.Code('32.json')\n", "!echo \" \"\n", - "!cat 8.json" + "display.Code('8.json')" ] }, { @@ -574,7 +575,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.9.12" } }, "nbformat": 4,