
Commit af10495

add PT 3dunet model (#811)
Signed-off-by: Cheng, Zixuan <[email protected]>
1 parent 55d618f commit af10495

28 files changed: +1994 −0 lines

examples/.config/model_params_pytorch.json

Lines changed: 7 additions & 0 deletions
@@ -311,6 +311,13 @@
         "batch_size": 64,
         "main_script": "run_glue.py"
     },
+    "3dunet": {
+        "model_src_dir": "image_recognition/3d-unet/quantization/ptq/fx",
+        "dataset_location": "/tf_dataset/dataset/mlperf_3dunet/build",
+        "input_model": "/tf_dataset/pytorch/mlperf_3dunet/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1",
+        "batch_size": 100,
+        "main_script": "run.py"
+    },
     "rnnt": {
         "model_src_dir": "speech_recognition/rnnt/quantization/ptq_dynamic/eager",
         "dataset_location": "/tf_dataset/pytorch/rnnt/convert_dataset/",

examples/README.md

Lines changed: 6 additions & 0 deletions
@@ -419,6 +419,12 @@ Intel® Neural Compressor validated examples with multiple compression technique
         <td>Post-Training Static Quantization</td>
         <td><a href="./pytorch/image_recognition/resnest/quantization/ptq/fx">fx</a></td>
     </tr>
+    <tr>
+        <td>3D-UNet</td>
+        <td>Image Recognition</td>
+        <td>Post-Training Static Quantization</td>
+        <td><a href="./pytorch/image_recognition/3d-unet/quantization/ptq/fx">fx</a></td>
+    </tr>
     <tr>
         <td>SSD ResNet34</td>
         <td>Object Detection</td>
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
build/
Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
build/
Lines changed: 204 additions & 0 deletions
@@ -0,0 +1,204 @@
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

SHELL := /bin/bash

MAKEFILE_NAME := $(lastword $(MAKEFILE_LIST))
UNAME := $(shell whoami)
UID := $(shell id -u `whoami`)
GROUPNAME := $(shell id -gn `whoami`)
GROUPID := $(shell id -g `whoami`)

HOST_VOL ?= ${PWD}
CONTAINER_VOL ?= /workspace

BUILD_DIR := build
ifndef DOWNLOAD_DATA_DIR
export DOWNLOAD_DATA_DIR := $(HOST_VOL)/$(BUILD_DIR)/MICCAI_BraTS_2019_Data_Training
endif
RAW_DATA_DIR := $(BUILD_DIR)/raw_data
PREPROCESSED_DATA_DIR := $(BUILD_DIR)/preprocessed_data
POSTPROCESSED_DATA_DIR := $(BUILD_DIR)/postprocessed_data
MODEL_DIR := $(BUILD_DIR)/model
RESULT_DIR := $(BUILD_DIR)/result
MLPERF_CONF := $(BUILD_DIR)/mlperf.conf
PYTORCH_MODEL := $(RESULT_DIR)/fold_1.zip
ONNX_MODEL := $(MODEL_DIR)/224_224_160.onnx
ONNX_DYNAMIC_BS_MODEL := $(MODEL_DIR)/224_224_160_dynamic_bs.onnx
TF_MODEL := $(MODEL_DIR)/224_224_160.pb
OPENVINO_MODEL := $(MODEL_DIR)/brats_model_checkpoint_final_fold1_H224_W224_D160_C4.bin
OPENVINO_MODEL_METADATA := $(MODEL_DIR)/brats_model_checkpoint_final_fold1_H224_W224_D160_C4.xml

# Env variables needed by nnUnet
export nnUNet_raw_data_base=$(RAW_DATA_DIR)
export nnUNet_preprocessed=$(PREPROCESSED_DATA_DIR)
export RESULTS_FOLDER=$(RESULT_DIR)

.PHONY: setup
setup: check_download_data_dir create_directories
	@echo "Running basic setup..."
	@if [ ! -e $(MLPERF_CONF) ]; then \
		cp ../../../mlperf.conf $(MLPERF_CONF); \
	fi
	@$(MAKE) -f $(MAKEFILE_NAME) init_submodule
	@$(MAKE) -f $(MAKEFILE_NAME) download_model

.PHONY: check_download_data_dir
check_download_data_dir:
	@if [ ! -e $(DOWNLOAD_DATA_DIR) ]; then \
		echo "Please set environment variable DOWNLOAD_DATA_DIR to <path/to/MICCAI_BraTS_2019_Data_Training>" && false ; \
	fi

.PHONY: create_directories
create_directories:
	@if [ ! -e $(BUILD_DIR) ]; then \
		mkdir $(BUILD_DIR); \
	fi
	@if [ ! -e $(MODEL_DIR) ]; then \
		mkdir $(MODEL_DIR); \
	fi
	@if [ ! -e $(RESULT_DIR) ]; then \
		mkdir $(RESULT_DIR); \
	fi

.PHONY: init_submodule
init_submodule:
	@echo "Initialize nnUnet submodule.."
	#@git submodule update --init nnUnet

.PHONY: download_model
download_model:
	@echo "Download models..."
	@$(MAKE) -f $(MAKEFILE_NAME) download_pytorch_model
	@$(MAKE) -f $(MAKEFILE_NAME) download_onnx_model
	@$(MAKE) -f $(MAKEFILE_NAME) download_tf_model
	@$(MAKE) -f $(MAKEFILE_NAME) download_openvino_model

.PHONY: download_pytorch_model
download_pytorch_model: create_directories
	@echo "Downloading PyTorch model from Zenodo..."
	@if [ ! -e $(PYTORCH_MODEL) ]; then \
		wget -O $(PYTORCH_MODEL) https://zenodo.org/record/3904106/files/fold_1.zip?download=1 \
		&& cd $(RESULT_DIR) && unzip -o fold_1.zip; \
	fi

.PHONY: download_onnx_model
download_onnx_model: create_directories
	@echo "Downloading ONNX model from Zenodo..."
	@if [ ! -e $(ONNX_MODEL) ]; then \
		wget -O $(ONNX_MODEL) https://zenodo.org/record/3928973/files/224_224_160.onnx?download=1; \
	fi
	@if [ ! -e $(ONNX_DYNAMIC_BS_MODEL) ]; then \
		wget -O $(ONNX_DYNAMIC_BS_MODEL) https://zenodo.org/record/3928973/files/224_224_160_dyanmic_bs.onnx?download=1; \
	fi

.PHONY: download_tf_model
download_tf_model: create_directories
	@echo "Downloading TF model from Zenodo..."
	@if [ ! -e $(TF_MODEL) ]; then \
		wget -O $(TF_MODEL) https://zenodo.org/record/3928991/files/224_224_160.pb?download=1; \
	fi

.PHONY: download_openvino_model
download_openvino_model: create_directories
	@echo "Downloading OpenVINO model from Zenodo..."
	@if [ ! -e $(OPENVINO_MODEL) ]; then \
		wget -O $(OPENVINO_MODEL) https://zenodo.org/record/3929002/files/brats_model_checkpoint_final_fold1_H224_W224_D160_C4.bin?download=1; \
	fi
	@if [ ! -e $(OPENVINO_MODEL_METADATA) ]; then \
		wget -O $(OPENVINO_MODEL_METADATA) https://zenodo.org/record/3929002/files/brats_model_checkpoint_final_fold1_H224_W224_D160_C4.xml?download=1; \
	fi

.PHONY: convert_onnx_model
convert_onnx_model: download_pytorch_model
	@echo "Converting PyTorch model to ONNX model..."
	@if [ ! -e $(ONNX_MODEL) ]; then \
		python3 unet_pytorch_to_onnx.py; \
	fi

.PHONY: convert_tf_model
convert_tf_model: convert_onnx_model
	@echo "Converting ONNX model to TF model..."
	@if [ ! -e $(TF_MODEL) ]; then \
		python3 unet_onnx_to_tf.py; \
	fi

.PHONY: preprocess_data
preprocess_data: create_directories
	@echo "Restructuring raw data to $(RAW_DATA_DIR)..."
	@if [ ! -e $(RAW_DATA_DIR) ]; then \
		mkdir $(RAW_DATA_DIR); \
	fi
	@python3 Task043_BraTS_2019.py --downloaded_data_dir $(DOWNLOAD_DATA_DIR)
	@echo "Preprocessing and saving preprocessed data to $(PREPROCESSED_DATA_DIR)..."
	@if [ ! -e $(PREPROCESSED_DATA_DIR) ]; then \
		mkdir $(PREPROCESSED_DATA_DIR); \
	fi
	@python3 preprocess.py

.PHONY: mkdir_postprocessed_data
mkdir_postprocessed_data:
	@if [ ! -e $(POSTPROCESSED_DATA_DIR) ]; then \
		mkdir $(POSTPROCESSED_DATA_DIR); \
	fi

.PHONY: run_pytorch_performance
run_pytorch_performance:
	@python3 run.py --backend=pytorch

.PHONY: run_pytorch_accuracy
run_pytorch_accuracy: mkdir_postprocessed_data
	@python3 run.py --backend=pytorch --accuracy

.PHONY: run_pytorch_NC_tuning
run_pytorch_NC_tuning: mkdir_postprocessed_data
	@python3 run.py --backend=pytorch --accuracy --tune --mlperf_conf=./mlperf.conf

.PHONY: run_onnxruntime_performance
run_onnxruntime_performance:
	@python3 run.py --backend=onnxruntime --model=build/model/224_224_160.onnx

.PHONY: run_onnxruntime_accuracy
run_onnxruntime_accuracy: mkdir_postprocessed_data
	@python3 run.py --backend=onnxruntime --model=build/model/224_224_160.onnx --accuracy

.PHONY: run_tf_performance
run_tf_performance:
	@python3 run.py --backend=tf --model=build/model/224_224_160.pb

.PHONY: run_tf_accuracy
run_tf_accuracy: mkdir_postprocessed_data
	@python3 run.py --backend=tf --model=build/model/224_224_160.pb --accuracy

.PHONY: evaluate
evaluate:
	@python3 accuracy-brats.py

.PHONY: clean
clean:
	@rm -rf build
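Taken together, a typical end-to-end flow through these targets looks like the following (a sketch; the dataset path is illustrative and must point at the extracted BraTS 2019 training data):

```shell
# illustrative path; set to wherever MICCAI_BraTS_2019_Data_Training was extracted
export DOWNLOAD_DATA_DIR=/data/MICCAI_BraTS_2019_Data_Training

make setup                  # copy mlperf.conf, init the nnUnet submodule, download models
make preprocess_data        # restructure the raw data and run nnUNet preprocessing
make run_pytorch_accuracy   # FP32 accuracy run via run.py
make run_pytorch_NC_tuning  # accuracy run with Intel Neural Compressor tuning enabled
make evaluate               # compute BraTS accuracy from the postprocessed results
```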
Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
Step-by-Step
============

This example demonstrates how to reproduce the 3D-UNet quantization and benchmarking results with Intel® Neural Compressor.

The 3D-Unet source code comes from [mlperf](https://github.com/mlcommons/inference/tree/v1.0.1/vision/medical_imaging/3d-unet), commit SHA **b7e8f0da170a421161410d18e5d2a05d75d6bccf**; the [nnUnet](https://github.com/MIC-DKFZ/nnUNet) commit SHA is **b38c69b345b2f60cd0d053039669e8f988b0c0af**. Users can diff those sources against this example to see which changes were made to integrate it with Intel® Neural Compressor, as sketched below.
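For instance, a minimal way to fetch the pinned mlperf sources and compare them with this directory (a sketch; run from this example's directory, clone path illustrative):

```shell
# clone the upstream inference repo and pin it to the referenced commit
git clone https://github.com/mlcommons/inference.git
git -C inference checkout b7e8f0da170a421161410d18e5d2a05d75d6bccf
# diff the upstream 3d-unet sources against this example
diff -ru inference/vision/medical_imaging/3d-unet . | less
```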
8+
The model is performing on [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/data.html) brain tumor segmentation task.
9+
10+
# Prerequisite
11+
## 1. Environment
12+
Python 3.6 or higher version is recommended.
13+
The dependent packages are all in requirements, please install as following.
14+
```shell
15+
cd examples/pytorch/image_recognition/3d-unet/quantization/ptq/fx
16+
pip install -r requirements.txt
17+
```
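Optionally, confirm that the key dependency imports cleanly before proceeding (a quick sanity check):

```shell
python -c "import torch; print(torch.__version__)"
```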
## 2. Preprocess Dataset
```shell
# download BraTS 2019 from https://www.med.upenn.edu/cbica/brats2019/data.html
export DOWNLOAD_DATA_DIR=<path/to/MICCAI_BraTS_2019_Data_Training> # point to the location of the downloaded BraTS 2019 Training dataset

# install dependencies required by the data preprocessing script
git clone https://github.com/MIC-DKFZ/nnUNet.git --recursive
cd nnUNet/
git checkout b38c69b345b2f60cd0d053039669e8f988b0c0af
# replace the sklearn requirement in this older version with scikit-learn
sed -i 's/sklearn/scikit-learn/g' setup.py
python setup.py install
cd ..

# download the PyTorch model
make download_pytorch_model

# generate the preprocessed validation data
make preprocess_data

# create the postprocessed-data dir
make mkdir_postprocessed_data

# generate the preprocessed calibration data
python preprocess.py --preprocessed_data_dir=./build/calib_preprocess/ --validation_fold_file=./brats_cal_images_list.txt

# install the mlperf loadgen required by the tuning script
git clone https://github.com/mlcommons/inference.git --recursive
cd inference
git checkout b7e8f0da170a421161410d18e5d2a05d75d6bccf
cd loadgen
pip install absl-py
python setup.py install
cd ../..
```
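After these steps, the build tree should contain the artifacts the tuning script expects (directory names follow the Makefile defaults; a quick check):

```shell
ls build/preprocessed_data/   # preprocessed validation samples
ls build/calib_preprocess/    # preprocessed calibration samples
ls build/result/              # downloaded nnUNet fold_1 checkpoint
```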
# Run
## 1. Quantization

```shell
make run_pytorch_NC_tuning
```

or

```shell
python run.py --model_dir=build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1 --backend=pytorch --accuracy --preprocessed_data_dir=build/preprocessed_data/ --mlperf_conf=./mlperf.conf --tune
```
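Note that the Makefile exports nnUNet's environment variables before running. If you invoke `run.py` directly as above and hit path errors, you may need to export them yourself (a sketch mirroring the Makefile defaults, relative to this example directory):

```shell
export nnUNet_raw_data_base=build/raw_data
export nnUNet_preprocessed=build/preprocessed_data
export RESULTS_FOLDER=build/result
```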
## 2. Benchmark
```bash
# int8
sh run_benchmark.sh --int8=true --input_model=build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1 --dataset_location=build/preprocessed_data/
# fp32
sh run_benchmark.sh --input_model=build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1 --dataset_location=build/preprocessed_data/
```
## 3. Model Baseline
| model | framework | accuracy | dataset | model link | model source | precision |
| - | - | - | - | - | - | - |
| 3D-Unet | PyTorch | **mean = 0.85300** (whole tumor = 0.9141, tumor core = 0.8679, enhancing tumor = 0.7770) | [Fold 1](folds/fold1_validation.txt) of [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/data.html) Training Dataset | [from zenodo](https://zenodo.org/record/3904106) | Trained in PyTorch using code from [nnUnet](https://github.com/MIC-DKFZ/nnUNet) on [Fold 0](folds/fold0_validation.txt), [Fold 2](folds/fold2_validation.txt), [Fold 3](folds/fold3_validation.txt), and [Fold 4](folds/fold4_validation.txt) of the [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/data.html) Training Dataset. | fp32 |
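For reference, the reported mean is the arithmetic mean of the three per-region Dice scores: (0.9141 + 0.8679 + 0.7770) / 3 ≈ 0.8530.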
