
Commit c4e3520

replace UT DATASETS
Signed-off-by: changwa1 <[email protected]>
1 parent 929fa7a commit c4e3520

27 files changed (+95 / -94 lines)
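
For orientation, below is a minimal before/after sketch of the rename this commit applies, reconstructed from the diffs that follow (the 'tensorflow' framework key and dummy-dataset shape are taken from test/benchmark/test_benchmark.py; everything else is illustrative, not part of the commit):

    # old spelling, removed by this commit
    # from neural_compressor.data import DATASETS, DATALOADERS
    # dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)

    # new spelling, used throughout the updated tests
    from neural_compressor.data import Datasets, DATALOADERS

    dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
    dataloader = DATALOADERS['tensorflow'](dataset)
    for inputs, labels in dataloader:
        # each batch pairs dummy image data with dummy labels
        break

Only the dataset-registry name changes; DATALOADERS, TRANSFORMS, and FILTERS keep their existing spellings in these diffs.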

neural_compressor/experimental/data/transforms/imagenet_transform.py

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@

 import numpy as np
 from neural_compressor.utils.utility import LazyImport
+from neural_compressor.utils import logger
 from .transform import transform_registry, BaseTransform
 tf = LazyImport('tensorflow')
 cv2 = LazyImport('cv2')
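
The hunk above only adds the logger import; its call sites are outside this diff. A hypothetical sketch of how a transform in this module might use it (the helper name and warning text are assumptions for illustration, not code from this commit):

    from neural_compressor.utils import logger

    def _warn_on_unexpected_dtype(image):
        # hypothetical helper: log a warning instead of failing silently
        if str(image.dtype) != 'float32':
            logger.warning('expected float32 input, got {}'.format(image.dtype))
        return image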

test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py

Lines changed: 7 additions & 7 deletions
@@ -10,7 +10,7 @@
 from onnx import onnx_pb as onnx_proto
 from onnx import helper, TensorProto, numpy_helper
 from neural_compressor.adaptor import FRAMEWORKS
-from neural_compressor.data import DATASETS, DATALOADERS
+from neural_compressor.data import Datasets, DATALOADERS
 from neural_compressor.experimental import Quantization, common
 from neural_compressor.experimental import Benchmark, common
 from neural_compressor import options
@@ -529,7 +529,7 @@ def build_gemm_model():
 def build_benchmark():
 seq = '''
 from neural_compressor.experimental import Benchmark
-from neural_compressor.data import DATASETS, DATALOADERS
+from neural_compressor.data import Datasets, DATALOADERS
 from neural_compressor import conf
 from onnx import onnx_pb as onnx_proto
 from onnx import helper, TensorProto, numpy_helper
@@ -555,7 +555,7 @@ def reverse_matrix(x):
 graph = helper.make_graph(nodes, 'test0', [input0], [output0])
 model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})

-datasets = DATASETS('onnxrt_qlinearops')
+datasets = Datasets('onnxrt_qlinearops')
 ext_dataset = datasets['dummy'](shape=(10, 2), low=0., high=1., label=True)
 ext_dataloader = DATALOADERS['onnxrt_qlinearops'](ext_dataset)
@@ -590,26 +590,26 @@ class TestAdaptorONNXRT(unittest.TestCase):
 rn50_export_path = "rn50.onnx"
 rn50_model = torchvision.models.resnet50()

-datasets = DATASETS('onnxrt_qlinearops')
+datasets = Datasets('onnxrt_qlinearops')
 cv_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True)
 cv_dataloader = DATALOADERS['onnxrt_qlinearops'](cv_dataset)

 ir3_dataset = datasets['dummy'](shape=(10, 2048), low=0., high=1., label=True)
 ir3_dataloader = DATALOADERS['onnxrt_qlinearops'](ir3_dataset)

-gather_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(5, 100, 4), label=True)
+gather_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(5, 100, 4), label=True)
 gather_dataloader = DATALOADERS['onnxrt_qlinearops'](gather_dataset)

 ext_dataset = datasets['dummy'](shape=(10, 2), low=0., high=1., label=True)
 ext_dataloader = DATALOADERS['onnxrt_qlinearops'](ext_dataset)

-rename_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(5, 1, 200), label=True)
+rename_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(5, 1, 200), label=True)
 rename_dataloader = DATALOADERS['onnxrt_qlinearops'](rename_dataset)

 matmul_dataset = MatmulDataset()
 matmul_dataloader = DATALOADERS['onnxrt_qlinearops'](matmul_dataset)

-conv_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(10, 3, 1, 3), label=True)
+conv_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(10, 3, 1, 3), label=True)
 conv_dataloader = DATALOADERS['onnxrt_qlinearops'](conv_dataset)

 @classmethod

test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 from neural_compressor.experimental.data.datasets.dataset import Dataset
 from neural_compressor.adaptor.ox_utils.calibration import ONNXRTAugment
 from neural_compressor.model.onnx_model import ONNXModel
-from neural_compressor.data import DATASETS, DATALOADERS
+from neural_compressor.data import Datasets, DATALOADERS

 def generate_input_initializer(tensor_shape, tensor_dtype, input_name):
 '''
@@ -55,7 +55,7 @@ def create_nlp_session():
 node = onnx.helper.make_node('Gather', ['D', 'B'], ['C'], name='gather')
 graph = helper.make_graph([squeeze, node], 'test_graph_1', [A], [C], [B_init])
 model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
-datasets = DATASETS('onnxrt_qlinearops')
+datasets = Datasets('onnxrt_qlinearops')
 dataset = datasets['dummy_v2'](input_shape=(100, 4), label_shape=(1,))

 dataloader = DATALOADERS['onnxrt_qlinearops'](dataset)

test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2.x.py

Lines changed: 7 additions & 7 deletions
@@ -7,8 +7,8 @@
 import unittest
 import os
 from neural_compressor import PostTrainingQuantConfig, QuantizationAwareTrainingConfig, set_workspace
-from neural_compressor.data import DATASETS, DATALOADERS
-from neural_compressor.experimental.data.datasets.dataset import DATASETS
+from neural_compressor.data import Datasets, DATALOADERS
+from neural_compressor.experimental.data.datasets.dataset import Datasets
 from neural_compressor import quantization
 from neural_compressor.training import prepare_compression
 from neural_compressor.utils.pytorch import load
@@ -307,7 +307,7 @@ def tearDownClass(self):
 def test_fx_quant(self):
 for approach in ["qat", "static"]:
 model_origin = resnet18()
-dataset = DATASETS("pytorch")["dummy"]((10, 3, 224, 224), label=True)
+dataset = Datasets("pytorch")["dummy"]((10, 3, 224, 224), label=True)
 dataloader = DATALOADERS["pytorch"](dataset)
 if approach == "qat":
 model = copy.deepcopy(model_origin)
@@ -343,7 +343,7 @@ def test_fx_quant(self):
 for approach in ["qat", "static"]:
 model_origin = M()
 # run fx_quant in neural_compressor and save the quantized GraphModule
-dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224), label=True)
+dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224), label=True)
 dataloader = DATALOADERS["pytorch"](dataset)
 if approach == "qat":
 model = copy.deepcopy(model_origin)
@@ -419,7 +419,7 @@ def eval_func(model):
 nhid = 256,
 nlayers = 2,
 )
-dataset = DATASETS("pytorch")["dummy"]((3, 10))
+dataset = Datasets("pytorch")["dummy"]((3, 10))
 dataloader = DATALOADERS["pytorch"](dataset)
 # run fx_quant in neural_compressor and save the quantized GraphModule
 if approach == "qat":
@@ -445,7 +445,7 @@ def eval_func(model):
 def test_fx_sub_module_quant(self):
 for approach in ["qat", "static"]:
 model_origin = DynamicControlModel()
-dataset = DATASETS("pytorch")["dummy"]((1, 3, 224, 224))
+dataset = Datasets("pytorch")["dummy"]((1, 3, 224, 224))
 dataloader = DATALOADERS["pytorch"](dataset)
 # run fx_quant in neural_compressor and save the quantized GraphModule
 if approach == "qat":
@@ -484,7 +484,7 @@ def test_fx_sub_module_quant(self):
 def test_mix_precision(self):
 model_origin = DynamicControlModel()
 # run fx_quant in neural_compressor and save the quantized GraphModule
-dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224))
+dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224))
 dataloader = DATALOADERS["pytorch"](dataset)
 set_workspace=("./saved")
 conf = PostTrainingQuantConfig(op_name_list=ptq_fx_op_name_list)

test/adaptor/pytorch_adaptor/test_torch2onnx.py

Lines changed: 2 additions & 2 deletions
@@ -9,7 +9,7 @@
 import neural_compressor.adaptor.pytorch as nc_torch
 from neural_compressor import quantization
 from neural_compressor.config import PostTrainingQuantConfig
-from neural_compressor.experimental.data.datasets.dataset import DATASETS
+from neural_compressor.experimental.data.datasets.dataset import Datasets
 from packaging.version import Version
 from torch.quantization import QuantStub, DeQuantStub

@@ -209,7 +209,7 @@ def test_fx_quant(self):
 model = DynamicControlModel()
 # run fx_quant in neural_compressor and save the quantized GraphModule
 conf = PostTrainingQuantConfig(approach=approach)
-dataset = DATASETS("pytorch")['dummy']((100, 3, 224, 224))
+dataset = Datasets("pytorch")['dummy']((100, 3, 224, 224))
 dataloader = torch.utils.data.DataLoader(dataset)
 q_model = quantization.fit(model,
 conf,

test/benchmark/test_benchmark.py

Lines changed: 6 additions & 6 deletions
@@ -45,8 +45,8 @@ def build_benchmark():
 arg_parser = ArgumentParser(description='Parse args')
 arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input odel')
 args = arg_parser.parse_args()
-from neural_compressor.data import DATASETS
-dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
+from neural_compressor.data import Datasets
+dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
 from neural_compressor.experimental import Benchmark, common
 from neural_compressor.conf.config import BenchmarkConf
 benchmarker = Benchmark('fake_yaml.yaml')
@@ -60,8 +60,8 @@ def build_benchmark():
 arg_parser = ArgumentParser(description='Parse args')
 arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input odel')
 args = arg_parser.parse_args()
-from neural_compressor.data import DATASETS
-dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
+from neural_compressor.data import Datasets
+dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
 from neural_compressor.experimental import Benchmark, common
 from neural_compressor.conf.config import BenchmarkConf
 conf = BenchmarkConf('fake_yaml.yaml')
@@ -94,8 +94,8 @@ def build_benchmark2():
 "arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n",
 "args = arg_parser.parse_args()\n",

-"from neural_compressor.data import DATASETS\n",
-"dataset = DATASETS('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
+"from neural_compressor.data import Datasets\n",
+"dataset = Datasets('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",

 "from neural_compressor.experimental import Benchmark, common\n",
 "benchmarker = Benchmark()\n",

test/benchmark/test_benchmark_2.x.py

Lines changed: 6 additions & 6 deletions
@@ -17,9 +17,9 @@ def build_benchmark():
 args = arg_parser.parse_args()
 from neural_compressor.benchmark import fit
 from neural_compressor.config import BenchmarkConfig
-from neural_compressor.data import DATASETS
+from neural_compressor.data import Datasets
 from neural_compressor.experimental import common
-dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
+dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
 b_dataloader = common.DataLoader(dataset, batch_size=10)
 conf = BenchmarkConfig(warmup=5, iteration=10, cores_per_instance=4, num_of_instance=2)
 fit(args.input_model, conf, b_dataloader=b_dataloader)
@@ -32,8 +32,8 @@ def build_benchmark():
 args = arg_parser.parse_args()
 from neural_compressor.benchmark import fit
 from neural_compressor.config import BenchmarkConfig
-from neural_compressor.data import DATASETS
-dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
+from neural_compressor.data import Datasets
+dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
 from neural_compressor.experimental import common
 conf = BenchmarkConfig(warmup=5, iteration=10, cores_per_instance=4, num_of_instance=2)
 b_dataloader = common.DataLoader(dataset, batch_size=10)
@@ -63,8 +63,8 @@ def build_benchmark2():
 "arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n",
 "args = arg_parser.parse_args()\n",
 "from neural_compressor.benchmark import fit\n"
-"from neural_compressor.data import DATASETS\n",
-"dataset = DATASETS('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
+"from neural_compressor.data import Datasets\n",
+"dataset = Datasets('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",

 "from neural_compressor.experimental import common\n",
 "b_dataloader = common.DataLoader(dataset)\n",

test/config/test_pythonic_config.py

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@
 from torch import nn

 from neural_compressor.conf.pythonic_config import OpQuantConf, ActivationConf, WeightConf
-from neural_compressor.data import DATASETS
+from neural_compressor.data import Datasets
 from neural_compressor.experimental import Quantization, Distillation, Pruning, NAS, common
 from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader
 from neural_compressor.adaptor import FRAMEWORKS
@@ -199,7 +199,7 @@ def test_distillation(self):
 distiller.teacher_model = ConvNet(16, 32)

 # Customized train, evaluation
-datasets = DATASETS('pytorch')
+datasets = Datasets('pytorch')
 dummy_dataset = datasets['dummy'](shape=(32, 3, 64, 64), low=0., high=1., label=True)
 dummy_dataloader = PyTorchDataLoader(dummy_dataset)
 def train_func(model):
@@ -241,7 +241,7 @@ def test_pruning(self):
 prune.model = model

 # Customized train, evaluation
-datasets = DATASETS('pytorch')
+datasets = Datasets('pytorch')
 dummy_dataset = datasets['dummy'](shape=(32, 3, 64, 64), low=0., high=1., label=True)
 dummy_dataloader = PyTorchDataLoader(dummy_dataset)
 def train_func(model):

test/data/test_dataloader.py

Lines changed: 10 additions & 10 deletions
@@ -6,7 +6,7 @@
 import shutil
 from neural_compressor.utils.create_obj_from_config import create_dataset, create_dataloader
 from neural_compressor.data.dataloaders.dataloader import DataLoader
-from neural_compressor.data import DATASETS, DATALOADERS, TRANSFORMS
+from neural_compressor.data import Datasets, DATALOADERS, TRANSFORMS
 from PIL import Image

 class TestBuiltinDataloader(unittest.TestCase):
@@ -1069,7 +1069,7 @@ def test_pytorch_bert_dataset(self):
 self.assertEqual(5, len(ds[0][0]))

 def test_tensorflow_dummy(self):
-datasets = DATASETS('tensorflow')
+datasets = Datasets('tensorflow')
 dataset = datasets['dummy'](shape=(4, 256, 256, 3))

 data_loader = DATALOADERS['tensorflow'](dataset)
@@ -1092,7 +1092,7 @@ def test_tensorflow_dummy(self):
 dataset = datasets['dummy'](shape=(4, 256, 256, 3), dtype=['float32', 'int8'])

 def test_tensorflow_dummy_v2(self):
-datasets = DATASETS('tensorflow')
+datasets = Datasets('tensorflow')
 # test with label
 dataset = datasets['dummy_v2'](\
 input_shape=(256, 256, 3), label_shape=(1,))
@@ -1131,7 +1131,7 @@ def test_tensorflow_dummy_v2(self):
 input_shape=(256, 256, 3), dtype=['float32', 'int8'])

 def test_tensorflow_sparse_dummy_v2(self):
-datasets = DATASETS('tensorflow')
+datasets = Datasets('tensorflow')
 # test with label
 dataset = datasets['sparse_dummy_v2'](\
 dense_shape=[[10, 20], [5, 3]], label_shape=[[1]], sparse_ratio=[0.98, 0.8])
@@ -1184,7 +1184,7 @@ def test_style_transfer_dataset(self):
 im = Image.fromarray(random_array)
 im.save('test.jpg')

-datasets = DATASETS('tensorflow')
+datasets = Datasets('tensorflow')
 dataset = datasets['style_transfer'](content_folder='./', style_folder='./')
 length = len(dataset)
 image, label = dataset[0]
@@ -1223,7 +1223,7 @@ def test_tensorflow_list_dict(self):
 # self.assertEqual(data[0][1], 2)

 def test_pytorch_dummy(self):
-datasets = DATASETS('pytorch')
+datasets = Datasets('pytorch')
 transform = TRANSFORMS('pytorch', 'preprocess')['Resize'](**{'size':100})
 dataset = datasets['dummy'](shape=[(4, 256, 256, 3), (4, 1)], \
 high=[10., 10.], low=[0., 0.], transform=transform)
@@ -1240,7 +1240,7 @@ def test_pytorch_dummy(self):

 @unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet")
 def test_mxnet_dummy(self):
-datasets = DATASETS('mxnet')
+datasets = Datasets('mxnet')
 transform = TRANSFORMS('mxnet', 'preprocess')['Resize'](**{'size':100})
 dataset = datasets['dummy'](shape=(4, 256, 256, 3), transform=transform)

@@ -1258,7 +1258,7 @@ def test_mxnet_dummy(self):
 self.assertEqual(dataset[0][1], 0)

 def test_onnxrt_qlinear_dummy(self):
-datasets = DATASETS('onnxrt_qlinearops')
+datasets = Datasets('onnxrt_qlinearops')
 transform = TRANSFORMS('onnxrt_qlinearops', 'preprocess')['Resize'](**{'size':100})
 dataset = datasets['dummy'](shape=(4, 256, 256, 3), transform=transform)

@@ -1283,7 +1283,7 @@ def test_onnxrt_qlinear_dummy(self):
 shape=[(4, 256, 256, 3), (4, 256, 256, 3)], dtype=['float32', 'int8', 'int8'])

 def test_onnx_integer_dummy(self):
-datasets = DATASETS('onnxrt_integerops')
+datasets = Datasets('onnxrt_integerops')
 dataset = datasets['dummy'](shape=(4, 256, 256, 3))

 data_loader = DATALOADERS['onnxrt_integerops'](dataset)
@@ -1321,7 +1321,7 @@ def test_onnx_bert(self):
 tsv_w.writerow(['Quality', '#1 ID', '#2 ID', '#1 String', '#2 String'])
 tsv_w.writerow(['1', '702876', '702977', """Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .""", """Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence ."""])

-datasets = DATASETS('onnxrt_integerops')
+datasets = Datasets('onnxrt_integerops')
 args = {'GLUE':
 {'data_dir': './MRPC',
 'model_name_or_path': 'bert-base-uncased',

test/data/test_filter.py

Lines changed: 3 additions & 3 deletions
@@ -4,7 +4,7 @@
 import json
 import shutil
 from PIL import Image
-from neural_compressor.data import FILTERS, TRANSFORMS, DATASETS, DATALOADERS
+from neural_compressor.data import FILTERS, TRANSFORMS, Datasets, DATALOADERS
 from neural_compressor.utils.create_obj_from_config import create_dataset, get_preprocess, create_dataloader

 import tensorflow as tf
@@ -60,7 +60,7 @@ def testLabelBalanceCOCORecord(self):
 preprocesses = TRANSFORMS('tensorflow', 'preprocess')
 filters = FILTERS('tensorflow')
 filter = filters['LabelBalanceCOCORecord'](2)
-datasets = DATASETS('tensorflow')
+datasets = Datasets('tensorflow')
 dataset = datasets['COCORecord']('test.record', \
 transform=None, filter=filter)
 dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
@@ -146,7 +146,7 @@ def testLabelBalanceCOCORaw(self):

 filters = FILTERS('onnxrt_qlinearops')
 filter = filters['LabelBalanceCOCORaw'](1)
-datasets = DATASETS('onnxrt_qlinearops')
+datasets = Datasets('onnxrt_qlinearops')
 dataset = datasets['COCORaw']('./', transform=None, filter=filter)
 dataloader = DATALOADERS['onnxrt_qlinearops'](dataset=dataset, batch_size=1)
 for (inputs, labels) in dataloader:
