Skip to content

Commit 23e2291

Browse files
committed
replace UT DATASETS
Signed-off-by: changwa1 <[email protected]>
1 parent 05f4b99 commit 23e2291

27 files changed

+98
-97
lines changed

neural_compressor/experimental/data/transforms/imagenet_transform.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333

3434
import numpy as np
3535
from neural_compressor.utils.utility import LazyImport
36+
from neural_compressor.utils import logger
3637
from .transform import transform_registry, BaseTransform
3738
tf = LazyImport('tensorflow')
3839
cv2 = LazyImport('cv2')

test/adaptor/onnxrt_adaptor/test_adaptor_onnxrt.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from onnx import onnx_pb as onnx_proto
1111
from onnx import helper, TensorProto, numpy_helper
1212
from neural_compressor.adaptor import FRAMEWORKS
13-
from neural_compressor.data import DATASETS, DATALOADERS
13+
from neural_compressor.data import Datasets, DATALOADERS
1414
from neural_compressor.experimental import Quantization, common
1515
from neural_compressor.experimental import Benchmark, common
1616
from neural_compressor import options
@@ -529,7 +529,7 @@ def build_gemm_model():
529529
def build_benchmark():
530530
seq = '''
531531
from neural_compressor.experimental import Benchmark
532-
from neural_compressor.data import DATASETS, DATALOADERS
532+
from neural_compressor.data import Datasets, DATALOADERS
533533
from neural_compressor import conf
534534
from onnx import onnx_pb as onnx_proto
535535
from onnx import helper, TensorProto, numpy_helper
@@ -555,7 +555,7 @@ def reverse_matrix(x):
555555
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
556556
model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
557557
558-
datasets = DATASETS('onnxrt_qlinearops')
558+
datasets = Datasets('onnxrt_qlinearops')
559559
ext_dataset = datasets['dummy'](shape=(10, 2), low=0., high=1., label=True)
560560
ext_dataloader = DATALOADERS['onnxrt_qlinearops'](ext_dataset)
561561
@@ -590,26 +590,26 @@ class TestAdaptorONNXRT(unittest.TestCase):
590590
rn50_export_path = "rn50.onnx"
591591
rn50_model = torchvision.models.resnet50()
592592

593-
datasets = DATASETS('onnxrt_qlinearops')
593+
datasets = Datasets('onnxrt_qlinearops')
594594
cv_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0., high=1., label=True)
595595
cv_dataloader = DATALOADERS['onnxrt_qlinearops'](cv_dataset)
596596

597597
ir3_dataset = datasets['dummy'](shape=(10, 2048), low=0., high=1., label=True)
598598
ir3_dataloader = DATALOADERS['onnxrt_qlinearops'](ir3_dataset)
599599

600-
gather_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(5, 100, 4), label=True)
600+
gather_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(5, 100, 4), label=True)
601601
gather_dataloader = DATALOADERS['onnxrt_qlinearops'](gather_dataset)
602602

603603
ext_dataset = datasets['dummy'](shape=(10, 2), low=0., high=1., label=True)
604604
ext_dataloader = DATALOADERS['onnxrt_qlinearops'](ext_dataset)
605605

606-
rename_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(5, 1, 200), label=True)
606+
rename_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(5, 1, 200), label=True)
607607
rename_dataloader = DATALOADERS['onnxrt_qlinearops'](rename_dataset)
608608

609609
matmul_dataset = MatmulDataset()
610610
matmul_dataloader = DATALOADERS['onnxrt_qlinearops'](matmul_dataset)
611611

612-
conv_dataset = DATASETS('onnxrt_qlinearops')['dummy'](shape=(10, 3, 1, 3), label=True)
612+
conv_dataset = Datasets('onnxrt_qlinearops')['dummy'](shape=(10, 3, 1, 3), label=True)
613613
conv_dataloader = DATALOADERS['onnxrt_qlinearops'](conv_dataset)
614614

615615
@classmethod

test/adaptor/onnxrt_adaptor/test_onnxrt_augment.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from neural_compressor.experimental.data.datasets.dataset import Dataset
1212
from neural_compressor.adaptor.ox_utils.calibration import ONNXRTAugment
1313
from neural_compressor.model.onnx_model import ONNXModel
14-
from neural_compressor.data import DATASETS, DATALOADERS
14+
from neural_compressor.data import Datasets, DATALOADERS
1515

1616
def generate_input_initializer(tensor_shape, tensor_dtype, input_name):
1717
'''
@@ -55,7 +55,7 @@ def create_nlp_session():
5555
node = onnx.helper.make_node('Gather', ['D', 'B'], ['C'], name='gather')
5656
graph = helper.make_graph([squeeze, node], 'test_graph_1', [A], [C], [B_init])
5757
model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
58-
datasets = DATASETS('onnxrt_qlinearops')
58+
datasets = Datasets('onnxrt_qlinearops')
5959
dataset = datasets['dummy_v2'](input_shape=(100, 4), label_shape=(1,))
6060

6161
dataloader = DATALOADERS['onnxrt_qlinearops'](dataset)

test/adaptor/pytorch_adaptor/test_adaptor_pytorch_2.x.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,11 @@
1010
import os
1111
from neural_compressor import PostTrainingQuantConfig, QuantizationAwareTrainingConfig
1212
from neural_compressor.config import set_tensorboard, set_workspace
13-
from neural_compressor.data import DATASETS, DATALOADERS
13+
from neural_compressor.data import Datasets, DATALOADERS
1414
from neural_compressor.adaptor import FRAMEWORKS
1515
from neural_compressor.model import MODELS
1616
from neural_compressor.experimental import Quantization, common
17-
from neural_compressor.experimental.data.datasets.dataset import DATASETS
17+
from neural_compressor.experimental.data.datasets.dataset import Datasets
1818
from neural_compressor import quantization
1919
from neural_compressor.training import prepare_compression
2020
from neural_compressor.utils.pytorch import load
@@ -419,7 +419,7 @@ def test_quantization_new_API(self):
419419
compression_manager = prepare_compression(copy.deepcopy(model), quant_conf)
420420
q_model = train_func(compression_manager, compression_manager.model)
421421
else:
422-
dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224))
422+
dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224))
423423
dataloader = DATALOADERS["pytorch"](dataset)
424424
if fake_yaml == "dynamic":
425425
quant_conf = PostTrainingQuantConfig(approach="dynamic",
@@ -449,7 +449,7 @@ def eval_func(model):
449449
# run fx_quant in neural_compressor and save the quantized GraphModule
450450
quant_conf = PostTrainingQuantConfig(approach="auto")
451451
set_workspace("./saved")
452-
dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224))
452+
dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224))
453453
dataloader = common.DataLoader(dataset)
454454
q_model = quantization.fit(model_origin,
455455
quant_conf,
@@ -469,7 +469,7 @@ def test_tensorboard(self):
469469
quant_conf = PostTrainingQuantConfig(approach="static",
470470
backend="pytorch")
471471
set_tensorboard(True)
472-
dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224))
472+
dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224))
473473
dataloader = common.DataLoader(dataset)
474474
quantization.fit(
475475
model, quant_conf, calib_dataloader=dataloader, eval_func=eval_func
@@ -493,7 +493,7 @@ def tearDownClass(self):
493493
def test_fx_quant(self):
494494
for fake_yaml in ["qat", "static"]:
495495
model_origin = resnet18()
496-
dataset = DATASETS("pytorch")["dummy"]((10, 3, 224, 224), label=True)
496+
dataset = Datasets("pytorch")["dummy"]((10, 3, 224, 224), label=True)
497497
dataloader = DATALOADERS["pytorch"](dataset)
498498
if fake_yaml == "qat":
499499
conf = QuantizationAwareTrainingConfig(
@@ -526,7 +526,7 @@ def test_fx_quant(self):
526526
for fake_yaml in ["qat", "static"]:
527527
model_origin = M()
528528
# run fx_quant in neural_compressor and save the quantized GraphModule
529-
dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224), label=True)
529+
dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224), label=True)
530530
dataloader = DATALOADERS["pytorch"](dataset)
531531
if fake_yaml == "qat":
532532
conf = QuantizationAwareTrainingConfig(
@@ -600,7 +600,7 @@ def eval_func(model):
600600
nhid = 256,
601601
nlayers = 2,
602602
)
603-
dataset = DATASETS("pytorch")["dummy"]((3, 10))
603+
dataset = Datasets("pytorch")["dummy"]((3, 10))
604604
dataloader = DATALOADERS["pytorch"](dataset)
605605
# run fx_quant in neural_compressor and save the quantized GraphModule
606606
if fake_yaml == "qat":
@@ -622,7 +622,7 @@ def eval_func(model):
622622
def test_fx_sub_module_quant(self):
623623
for fake_yaml in ["qat", "static"]:
624624
model_origin = DynamicControlModel()
625-
dataset = DATASETS("pytorch")["dummy"]((1, 3, 224, 224))
625+
dataset = Datasets("pytorch")["dummy"]((1, 3, 224, 224))
626626
dataloader = DATALOADERS["pytorch"](dataset)
627627
# run fx_quant in neural_compressor and save the quantized GraphModule
628628
if fake_yaml == "qat":
@@ -658,7 +658,7 @@ def test_fx_sub_module_quant(self):
658658
def test_mix_precision(self):
659659
model_origin = DynamicControlModel()
660660
# run fx_quant in neural_compressor and save the quantized GraphModule
661-
dataset = DATASETS("pytorch")["dummy"]((100, 3, 224, 224))
661+
dataset = Datasets("pytorch")["dummy"]((100, 3, 224, 224))
662662
dataloader = DATALOADERS["pytorch"](dataset)
663663
set_workspace=("./saved")
664664
conf = PostTrainingQuantConfig(op_name_list=ptq_fx_op_name_list, backend="pytorch_fx")

test/adaptor/pytorch_adaptor/test_torch2onnx.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
import neural_compressor.adaptor.pytorch as nc_torch
1010
from neural_compressor import quantization
1111
from neural_compressor.config import PostTrainingQuantConfig
12-
from neural_compressor.experimental.data.datasets.dataset import DATASETS
12+
from neural_compressor.experimental.data.datasets.dataset import Datasets
1313
from packaging.version import Version
1414
from torch.quantization import QuantStub, DeQuantStub
1515

@@ -212,7 +212,7 @@ def test_fx_quant(self):
212212
approach=fake_yaml,
213213
backend="pytorch_fx"
214214
)
215-
dataset = DATASETS("pytorch")['dummy']((100, 3, 224, 224))
215+
dataset = Datasets("pytorch")['dummy']((100, 3, 224, 224))
216216
dataloader = torch.utils.data.DataLoader(dataset)
217217
q_model = quantization.fit(model,
218218
conf,

test/benchmark/test_benchmark.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,8 +45,8 @@ def build_benchmark():
4545
arg_parser = ArgumentParser(description='Parse args')
4646
arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')
4747
args = arg_parser.parse_args()
48-
from neural_compressor.data import DATASETS
49-
dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
48+
from neural_compressor.data import Datasets
49+
dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
5050
from neural_compressor.experimental import Benchmark, common
5151
from neural_compressor.conf.config import BenchmarkConf
5252
benchmarker = Benchmark('fake_yaml.yaml')
@@ -60,8 +60,8 @@ def build_benchmark():
6060
arg_parser = ArgumentParser(description='Parse args')
6161
arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')
6262
args = arg_parser.parse_args()
63-
from neural_compressor.data import DATASETS
64-
dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
63+
from neural_compressor.data import Datasets
64+
dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
6565
from neural_compressor.experimental import Benchmark, common
6666
from neural_compressor.conf.config import BenchmarkConf
6767
conf = BenchmarkConf('fake_yaml.yaml')
@@ -94,8 +94,8 @@ def build_benchmark2():
9494
"arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n",
9595
"args = arg_parser.parse_args()\n",
9696

97-
"from neural_compressor.data import DATASETS\n",
98-
"dataset = DATASETS('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
97+
"from neural_compressor.data import Datasets\n",
98+
"dataset = Datasets('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
9999

100100
"from neural_compressor.experimental import Benchmark, common\n",
101101
"benchmarker = Benchmark()\n",

test/benchmark/test_benchmark_2.x.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@ def build_benchmark():
1818
args = arg_parser.parse_args()
1919
from neural_compressor.benchmark import fit
2020
from neural_compressor.config import BenchmarkConfig
21-
from neural_compressor.data import DATASETS
21+
from neural_compressor.data import Datasets
2222
from neural_compressor.experimental import common
23-
dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
23+
dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
2424
b_dataloader = common.DataLoader(dataset, batch_size=10)
2525
conf = BenchmarkConfig(warmup=5, iteration=10, cores_per_instance=4, num_of_instance=2)
2626
fit(args.input_model, conf, b_dataloader=b_dataloader)
@@ -33,8 +33,8 @@ def build_benchmark():
3333
args = arg_parser.parse_args()
3434
from neural_compressor.benchmark import fit
3535
from neural_compressor.config import BenchmarkConfig
36-
from neural_compressor.data import DATASETS
37-
dataset = DATASETS('tensorflow')['dummy']((100, 32, 32, 1), label=True)
36+
from neural_compressor.data import Datasets
37+
dataset = Datasets('tensorflow')['dummy']((100, 32, 32, 1), label=True)
3838
from neural_compressor.experimental import common
3939
conf = BenchmarkConfig(warmup=5, iteration=10, cores_per_instance=4, num_of_instance=2)
4040
b_dataloader = common.DataLoader(dataset, batch_size=10)
@@ -64,8 +64,8 @@ def build_benchmark2():
6464
"arg_parser.add_argument('--input_model', dest='input_model', default='input_model', help='input model')\n",
6565
"args = arg_parser.parse_args()\n",
6666
"from neural_compressor.benchmark import fit\n"
67-
"from neural_compressor.data import DATASETS\n",
68-
"dataset = DATASETS('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
67+
"from neural_compressor.data import Datasets\n",
68+
"dataset = Datasets('tensorflow')['dummy']((5, 32, 32, 1), label=True)\n",
6969

7070
"from neural_compressor.experimental import common\n",
7171
"b_dataloader = common.DataLoader(dataset)\n",

test/config/test_pythonic_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
from torch import nn
2020

2121
from neural_compressor.conf.pythonic_config import OpQuantConf, ActivationConf, WeightConf
22-
from neural_compressor.data import DATASETS
22+
from neural_compressor.data import Datasets
2323
from neural_compressor.experimental import Quantization, Distillation, Pruning, NAS, common
2424
from neural_compressor.experimental.data.dataloaders.pytorch_dataloader import PyTorchDataLoader
2525
from neural_compressor.adaptor import FRAMEWORKS
@@ -202,7 +202,7 @@ def test_distillation(self):
202202
distiller.teacher_model = ConvNet(16, 32)
203203

204204
# Customized train, evaluation
205-
datasets = DATASETS('pytorch')
205+
datasets = Datasets('pytorch')
206206
dummy_dataset = datasets['dummy'](shape=(32, 3, 64, 64), low=0., high=1., label=True)
207207
dummy_dataloader = PyTorchDataLoader(dummy_dataset)
208208
def train_func(model):
@@ -245,7 +245,7 @@ def test_pruning(self):
245245
prune.model = model
246246

247247
# Customized train, evaluation
248-
datasets = DATASETS('pytorch')
248+
datasets = Datasets('pytorch')
249249
dummy_dataset = datasets['dummy'](shape=(32, 3, 64, 64), low=0., high=1., label=True)
250250
dummy_dataloader = PyTorchDataLoader(dummy_dataset)
251251
def train_func(model):

test/data/test_dataloader.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import shutil
77
from neural_compressor.utils.create_obj_from_config import create_dataset, create_dataloader
88
from neural_compressor.data.dataloaders.dataloader import DataLoader
9-
from neural_compressor.data import DATASETS, DATALOADERS, TRANSFORMS
9+
from neural_compressor.data import Datasets, DATALOADERS, TRANSFORMS
1010
from PIL import Image
1111

1212
class TestBuiltinDataloader(unittest.TestCase):
@@ -1069,7 +1069,7 @@ def test_pytorch_bert_dataset(self):
10691069
self.assertEqual(5, len(ds[0][0]))
10701070

10711071
def test_tensorflow_dummy(self):
1072-
datasets = DATASETS('tensorflow')
1072+
datasets = Datasets('tensorflow')
10731073
dataset = datasets['dummy'](shape=(4, 256, 256, 3))
10741074

10751075
data_loader = DATALOADERS['tensorflow'](dataset)
@@ -1092,7 +1092,7 @@ def test_tensorflow_dummy(self):
10921092
dataset = datasets['dummy'](shape=(4, 256, 256, 3), dtype=['float32', 'int8'])
10931093

10941094
def test_tensorflow_dummy_v2(self):
1095-
datasets = DATASETS('tensorflow')
1095+
datasets = Datasets('tensorflow')
10961096
# test with label
10971097
dataset = datasets['dummy_v2'](\
10981098
input_shape=(256, 256, 3), label_shape=(1,))
@@ -1131,7 +1131,7 @@ def test_tensorflow_dummy_v2(self):
11311131
input_shape=(256, 256, 3), dtype=['float32', 'int8'])
11321132

11331133
def test_tensorflow_sparse_dummy_v2(self):
1134-
datasets = DATASETS('tensorflow')
1134+
datasets = Datasets('tensorflow')
11351135
# test with label
11361136
dataset = datasets['sparse_dummy_v2'](\
11371137
dense_shape=[[10, 20], [5, 3]], label_shape=[[1]], sparse_ratio=[0.98, 0.8])
@@ -1184,7 +1184,7 @@ def test_style_transfer_dataset(self):
11841184
im = Image.fromarray(random_array)
11851185
im.save('test.jpg')
11861186

1187-
datasets = DATASETS('tensorflow')
1187+
datasets = Datasets('tensorflow')
11881188
dataset = datasets['style_transfer'](content_folder='./', style_folder='./')
11891189
length = len(dataset)
11901190
image, label = dataset[0]
@@ -1223,7 +1223,7 @@ def test_tensorflow_list_dict(self):
12231223
# self.assertEqual(data[0][1], 2)
12241224

12251225
def test_pytorch_dummy(self):
1226-
datasets = DATASETS('pytorch')
1226+
datasets = Datasets('pytorch')
12271227
transform = TRANSFORMS('pytorch', 'preprocess')['Resize'](**{'size':100})
12281228
dataset = datasets['dummy'](shape=[(4, 256, 256, 3), (4, 1)], \
12291229
high=[10., 10.], low=[0., 0.], transform=transform)
@@ -1240,7 +1240,7 @@ def test_pytorch_dummy(self):
12401240

12411241
@unittest.skipIf(platform.system().lower() == "windows", "not support mxnet on windows yet")
12421242
def test_mxnet_dummy(self):
1243-
datasets = DATASETS('mxnet')
1243+
datasets = Datasets('mxnet')
12441244
transform = TRANSFORMS('mxnet', 'preprocess')['Resize'](**{'size':100})
12451245
dataset = datasets['dummy'](shape=(4, 256, 256, 3), transform=transform)
12461246

@@ -1258,7 +1258,7 @@ def test_mxnet_dummy(self):
12581258
self.assertEqual(dataset[0][1], 0)
12591259

12601260
def test_onnxrt_qlinear_dummy(self):
1261-
datasets = DATASETS('onnxrt_qlinearops')
1261+
datasets = Datasets('onnxrt_qlinearops')
12621262
transform = TRANSFORMS('onnxrt_qlinearops', 'preprocess')['Resize'](**{'size':100})
12631263
dataset = datasets['dummy'](shape=(4, 256, 256, 3), transform=transform)
12641264

@@ -1283,7 +1283,7 @@ def test_onnxrt_qlinear_dummy(self):
12831283
shape=[(4, 256, 256, 3), (4, 256, 256, 3)], dtype=['float32', 'int8', 'int8'])
12841284

12851285
def test_onnx_integer_dummy(self):
1286-
datasets = DATASETS('onnxrt_integerops')
1286+
datasets = Datasets('onnxrt_integerops')
12871287
dataset = datasets['dummy'](shape=(4, 256, 256, 3))
12881288

12891289
data_loader = DATALOADERS['onnxrt_integerops'](dataset)
@@ -1321,7 +1321,7 @@ def test_onnx_bert(self):
13211321
tsv_w.writerow(['Quality', '#1 ID', '#2 ID', '#1 String', '#2 String'])
13221322
tsv_w.writerow(['1', '702876', '702977', """Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .""", """Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence ."""])
13231323

1324-
datasets = DATASETS('onnxrt_integerops')
1324+
datasets = Datasets('onnxrt_integerops')
13251325
args = {'GLUE':
13261326
{'data_dir': './MRPC',
13271327
'model_name_or_path': 'bert-base-uncased',

test/data/test_filter.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import json
55
import shutil
66
from PIL import Image
7-
from neural_compressor.data import FILTERS, TRANSFORMS, DATASETS, DATALOADERS
7+
from neural_compressor.data import FILTERS, TRANSFORMS, Datasets, DATALOADERS
88
from neural_compressor.utils.create_obj_from_config import create_dataset, get_preprocess, create_dataloader
99

1010
import tensorflow as tf
@@ -60,7 +60,7 @@ def testLabelBalanceCOCORecord(self):
6060
preprocesses = TRANSFORMS('tensorflow', 'preprocess')
6161
filters = FILTERS('tensorflow')
6262
filter = filters['LabelBalanceCOCORecord'](2)
63-
datasets = DATASETS('tensorflow')
63+
datasets = Datasets('tensorflow')
6464
dataset = datasets['COCORecord']('test.record', \
6565
transform=None, filter=filter)
6666
dataloader = DATALOADERS['tensorflow'](dataset=dataset, batch_size=1)
@@ -146,7 +146,7 @@ def testLabelBalanceCOCORaw(self):
146146

147147
filters = FILTERS('onnxrt_qlinearops')
148148
filter = filters['LabelBalanceCOCORaw'](1)
149-
datasets = DATASETS('onnxrt_qlinearops')
149+
datasets = Datasets('onnxrt_qlinearops')
150150
dataset = datasets['COCORaw']('./', transform=None, filter=filter)
151151
dataloader = DATALOADERS['onnxrt_qlinearops'](dataset=dataset, batch_size=1)
152152
for (inputs, labels) in dataloader:

0 commit comments

Comments
 (0)