From 584069e7e2056e76e70ae6f301816987856434a9 Mon Sep 17 00:00:00 2001
From: Virgile Mison
Date: Fri, 28 Jan 2022 22:21:44 -0500
Subject: [PATCH 1/5] add test_yelpreviewpolarity to mock YelpReviewP..

---
 test/datasets/test_yelpreviewpolarity.py | 85 ++++++++++++++++++++++++
 1 file changed, 85 insertions(+)
 create mode 100644 test/datasets/test_yelpreviewpolarity.py

diff --git a/test/datasets/test_yelpreviewpolarity.py b/test/datasets/test_yelpreviewpolarity.py
new file mode 100644
index 0000000000..25429a38b1
--- /dev/null
+++ b/test/datasets/test_yelpreviewpolarity.py
@@ -0,0 +1,85 @@
+import os
+import random
+import string
+import tarfile
+from collections import defaultdict
+from unittest.mock import patch
+
+from parameterized import parameterized
+from torchtext.datasets.yelpreviewpolarity import YelpReviewPolarity
+
+from ..common.case_utils import TempDirMixin, zip_equal
+from ..common.torchtext_test_case import TorchtextTestCase
+
+
+def _get_mock_dataset(root_dir):
+    """
+    root_dir: directory to the mocked dataset
+    """
+    base_dir = os.path.join(root_dir, "YelpReviewPolarity")
+    temp_dataset_dir = os.path.join(base_dir, "yelp_review_polarity_csv")
+    os.makedirs(temp_dataset_dir, exist_ok=True)
+
+    seed = 1
+    mocked_data = defaultdict(list)
+    for file_name in ["train.csv", "test.csv"]:
+        csv_file = os.path.join(temp_dataset_dir, file_name)
+        mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
+        with open(csv_file, "w") as f:
+            for i in range(5):
+                label = seed % 11
+                rand_string = " ".join(
+                    random.choice(string.ascii_letters) for i in range(seed)
+                )
+                dataset_line = (label, f"{rand_string}")
+                f.write(f'"{label}","{rand_string}"\n')
+
+                # append line to correct dataset split
+                mocked_lines.append(dataset_line)
+                seed += 1
+
+    compressed_dataset_path = os.path.join(base_dir, "yelp_review_polarity_csv.tar.gz")
+    # create gz file from dataset folder
+    with tarfile.open(compressed_dataset_path, "w:gz") as tar:
+        for file_name in ("train.csv", "test.csv"):
+            csv_file = os.path.join(temp_dataset_dir, file_name)
+            tar.add(csv_file)
+
+    return mocked_data
+
+
+class TestYelpReviewPolarity(TempDirMixin, TorchtextTestCase):
+    root_dir = None
+    samples = []
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls.root_dir = cls.get_base_temp_dir()
+        cls.samples = _get_mock_dataset(cls.root_dir)
+        cls.patcher = patch(
+            "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
+        )
+        cls.patcher.start()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.patcher.stop()
+        super().tearDownClass()
+
+    @parameterized.expand(["train", "test"])
+    def test_yelpreviewpolarity(self, split):
+        dataset = YelpReviewPolarity(root=self.root_dir, split=split)
+
+        samples = list(dataset)
+        expected_samples = self.samples[split]
+        for sample, expected_sample in zip_equal(samples, expected_samples):
+            self.assertEqual(sample, expected_sample)
+
+    @parameterized.expand(["train", "test"])
+    def test_yelpreviewpolarity_split_argument(self, split):
+        dataset1 = YelpReviewPolarity(root=self.root_dir, split=split)
+        (dataset2,) = YelpReviewPolarity(root=self.root_dir, split=(split,))
+
+        for d1, d2 in zip_equal(dataset1, dataset2):
+            self.assertEqual(d1, d2)
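The test above leans on two tricks that every mocked-dataset test in this series repeats: the fake archive is written to the exact cache path the dataset's datapipe resolves under `root`, and torchdata's hash validation is patched out so the fake archive is accepted in place of the real download. A minimal sketch of that pattern follows; only the patched torchdata target comes from the patch above, while `run_with_mocked_download` and its arguments are a hypothetical helper for illustration.

import tarfile
from unittest.mock import patch


def run_with_mocked_download(archive_path, source_dir, dataset_fn, **kwargs):
    # Pack the pre-generated CSV files where the dataset's datapipe
    # expects the downloaded archive to live (hypothetical helper).
    with tarfile.open(archive_path, "w:gz") as tar:
        tar.add(source_dir)
    # torchdata's on-disk cache verifies downloads against a known hash;
    # forcing _hash_check to return True lets the fake archive pass as
    # the genuine download.
    with patch(
        "torchdata.datapipes.iter.util.cacheholder._hash_check",
        return_value=True,
    ):
        return list(dataset_fn(**kwargs))
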
From ffea227646d16e737e8950118930f0828a68cc43 Mon Sep 17 00:00:00 2001
From: Virgile Mison
Date: Fri, 28 Jan 2022 22:34:13 -0500
Subject: [PATCH 2/5] add test_dbpedia to mock DBpedia

---
 test/datasets/test_dbpedia.py | 85 +++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)
 create mode 100644 test/datasets/test_dbpedia.py

diff --git a/test/datasets/test_dbpedia.py b/test/datasets/test_dbpedia.py
new file mode 100644
index 0000000000..6b26fabcfd
--- /dev/null
+++ b/test/datasets/test_dbpedia.py
@@ -0,0 +1,85 @@
+import os
+import random
+import string
+import tarfile
+from collections import defaultdict
+from unittest.mock import patch
+
+from parameterized import parameterized
+from torchtext.datasets.dbpedia import DBpedia
+
+from ..common.case_utils import TempDirMixin, zip_equal
+from ..common.torchtext_test_case import TorchtextTestCase
+
+
+def _get_mock_dataset(root_dir):
+    """
+    root_dir: directory to the mocked dataset
+    """
+    base_dir = os.path.join(root_dir, "DBpedia")
+    temp_dataset_dir = os.path.join(base_dir, "dbpedia_csv")
+    os.makedirs(temp_dataset_dir, exist_ok=True)
+
+    seed = 1
+    mocked_data = defaultdict(list)
+    for file_name in ["train.csv", "test.csv"]:
+        csv_file = os.path.join(temp_dataset_dir, file_name)
+        mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
+        with open(csv_file, "w") as f:
+            for i in range(5):
+                label = seed % 11
+                rand_string = " ".join(
+                    random.choice(string.ascii_letters) for i in range(seed)
+                )
+                dataset_line = (label, f"{rand_string}")
+                f.write(f'{label},"{rand_string}"\n')
+
+                # append line to correct dataset split
+                mocked_lines.append(dataset_line)
+                seed += 1
+
+    compressed_dataset_path = os.path.join(base_dir, "dbpedia_csv.tar.gz")
+    # create gz file from dataset folder
+    with tarfile.open(compressed_dataset_path, "w:gz") as tar:
+        for file_name in ("train.csv", "test.csv"):
+            csv_file = os.path.join(temp_dataset_dir, file_name)
+            tar.add(csv_file)
+
+    return mocked_data
+
+
+class TestDBpedia(TempDirMixin, TorchtextTestCase):
+    root_dir = None
+    samples = []
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls.root_dir = cls.get_base_temp_dir()
+        cls.samples = _get_mock_dataset(cls.root_dir)
+        cls.patcher = patch(
+            "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
+        )
+        cls.patcher.start()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.patcher.stop()
+        super().tearDownClass()
+
+    @parameterized.expand(["train", "test"])
+    def test_dbpedia(self, split):
+        dataset = DBpedia(root=self.root_dir, split=split)
+
+        samples = list(dataset)
+        expected_samples = self.samples[split]
+        for sample, expected_sample in zip_equal(samples, expected_samples):
+            self.assertEqual(sample, expected_sample)
+
+    @parameterized.expand(["train", "test"])
+    def test_dbpedia_split_argument(self, split):
+        dataset1 = DBpedia(root=self.root_dir, split=split)
+        (dataset2,) = DBpedia(root=self.root_dir, split=(split,))
+
+        for d1, d2 in zip_equal(dataset1, dataset2):
+            self.assertEqual(d1, d2)
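Both test classes compare the datapipe output to the mocked samples with `zip_equal` rather than the builtin `zip`, which silently truncates at the shorter iterable and would let a dataset that drops samples pass. The helper is imported from the shared `..common.case_utils` module; its implementation is not shown in this series, but it is assumed to behave like a strict zip, along these lines:

from itertools import zip_longest


def zip_equal(*iterables):
    # Assumed behavior of test/common/case_utils.zip_equal: like zip(),
    # but raise instead of truncating when the iterables differ in length.
    sentinel = object()
    for combo in zip_longest(*iterables, fillvalue=sentinel):
        if sentinel in combo:
            raise ValueError("Iterables have different lengths")
        yield combo
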
From 299666861efb3fefa93b75436180a082bbd3072b Mon Sep 17 00:00:00 2001
From: Virgile Mison
Date: Fri, 28 Jan 2022 22:36:01 -0500
Subject: [PATCH 3/5] remove yelpreview test

---
 test/datasets/test_yelpreviewpolarity.py | 85 ------------------------
 1 file changed, 85 deletions(-)
 delete mode 100644 test/datasets/test_yelpreviewpolarity.py

diff --git a/test/datasets/test_yelpreviewpolarity.py b/test/datasets/test_yelpreviewpolarity.py
deleted file mode 100644
index 25429a38b1..0000000000
--- a/test/datasets/test_yelpreviewpolarity.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import os
-import random
-import string
-import tarfile
-from collections import defaultdict
-from unittest.mock import patch
-
-from parameterized import parameterized
-from torchtext.datasets.yelpreviewpolarity import YelpReviewPolarity
-
-from ..common.case_utils import TempDirMixin, zip_equal
-from ..common.torchtext_test_case import TorchtextTestCase
-
-
-def _get_mock_dataset(root_dir):
-    """
-    root_dir: directory to the mocked dataset
-    """
-    base_dir = os.path.join(root_dir, "YelpReviewPolarity")
-    temp_dataset_dir = os.path.join(base_dir, "yelp_review_polarity_csv")
-    os.makedirs(temp_dataset_dir, exist_ok=True)
-
-    seed = 1
-    mocked_data = defaultdict(list)
-    for file_name in ["train.csv", "test.csv"]:
-        csv_file = os.path.join(temp_dataset_dir, file_name)
-        mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
-        with open(csv_file, "w") as f:
-            for i in range(5):
-                label = seed % 11
-                rand_string = " ".join(
-                    random.choice(string.ascii_letters) for i in range(seed)
-                )
-                dataset_line = (label, f"{rand_string}")
-                f.write(f'"{label}","{rand_string}"\n')
-
-                # append line to correct dataset split
-                mocked_lines.append(dataset_line)
-                seed += 1
-
-    compressed_dataset_path = os.path.join(base_dir, "yelp_review_polarity_csv.tar.gz")
-    # create gz file from dataset folder
-    with tarfile.open(compressed_dataset_path, "w:gz") as tar:
-        for file_name in ("train.csv", "test.csv"):
-            csv_file = os.path.join(temp_dataset_dir, file_name)
-            tar.add(csv_file)
-
-    return mocked_data
-
-
-class TestYelpReviewPolarity(TempDirMixin, TorchtextTestCase):
-    root_dir = None
-    samples = []
-
-    @classmethod
-    def setUpClass(cls):
-        super().setUpClass()
-        cls.root_dir = cls.get_base_temp_dir()
-        cls.samples = _get_mock_dataset(cls.root_dir)
-        cls.patcher = patch(
-            "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
-        )
-        cls.patcher.start()
-
-    @classmethod
-    def tearDownClass(cls):
-        cls.patcher.stop()
-        super().tearDownClass()
-
-    @parameterized.expand(["train", "test"])
-    def test_yelpreviewpolarity(self, split):
-        dataset = YelpReviewPolarity(root=self.root_dir, split=split)
-
-        samples = list(dataset)
-        expected_samples = self.samples[split]
-        for sample, expected_sample in zip_equal(samples, expected_samples):
-            self.assertEqual(sample, expected_sample)
-
-    @parameterized.expand(["train", "test"])
-    def test_yelpreviewpolarity_split_argument(self, split):
-        dataset1 = YelpReviewPolarity(root=self.root_dir, split=split)
-        (dataset2,) = YelpReviewPolarity(root=self.root_dir, split=(split,))
-
-        for d1, d2 in zip_equal(dataset1, dataset2):
-            self.assertEqual(d1, d2)
"yelp_review_polarity_csv") - os.makedirs(temp_dataset_dir, exist_ok=True) - - seed = 1 - mocked_data = defaultdict(list) - for file_name in ["train.csv", "test.csv"]: - csv_file = os.path.join(temp_dataset_dir, file_name) - mocked_lines = mocked_data[os.path.splitext(file_name)[0]] - with open(csv_file, "w") as f: - for i in range(5): - label = seed % 11 - rand_string = " ".join( - random.choice(string.ascii_letters) for i in range(seed) - ) - dataset_line = (label, f"{rand_string}") - f.write(f'"{label}","{rand_string}"\n') - - # append line to correct dataset split - mocked_lines.append(dataset_line) - seed += 1 - - compressed_dataset_path = os.path.join(base_dir, "yelp_review_polarity_csv.tar.gz") - # create gz file from dataset folder - with tarfile.open(compressed_dataset_path, "w:gz") as tar: - for file_name in ("train.csv", "test.csv"): - csv_file = os.path.join(temp_dataset_dir, file_name) - tar.add(csv_file) - - return mocked_data - - -class TestYelpReviewPolarity(TempDirMixin, TorchtextTestCase): - root_dir = None - samples = [] - - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.root_dir = cls.get_base_temp_dir() - cls.samples = _get_mock_dataset(cls.root_dir) - cls.patcher = patch( - "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True - ) - cls.patcher.start() - - @classmethod - def tearDownClass(cls): - cls.patcher.stop() - super().tearDownClass() - - @parameterized.expand(["train", "test"]) - def test_yelpreviewpolarity(self, split): - dataset = YelpReviewPolarity(root=self.root_dir, split=split) - - samples = list(dataset) - expected_samples = self.samples[split] - for sample, expected_sample in zip_equal(samples, expected_samples): - self.assertEqual(sample, expected_sample) - - @parameterized.expand(["train", "test"]) - def test_yelpreviewpolarity_split_argument(self, split): - dataset1 = YelpReviewPolarity(root=self.root_dir, split=split) - (dataset2,) = YelpReviewPolarity(root=self.root_dir, split=(split,)) - - for d1, d2 in zip_equal(dataset1, dataset2): - self.assertEqual(d1, d2) From 146b6290a23009bf2b9204acbe69de61a26f4e5a Mon Sep 17 00:00:00 2001 From: Virgile Mison Date: Thu, 3 Feb 2022 16:22:22 -0500 Subject: [PATCH 5/5] PR comments follow up + add second string --- test/datasets/test_dbpedia.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/test/datasets/test_dbpedia.py b/test/datasets/test_dbpedia.py index 6b26fabcfd..51fbf19335 100644 --- a/test/datasets/test_dbpedia.py +++ b/test/datasets/test_dbpedia.py @@ -17,22 +17,22 @@ def _get_mock_dataset(root_dir): root_dir: directory to the mocked dataset """ base_dir = os.path.join(root_dir, "DBpedia") - temp_dataset_dir = os.path.join(base_dir, "dbpedia_csv") + temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir") os.makedirs(temp_dataset_dir, exist_ok=True) seed = 1 mocked_data = defaultdict(list) - for file_name in ["train.csv", "test.csv"]: + for file_name in ("train.csv", "test.csv"): csv_file = os.path.join(temp_dataset_dir, file_name) mocked_lines = mocked_data[os.path.splitext(file_name)[0]] with open(csv_file, "w") as f: for i in range(5): - label = seed % 11 + label = seed % 14 + 1 rand_string = " ".join( random.choice(string.ascii_letters) for i in range(seed) ) - dataset_line = (label, f"{rand_string}") - f.write(f'{label},"{rand_string}"\n') + dataset_line = (label, rand_string + " " + rand_string) + f.write(f'{label},"{rand_string}","{rand_string}"\n') # append line to correct dataset split 
From 146b6290a23009bf2b9204acbe69de61a26f4e5a Mon Sep 17 00:00:00 2001
From: Virgile Mison
Date: Thu, 3 Feb 2022 16:22:22 -0500
Subject: [PATCH 5/5] PR comments follow up + add second string

---
 test/datasets/test_dbpedia.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/test/datasets/test_dbpedia.py b/test/datasets/test_dbpedia.py
index 6b26fabcfd..51fbf19335 100644
--- a/test/datasets/test_dbpedia.py
+++ b/test/datasets/test_dbpedia.py
@@ -17,22 +17,22 @@ def _get_mock_dataset(root_dir):
     root_dir: directory to the mocked dataset
     """
     base_dir = os.path.join(root_dir, "DBpedia")
-    temp_dataset_dir = os.path.join(base_dir, "dbpedia_csv")
+    temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
     os.makedirs(temp_dataset_dir, exist_ok=True)
 
     seed = 1
     mocked_data = defaultdict(list)
-    for file_name in ["train.csv", "test.csv"]:
+    for file_name in ("train.csv", "test.csv"):
         csv_file = os.path.join(temp_dataset_dir, file_name)
         mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
         with open(csv_file, "w") as f:
             for i in range(5):
-                label = seed % 11
+                label = seed % 14 + 1
                 rand_string = " ".join(
                     random.choice(string.ascii_letters) for i in range(seed)
                 )
-                dataset_line = (label, f"{rand_string}")
-                f.write(f'{label},"{rand_string}"\n')
+                dataset_line = (label, rand_string + " " + rand_string)
+                f.write(f'{label},"{rand_string}","{rand_string}"\n')
 
                 # append line to correct dataset split
                 mocked_lines.append(dataset_line)
@@ -41,9 +41,7 @@ def _get_mock_dataset(root_dir):
 
     compressed_dataset_path = os.path.join(base_dir, "dbpedia_csv.tar.gz")
     # create gz file from dataset folder
     with tarfile.open(compressed_dataset_path, "w:gz") as tar:
-        for file_name in ("train.csv", "test.csv"):
-            csv_file = os.path.join(temp_dataset_dir, file_name)
-            tar.add(csv_file)
+        tar.add(temp_dataset_dir, arcname="dbpedia_csv")
 
     return mocked_data
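The final hunk is the subtle part of the follow-up. Adding each CSV with `tar.add(csv_file)` stores members under their on-disk paths, while the real `dbpedia_csv.tar.gz` keeps everything under a top-level `dbpedia_csv/` directory that the extraction datapipe expects; archiving the renamed temp directory once with an explicit `arcname` reproduces that layout no matter where the temp directory lives. An illustrative sketch of the difference (paths and file contents are made up):

import os
import tarfile
import tempfile

# Build a throwaway source directory with a single CSV in it.
src = tempfile.mkdtemp()
with open(os.path.join(src, "train.csv"), "w") as f:
    f.write('1,"a","b"\n')

archive = os.path.join(tempfile.mkdtemp(), "demo.tar.gz")
with tarfile.open(archive, "w:gz") as tar:
    # Without arcname, the member would be stored under the source's full
    # on-disk path (e.g. "tmp/tmpXXXX/train.csv"). arcname renames the
    # tree on the fly, so members come out as "dbpedia_csv/train.csv",
    # matching the layout the dataset expects.
    tar.add(src, arcname="dbpedia_csv")

with tarfile.open(archive) as tar:
    print(tar.getnames())  # ['dbpedia_csv', 'dbpedia_csv/train.csv']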