|
1 | | -from torchtext.utils import ( |
2 | | - download_from_url, |
3 | | - extract_archive, |
4 | | -) |
| 1 | +from torchtext._internal.module_utils import is_module_available |
| 2 | +from typing import Union, Tuple |
| 3 | + |
| 4 | +if is_module_available("torchdata"): |
| 5 | + from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper |
| 6 | + |
5 | 7 | from torchtext.data.datasets_utils import ( |
6 | | - _RawTextIterableDataset, |
7 | 8 | _wrap_split_argument, |
8 | 9 | _add_docstring_header, |
9 | | - _find_match, |
10 | 10 | _create_dataset_directory, |
11 | | - _create_data_from_csv, |
12 | 11 | ) |
| 12 | + |
13 | 13 | import os |
14 | 14 |
|
# Google Drive direct-download link for the dbpedia_csv.tar.gz archive.
URL = 'https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k'
|
28 | 28 |
|
@_add_docstring_header(num_lines=NUM_LINES, num_classes=14)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def DBpedia(root: str, split: Union[Tuple[str], str]):
    """Build an iterable datapipe over the DBpedia classification dataset.

    Args:
        root: Directory where the dataset archive is cached.
        split: Which split(s) to load; one of ``"train"`` / ``"test"``
            (normalized by ``_wrap_split_argument``).

    Returns:
        A datapipe yielding ``(label, text)`` tuples, where ``label`` is the
        integer class id from the CSV's first column and ``text`` is the
        remaining columns joined with spaces.

    Raises:
        ModuleNotFoundError: If the optional ``torchdata`` package is absent.
    """
    # TODO Remove this after removing conditional dependency
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError("Package `torchdata` not found. Please install following instructions at `https://github.com/pytorch/data`")

    url_dp = IterableWrapper([URL])

    # Cache the downloaded archive at root/_PATH and verify its MD5 before use;
    # the filepath_fn ignores its argument (the URL) since the target is fixed.
    cache_dp = url_dp.on_disk_cache(
        filepath_fn=lambda _: os.path.join(root, _PATH),
        hash_dict={os.path.join(root, _PATH): MD5},
        hash_type="md5",
    )
    cache_dp = GDriveReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
    cache_dp = FileOpener(cache_dp, mode="b")

    extracted_files = cache_dp.read_from_tar()

    # Match the archive member's basename exactly (e.g. "train.csv") instead of
    # a substring test on the full path, which could false-match any member
    # whose path merely contains "<split>.csv".
    filter_extracted_files = extracted_files.filter(
        lambda t: os.path.basename(t[0]) == split + ".csv"
    )

    # Each CSV row is (label, col1, col2, ...); emit (int label, joined text).
    return filter_extracted_files.parse_csv().map(fn=lambda t: (int(t[0]), " ".join(t[1:])))
0 commit comments