+import torch
+from torchtext import functional
+from .common.torchtext_test_case import TorchtextTestCase
+
+
+class TestFunctional(TorchtextTestCase):
+    def test_to_tensor(self):
+        input = [[1, 2], [1, 2, 3]]
+        padding_value = 0
+        actual = functional.to_tensor(input, padding_value=padding_value)
+        expected = torch.tensor([[1, 2, 0], [1, 2, 3]], dtype=torch.long)
+        torch.testing.assert_close(actual, expected)
+
+    def test_to_tensor_jit(self):
+        input = [[1, 2], [1, 2, 3]]
+        padding_value = 0
+        to_tensor_jit = torch.jit.script(functional.to_tensor)
+        actual = to_tensor_jit(input, padding_value=padding_value)
+        expected = torch.tensor([[1, 2, 0], [1, 2, 3]], dtype=torch.long)
+        torch.testing.assert_close(actual, expected)
+
+    def test_truncate(self):
+        input = [[1, 2], [1, 2, 3]]
+        max_seq_len = 2
+        actual = functional.truncate(input, max_seq_len=max_seq_len)
+        expected = [[1, 2], [1, 2]]
+        self.assertEqual(actual, expected)
+
+    def test_truncate_jit(self):
+        input = [[1, 2], [1, 2, 3]]
+        max_seq_len = 2
+        truncate_jit = torch.jit.script(functional.truncate)
+        actual = truncate_jit(input, max_seq_len=max_seq_len)
+        expected = [[1, 2], [1, 2]]
+        self.assertEqual(actual, expected)
+
+    def test_add_token(self):
+        input = [[1, 2], [1, 2, 3]]
+        token_id = 0
+        actual = functional.add_token(input, token_id=token_id)
+        expected = [[0, 1, 2], [0, 1, 2, 3]]
+        self.assertEqual(actual, expected)
+
+        actual = functional.add_token(input, token_id=token_id, begin=False)
+        expected = [[1, 2, 0], [1, 2, 3, 0]]
+        self.assertEqual(actual, expected)
+
+    def test_add_token_jit(self):
+        input = [[1, 2], [1, 2, 3]]
+        token_id = 0
+        add_token_jit = torch.jit.script(functional.add_token)
+        actual = add_token_jit(input, token_id=token_id)
+        expected = [[0, 1, 2], [0, 1, 2, 3]]
+        self.assertEqual(actual, expected)
+
+        actual = add_token_jit(input, token_id=token_id, begin=False)
+        expected = [[1, 2, 0], [1, 2, 3, 0]]
+        self.assertEqual(actual, expected)