jeduardogruiz committed
Commit 8882170
1 Parent(s): 516a027

Create test_tokenization.py

Files changed (1)
  1. test_tokenization.py +147 -0
test_tokenization.py ADDED
@@ -0,0 +1,147 @@
+ # coding=utf-8
+
+ import unittest
+
+ from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
+ from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
+ from transformers.utils import cached_property
+
+ from ...test_tokenization_common import TokenizerTesterMixin
+
+
+ SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
+
+
+ @require_sentencepiece
+ class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
+     from_pretrained_id = "microsoft/xprophetnet-large-wiki100-cased"
+     tokenizer_class = XLMProphetNetTokenizer
+     test_rust_tokenizer = False
+     test_sentencepiece = True
+
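+     # The fixture tokenizer is saved to tmpdirname so that the shared TokenizerTesterMixin
+     # checks can presumably reload it from disk via from_pretrained.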
+     def setUp(self):
+         super().setUp()
+
+         # We have a SentencePiece fixture for testing
+         tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
+         tokenizer.save_pretrained(self.tmpdirname)
+
+     def test_convert_token_and_id(self):
+         """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
+         token = "[PAD]"
+         token_id = 0
+
+         self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
+         self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
+
+     def test_get_vocab(self):
+         vocab_keys = list(self.get_tokenizer().get_vocab().keys())
+
+         self.assertEqual(vocab_keys[0], "[PAD]")
+         self.assertEqual(vocab_keys[1], "[CLS]")
+         self.assertEqual(vocab_keys[-1], "j")
+         self.assertEqual(len(vocab_keys), 1_012)
+
+     def test_vocab_size(self):
+         self.assertEqual(self.get_tokenizer().vocab_size, 1_012)
+
+     def test_full_tokenizer(self):
+         tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
+
+         tokens = tokenizer.tokenize("This is a test")
+         self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
+
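+         # Note: convert_tokens_to_ids shifts the raw SentencePiece ids by `fairseq_offset`,
+         # which presumably reserves the lowest ids for the fairseq-style special tokens.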
+         self.assertListEqual(
+             tokenizer.convert_tokens_to_ids(tokens),
+             [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
+         )
+
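+         # SPIECE_UNDERLINE ("▁") is the SentencePiece marker for a word-initial piece.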
+         tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
+         self.assertListEqual(
+             tokens,
+             [
+                 SPIECE_UNDERLINE + "I",
+                 SPIECE_UNDERLINE + "was",
+                 SPIECE_UNDERLINE + "b",
+                 "or",
+                 "n",
+                 SPIECE_UNDERLINE + "in",
+                 SPIECE_UNDERLINE + "",
+                 "9",
+                 "2",
+                 "0",
+                 "0",
+                 "0",
+                 ",",
+                 SPIECE_UNDERLINE + "and",
+                 SPIECE_UNDERLINE + "this",
+                 SPIECE_UNDERLINE + "is",
+                 SPIECE_UNDERLINE + "f",
+                 "al",
+                 "s",
+                 "é",
+                 ".",
+             ],
+         )
+         ids = tokenizer.convert_tokens_to_ids(tokens)
+         self.assertListEqual(
+             ids,
+             [
+                 value + tokenizer.fairseq_offset
+                 for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
+             ],
+         )
+
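+         # Pieces missing from the small fixture vocab ("9" and "é") are expected to come
+         # back as "[UNK]" after the id round trip.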
+         back_tokens = tokenizer.convert_ids_to_tokens(ids)
+         self.assertListEqual(
+             back_tokens,
+             [
+                 SPIECE_UNDERLINE + "I",
+                 SPIECE_UNDERLINE + "was",
+                 SPIECE_UNDERLINE + "b",
+                 "or",
+                 "n",
+                 SPIECE_UNDERLINE + "in",
+                 SPIECE_UNDERLINE + "",
+                 "[UNK]",
+                 "2",
+                 "0",
+                 "0",
+                 "0",
+                 ",",
+                 SPIECE_UNDERLINE + "and",
+                 SPIECE_UNDERLINE + "this",
+                 SPIECE_UNDERLINE + "is",
+                 SPIECE_UNDERLINE + "f",
+                 "al",
+                 "s",
+                 "[UNK]",
+                 ".",
+             ],
+         )
+
+     @cached_property
+     def big_tokenizer(self):
+         return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
+
+     @slow
+     def test_tokenization_base_easy_symbols(self):
+         symbols = "Hello World!"
+         original_tokenizer_encodings = [35389, 6672, 49, 2]
+         self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
+
+     @slow
+     def test_tokenizer_integration(self):
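+         # Reference encodings below appear to have been captured from the pinned checkpoint
+         # revision passed to tokenizer_integration_test_util.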
+ expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
+
+         self.tokenizer_integration_test_util(
+             expected_encoding=expected_encoding,
+             model_name="microsoft/xprophetnet-large-wiki100-cased",
+             revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
+         )