cyr19 committed on
Commit
66ed1e1
1 Parent(s): 86fe64f

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "</quatrain>": 50259,
+   "<PAD>": 50257,
+   "<alexandrine>": 50281,
+   "<amphibrach>": 50280,
+   "<anapaest>": 50277,
+   "<dactyl>": 50278,
+   "<iambus>": 50275,
+   "<other>": 50279,
+   "<quatrain>": 50258,
+   "<trochee>": 50276,
+   "[AAAA]": 50273,
+   "[AAAB]": 50271,
+   "[AABA]": 50269,
+   "[AABB]": 50270,
+   "[AABC]": 50261,
+   "[ABAA]": 50274,
+   "[ABAB]": 50265,
+   "[ABAC]": 50262,
+   "[ABBA]": 50264,
+   "[ABBB]": 50268,
+   "[ABBC]": 50267,
+   "[ABCA]": 50272,
+   "[ABCB]": 50266,
+   "[ABCC]": 50263,
+   "[ABCD]": 50260
+ }
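
These additions extend the stock GPT-2 vocabulary (which ends at id 50256, `<|endoftext|>`) with 25 control tokens: quatrain boundary markers, fifteen rhyme-scheme tags, and meter tags, occupying ids 50257–50281. A minimal sketch of the resulting id mapping, assuming the tokenizer is loaded from a local clone of this repo (the path below is a placeholder; substitute the actual Hub id):

```python
from transformers import AutoTokenizer

# Placeholder path: point it at a local clone of this repo or its Hub id.
tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")

# Base GPT-2 ids stop at 50256; the uploaded added_tokens.json assigns
# the new control tokens the ids 50257-50281.
print(tokenizer.convert_tokens_to_ids("<quatrain>"))  # 50258
print(tokenizer.convert_tokens_to_ids("[ABAB]"))      # 50265
print(tokenizer.convert_tokens_to_ids("<iambus>"))    # 50275
```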
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "additional_special_tokens": [
+     "<PAD>",
+     "<quatrain>",
+     "</quatrain>",
+     "[ABCD]",
+     "[AABC]",
+     "[ABAC]",
+     "[ABCC]",
+     "[ABBA]",
+     "[ABAB]",
+     "[ABCB]",
+     "[ABBC]",
+     "[ABBB]",
+     "[AABA]",
+     "[AABB]",
+     "[AAAB]",
+     "[ABCA]",
+     "[AAAA]",
+     "[ABAA]",
+     "<iambus>",
+     "<trochee>",
+     "<anapaest>",
+     "<dactyl>",
+     "<other>",
+     "<amphibrach>",
+     "<alexandrine>"
+   ],
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<PAD>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
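
Registering the control tokens as `additional_special_tokens` means the BPE never splits them: each one encodes to a single id, and `decode` can drop them wholesale. A minimal sketch, under the same loading assumption as above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

# Special tokens are matched as whole units, so a control prefix
# encodes to exactly one id per token.
ids = tokenizer.encode("<quatrain>[ABAB]<iambus>")
print(ids)  # [50258, 50265, 50275]

# skip_special_tokens strips the control tokens on decode, leaving only text.
print(repr(tokenizer.decode(ids, skip_special_tokens=True)))  # ''
```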
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,249 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "50256": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50257": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50258": {
+       "content": "<quatrain>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50259": {
+       "content": "</quatrain>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50260": {
+       "content": "[ABCD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50261": {
+       "content": "[AABC]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50262": {
+       "content": "[ABAC]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50263": {
+       "content": "[ABCC]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "[ABBA]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50265": {
+       "content": "[ABAB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50266": {
+       "content": "[ABCB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50267": {
+       "content": "[ABBC]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50268": {
+       "content": "[ABBB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50269": {
+       "content": "[AABA]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50270": {
+       "content": "[AABB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50271": {
+       "content": "[AAAB]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50272": {
+       "content": "[ABCA]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50273": {
+       "content": "[AAAA]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50274": {
+       "content": "[ABAA]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50275": {
+       "content": "<iambus>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50276": {
+       "content": "<trochee>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50277": {
+       "content": "<anapaest>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50278": {
+       "content": "<dactyl>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50279": {
+       "content": "<other>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50280": {
+       "content": "<amphibrach>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50281": {
+       "content": "<alexandrine>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<PAD>",
+     "<quatrain>",
+     "</quatrain>",
+     "[ABCD]",
+     "[AABC]",
+     "[ABAC]",
+     "[ABCC]",
+     "[ABBA]",
+     "[ABAB]",
+     "[ABCB]",
+     "[ABBC]",
+     "[ABBB]",
+     "[AABA]",
+     "[AABB]",
+     "[AAAB]",
+     "[ABCA]",
+     "[AAAA]",
+     "[ABAA]",
+     "<iambus>",
+     "<trochee>",
+     "<anapaest>",
+     "<dactyl>",
+     "<other>",
+     "<amphibrach>",
+     "<alexandrine>"
+   ],
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<PAD>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
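
tokenizer_config.json wires everything together: a `GPT2Tokenizer` with `<PAD>` as a real pad token (which stock GPT-2 tokenizers lack) and a 2048-token `model_max_length`. A minimal sketch of batch padding under the same loading assumption; the prompt text is illustrative only, and `return_tensors="pt"` requires PyTorch:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

# pad_token is "<PAD>" (id 50257), so batched encoding pads out of the box;
# the shorter row is filled with 50257 up to the longest row's length.
batch = tokenizer(
    ["<quatrain>[ABAB]<iambus> Of all the flowers", "<quatrain>[AABB]<trochee>"],
    padding=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)    # (2, longest-row length)
print(tokenizer.model_max_length)  # 2048
```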
vocab.json ADDED
The diff for this file is too large to render. See raw diff