Upload tokenizer

- special_tokens_map.json +4 -4
- tokenizer.json +28 -6
- tokenizer_config.json +28 -8
special_tokens_map.json CHANGED

@@ -35,28 +35,28 @@
     "<|extra0|>"
   ],
   "bos_token": {
-    "content": "<|endoftext|>",
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "unk_token": {
-    "content": "<|endoftext|>",
+    "content": "<unk>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
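The special-token map moves from the tokenizer's original `<|endoftext|>` marker to Llama-style `<s>`/`</s>`/`<unk>`, with `</s>` doubling as the pad token. A minimal sanity check, assuming these files are loaded from a local checkout (`path/to/checkout` below is a placeholder, not the actual repo id):

```python
# Sanity-check the remapped special tokens after loading; "path/to/checkout"
# is a placeholder for wherever these files live, not the actual repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkout")

assert tok.bos_token == "<s>"
assert tok.eos_token == "</s>"
assert tok.pad_token == "</s>"  # padding aliased to the eos token
assert tok.unk_token == "<unk>"
```

Aliasing pad to eos is a common choice for models trained without a dedicated padding token.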
tokenizer.json CHANGED

@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 2048,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
@@ -304,6 +299,33 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 100289,
+      "content": "<s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 100290,
+      "content": "</s>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
+    {
+      "id": 100291,
+      "content": "<unk>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": null,
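Two things change here: the baked-in truncation block (right side, `max_length` 2048) becomes `null`, so truncation is now purely opt-in at call time, and the three new markers are registered as added tokens with ids 100289-100291, so they encode atomically instead of being split by the BPE. A sketch of both effects, under the same placeholder-path assumption:

```python
# Check that the new added tokens encode to single ids (values taken from the
# diff above) and that truncation is now caller-controlled; placeholder path.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkout")

assert tok.convert_tokens_to_ids(["<s>", "</s>", "<unk>"]) == [100289, 100290, 100291]

# With "truncation": null in tokenizer.json, nothing is cut off unless asked:
ids = tok("long input " * 1000, truncation=True, max_length=2048)["input_ids"]
assert len(ids) <= 2048
```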
tokenizer_config.json CHANGED

@@ -265,6 +265,30 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "100289": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100290": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100291": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
   "additional_special_tokens": [
@@ -302,16 +326,12 @@
     "<|reg7|>",
     "<|extra0|>"
   ],
-  "bos_token": "<|endoftext|>",
+  "bos_token": "<s>",
   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
   "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
-  "max_length": 2048,
+  "eos_token": "</s>",
   "model_max_length": 2048,
-  "pad_token": "<|endoftext|>",
-  "stride": 0,
+  "pad_token": "</s>",
   "tokenizer_class": "GPT2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
-  "unk_token": "<|endoftext|>"
+  "unk_token": "<unk>"
 }
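The config mirrors the other two files: per-id entries (`added_tokens_decoder`) for 100289-100291, updated top-level token strings, and the stray truncation keys removed (`model_max_length` stays at 2048). The `chat_template` itself is untouched; with the new eos it renders zephyr-style prompts. A usage sketch, again with a placeholder path:

```python
# Render a prompt through the (unchanged) chat_template; eos_token now
# resolves to </s>. "path/to/checkout" is a placeholder path.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkout")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Prints, roughly:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hello!</s>
# <|assistant|>
```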