yassine benlaria committed on
Commit e728050
1 Parent(s): 2bb62c7

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "__liv__": 128104,
+   "__sma__": 128106,
+   "__sme__": 128107,
+   "__smj__": 128110,
+   "__smn__": 128108,
+   "__sms__": 128109,
+   "__vro__": 128105
+ }
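For context (not part of this commit): these entries register seven new language-code tokens (Livonian, Võro, and five Sámi languages) at vocabulary IDs 128104-128110. A minimal sketch of checking them after loading the uploaded tokenizer; "your-username/your-model" is a placeholder for this repository's ID, and the expected output assumes the tokenizer is loaded from exactly this upload.

# Minimal check (sketch): the newly added language tokens should resolve
# to the IDs listed in added_tokens.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-model")  # placeholder repo ID

for token in ["__liv__", "__vro__", "__sma__", "__sme__", "__smj__", "__smn__", "__sms__"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# e.g. prints "__liv__ 128104" for the first entry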
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8f7c76ed2a5e0822be39f0a4f95a55eb19c78f4593ce609e2edbc2aea4d380a
+ size 2423393
special_tokens_map.json ADDED
@@ -0,0 +1,116 @@
+ {
+   "additional_special_tokens": [
+     "__liv__",
+     "__vro__",
+     "__sma__",
+     "__sme__",
+     "__smn__",
+     "__sms__",
+     "__smj__",
+     "__af__",
+     "__am__",
+     "__ar__",
+     "__ast__",
+     "__az__",
+     "__ba__",
+     "__be__",
+     "__bg__",
+     "__bn__",
+     "__br__",
+     "__bs__",
+     "__ca__",
+     "__ceb__",
+     "__cs__",
+     "__cy__",
+     "__da__",
+     "__de__",
+     "__el__",
+     "__en__",
+     "__es__",
+     "__et__",
+     "__fa__",
+     "__ff__",
+     "__fi__",
+     "__fr__",
+     "__fy__",
+     "__ga__",
+     "__gd__",
+     "__gl__",
+     "__gu__",
+     "__ha__",
+     "__he__",
+     "__hi__",
+     "__hr__",
+     "__ht__",
+     "__hu__",
+     "__hy__",
+     "__id__",
+     "__ig__",
+     "__ilo__",
+     "__is__",
+     "__it__",
+     "__ja__",
+     "__jv__",
+     "__ka__",
+     "__kk__",
+     "__km__",
+     "__kn__",
+     "__ko__",
+     "__lb__",
+     "__lg__",
+     "__ln__",
+     "__lo__",
+     "__lt__",
+     "__lv__",
+     "__mg__",
+     "__mk__",
+     "__ml__",
+     "__mn__",
+     "__mr__",
+     "__ms__",
+     "__my__",
+     "__ne__",
+     "__nl__",
+     "__no__",
+     "__ns__",
+     "__oc__",
+     "__or__",
+     "__pa__",
+     "__pl__",
+     "__ps__",
+     "__pt__",
+     "__ro__",
+     "__ru__",
+     "__sd__",
+     "__si__",
+     "__sk__",
+     "__sl__",
+     "__so__",
+     "__sq__",
+     "__sr__",
+     "__ss__",
+     "__su__",
+     "__sv__",
+     "__sw__",
+     "__ta__",
+     "__th__",
+     "__tl__",
+     "__tn__",
+     "__tr__",
+     "__uk__",
+     "__ur__",
+     "__uz__",
+     "__vi__",
+     "__wo__",
+     "__xh__",
+     "__yi__",
+     "__yo__",
+     "__zh__",
+     "__zu__"
+   ],
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "additional_special_tokens": [
+     "__liv__",
+     "__vro__",
+     "__sma__",
+     "__sme__",
+     "__smn__",
+     "__sms__",
+     "__smj__",
+     "__af__",
+     "__am__",
+     "__ar__",
+     "__ast__",
+     "__az__",
+     "__ba__",
+     "__be__",
+     "__bg__",
+     "__bn__",
+     "__br__",
+     "__bs__",
+     "__ca__",
+     "__ceb__",
+     "__cs__",
+     "__cy__",
+     "__da__",
+     "__de__",
+     "__el__",
+     "__en__",
+     "__es__",
+     "__et__",
+     "__fa__",
+     "__ff__",
+     "__fi__",
+     "__fr__",
+     "__fy__",
+     "__ga__",
+     "__gd__",
+     "__gl__",
+     "__gu__",
+     "__ha__",
+     "__he__",
+     "__hi__",
+     "__hr__",
+     "__ht__",
+     "__hu__",
+     "__hy__",
+     "__id__",
+     "__ig__",
+     "__ilo__",
+     "__is__",
+     "__it__",
+     "__ja__",
+     "__jv__",
+     "__ka__",
+     "__kk__",
+     "__km__",
+     "__kn__",
+     "__ko__",
+     "__lb__",
+     "__lg__",
+     "__ln__",
+     "__lo__",
+     "__lt__",
+     "__lv__",
+     "__mg__",
+     "__mk__",
+     "__ml__",
+     "__mn__",
+     "__mr__",
+     "__ms__",
+     "__my__",
+     "__ne__",
+     "__nl__",
+     "__no__",
+     "__ns__",
+     "__oc__",
+     "__or__",
+     "__pa__",
+     "__pl__",
+     "__ps__",
+     "__pt__",
+     "__ro__",
+     "__ru__",
+     "__sd__",
+     "__si__",
+     "__sk__",
+     "__sl__",
+     "__so__",
+     "__sq__",
+     "__sr__",
+     "__ss__",
+     "__su__",
+     "__sv__",
+     "__sw__",
+     "__ta__",
+     "__th__",
+     "__tl__",
+     "__tn__",
+     "__tr__",
+     "__uk__",
+     "__ur__",
+     "__uz__",
+     "__vi__",
+     "__wo__",
+     "__xh__",
+     "__yi__",
+     "__yo__",
+     "__zh__",
+     "__zu__"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "language_codes": "m2m100",
+   "model_max_length": 1024,
+   "num_madeup_words": 8,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "src_lang": null,
+   "tgt_lang": null,
+   "tokenizer_class": "M2M100Tokenizer",
+   "tokenizer_file": null,
+   "unk_token": "<unk>"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff
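A minimal usage sketch (not part of the commit), assuming the uploaded files (SentencePiece model, vocab.json, and the configs above) form a complete M2M100-style tokenizer as declared by "tokenizer_class"; "your-username/your-model" is again a placeholder repository ID.

# Usage sketch under the assumptions stated above.
from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("your-username/your-model")  # placeholder repo ID

# Standard M2M-100 language codes from the list above can be set directly.
tokenizer.src_lang = "fi"
batch = tokenizer("Hyvää huomenta!", return_tensors="pt")  # Finnish: "Good morning!"

# The newly added codes are registered as extra special tokens, so their IDs
# can be looked up directly, e.g. for use as forced_bos_token_id at generation time.
sme_id = tokenizer.convert_tokens_to_ids("__sme__")
print(batch["input_ids"], sme_id)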