acmc committed on
Commit c1efb16
1 Parent(s): 2e915c9

Pushing tokenizer (#2)

- Pushing tokenizer (729549f2734e477410f28fee17bfe1582ac312f9)

added_tokens.json ADDED
@@ -0,0 +1,10 @@
+ {
+ " | ": 96109,
+ " ||| ": 96110,
+ "[BACKGROUND]": 96105,
+ "[CONCLUSIONS]": 96108,
+ "[CONTENT]": 96103,
+ "[METHODS]": 96106,
+ "[RESULTS]": 96107,
+ "[SUMMARY]": 96104
+ }
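
These entries extend the base vocabulary with section markers and two separator tokens. A minimal sketch of checking the mapping once the tokenizer is loaded, assuming the standard transformers API; the repository id is a placeholder, not this repo's actual id:

from transformers import AutoTokenizer

# "user/repo" is a hypothetical id; substitute this repository's actual id.
tokenizer = AutoTokenizer.from_pretrained("user/repo")

# Each added token should resolve to the id recorded in added_tokens.json.
print(tokenizer.convert_tokens_to_ids("[BACKGROUND]"))  # 96105
print(tokenizer.convert_tokens_to_ids(" ||| "))         # 96110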
special_tokens_map.json ADDED
@@ -0,0 +1,68 @@
+ {
+ "additional_special_tokens": [
+ "<s>",
+ "</s>",
+ "<unk>",
+ "[SEP]",
+ "<pad>",
+ "[CLS]",
+ "[MASK]",
+ "[CONTENT]",
+ "[SUMMARY]",
+ "[BACKGROUND]",
+ "[METHODS]",
+ "[RESULTS]",
+ "[CONCLUSIONS]",
+ " | ",
+ " ||| "
+ ],
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "[MASK]",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
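
Of the flags above, only mask_token sets "lstrip": true, which makes the token absorb whitespace to its left when matched in input text. A sketch of the equivalent declaration, assuming the AddedToken class from the tokenizers library:

from tokenizers import AddedToken

# Mirrors the mask_token entry above: lstrip=True makes "[MASK]"
# consume the whitespace immediately before it during matching.
mask = AddedToken("[MASK]", lstrip=True, rstrip=False,
                  normalized=True, single_word=False)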
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe1b40df7e8825709c0172639c47338a68d5622e9e3b6cc0fae516537cae738b
+ size 1915455
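
The three lines above are a Git LFS pointer (spec version, sha256 oid, byte size), not the SentencePiece model itself; the roughly 1.9 MB binary is stored out of band. A sketch of fetching the resolved file, assuming huggingface_hub and a placeholder repository id:

from huggingface_hub import hf_hub_download

# "user/repo" is a hypothetical id; hf_hub_download follows the LFS
# pointer and returns a local path to the real spiece.model binary.
path = hf_hub_download(repo_id="user/repo", filename="spiece.model")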
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+ "additional_special_tokens": [],
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": {
+ "__type": "AddedToken",
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": "[MASK]",
+ "mask_token_sent": null,
+ "model_max_length": 512,
+ "offset": 0,
+ "pad_token": {
+ "__type": "AddedToken",
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "__type": "AddedToken",
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "tokenizer_class": "PegasusTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
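
Per this config, the tokenizer loads as PegasusTokenizer and model_max_length caps inputs at 512 tokens. A minimal usage sketch, again with a placeholder repository id:

from transformers import PegasusTokenizer

# "user/repo" is a hypothetical id; substitute this repository's actual id.
tok = PegasusTokenizer.from_pretrained("user/repo")

# Truncate inputs to the 512-token limit declared in tokenizer_config.json.
enc = tok("example input text", truncation=True, max_length=tok.model_max_length)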