RapMinerz committed on
Commit
a82db47
1 Parent(s): f62ef8c
config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "_name_or_path": "flaubert/flaubert_large_cased",
+   "amp": 1,
+   "architectures": [
+     "FlaubertWithLMHeadModel"
+   ],
+   "asm": false,
+   "attention_dropout": 0.1,
+   "attention_probs_dropout_prob": 0.1,
+   "bos_index": 0,
+   "bos_token_id": 0,
+   "bptt": 512,
+   "causal": false,
+   "clip_grad_norm": 5,
+   "dropout": 0.1,
+   "emb_dim": 1024,
+   "embed_init_std": 0.02209708691207961,
+   "encoder_only": true,
+   "end_n_top": 5,
+   "eos_index": 1,
+   "fp16": true,
+   "gelu_activation": true,
+   "group_by_size": true,
+   "hidden_dropout_prob": 0.1,
+   "id2lang": {
+     "0": "fr"
+   },
+   "init_std": 0.02,
+   "is_encoder": true,
+   "lang2id": {
+     "fr": 0
+   },
+   "lang_id": 0,
+   "langs": [
+     "fr"
+   ],
+   "layer_norm_eps": 1e-06,
+   "layerdrop": 0.2,
+   "lg_sampling_factor": -1,
+   "lgs": "fr",
+   "mask_index": 5,
+   "mask_token_id": 0,
+   "max_batch_size": 0,
+   "max_position_embeddings": 512,
+   "max_vocab": -1,
+   "mlm_steps": [
+     [
+       "fr",
+       null
+     ]
+   ],
+   "model_type": "flaubert",
+   "n_heads": 16,
+   "n_langs": 1,
+   "n_layers": 24,
+   "pad_index": 2,
+   "pad_token_id": 2,
+   "pre_norm": true,
+   "sample_alpha": 0,
+   "share_inout_emb": true,
+   "sinusoidal_embeddings": false,
+   "start_n_top": 5,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "first",
+   "summary_use_proj": true,
+   "tokens_per_batch": -1,
+   "transformers_version": "4.6.1",
+   "unk_index": 3,
+   "use_apex": true,
+   "use_lang_emb": true,
+   "vocab_size": 68729,
+   "word_blank": 0,
+   "word_dropout": 0,
+   "word_keep": 0.1,
+   "word_mask": 0.8,
+   "word_mask_keep_rand": "0.8,0.1,0.1",
+   "word_pred": 0.15,
+   "word_rand": 0.1,
+   "word_shuffle": 0
+ }
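
The configuration describes a French-only FlauBERT large architecture (24 layers, 16 attention heads, 1024-dimensional embeddings, 68,729-token vocabulary) initialised from flaubert/flaubert_large_cased and exposed through FlaubertWithLMHeadModel, i.e. with a masked-language-modelling head on top. A minimal loading sketch, assuming the repository is published on the Hugging Face Hub under the hypothetical id RapMinerz/<repo-name> (substitute the real one) and that transformers >= 4.6 and torch are installed:

```python
# Minimal loading sketch. "RapMinerz/<repo-name>" is a hypothetical
# placeholder for this repository's Hub id; replace it with the real one.
import torch
from transformers import FlaubertTokenizer, FlaubertWithLMHeadModel

repo_id = "RapMinerz/<repo-name>"  # hypothetical placeholder

tokenizer = FlaubertTokenizer.from_pretrained(repo_id)
model = FlaubertWithLMHeadModel.from_pretrained(repo_id)
model.eval()

# Fill in the masked token of a French sentence; the mask token is
# "<special1>" as declared in special_tokens_map.json below.
text = f"Paris est la capitale de la {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Locate the mask position and show the top-5 candidate tokens.
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top5 = logits[0, mask_pos].topk(5, dim=-1).indices[0]
print(tokenizer.convert_ids_to_tokens(top5.tolist()))
```

Note that, per the token maps in this commit, the mask token is "<special1>" rather than a dedicated "<mask>" symbol, so tokenizer.mask_token and tokenizer.mask_token_id resolve to that entry.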
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a74d40b1c3d6d2930d3d8ae5d56985c79e380d112bd9966fe0e87ec6556f65c9
+ size 1493283621
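
pytorch_model.bin is tracked with Git LFS, so the commit stores only this three-line pointer: oid is the SHA-256 of the actual weights file and size is its byte length (about 1.5 GB); the binary itself is resolved at checkout or download time. A small sketch, assuming the weights have already been downloaded to the working directory, that checks a local copy against the pointer:

```python
# Integrity check for the LFS-managed weights: compare a locally
# downloaded pytorch_model.bin against the oid/size in the pointer above.
# The local path is an assumption.
import hashlib
import os

EXPECTED_SHA256 = "a74d40b1c3d6d2930d3d8ae5d56985c79e380d112bd9966fe0e87ec6556f65c9"
EXPECTED_SIZE = 1493283621
path = "pytorch_model.bin"  # assumed download location

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```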
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "</s>", "mask_token": "<special1>", "additional_special_tokens": ["<special0>", "<special1>", "<special2>", "<special3>", "<special4>", "<special5>", "<special6>", "<special7>", "<special8>", "<special9>"], "lang2id": null, "id2lang": null, "do_lowercase_and_remove_accent": true, "do_lower_case": false, "model_max_length": 512, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "flaubert/flaubert_large_cased"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff