Dongfu Jiang committed
Commit fad1144
1 Parent(s): b3f84d4

Upload 10 files

added_tokens.json ADDED
@@ -0,0 +1,7 @@
+{
+  "<|candidate1|>": 128002,
+  "<|candidate2|>": 128003,
+  "<|candidate|>": 128004,
+  "<|source|>": 128001,
+  "[MASK]": 128000
+}
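These entries pin the ranker's separator tokens just past the base DeBERTa-v3 vocabulary ([MASK] at 128000 is already the base tokenizer's own id). Below is a minimal sketch of the usual transformers workflow that produces an added_tokens.json like this one; the base checkpoint name is taken from ranker_config.json further down, and the exact ids assume the base vocabulary ends where this file suggests.

```python
# Sketch: extend a base tokenizer with the ranker's separator tokens.
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-large")
model = AutoModel.from_pretrained("microsoft/deberta-v3-large")

# Register the separators as special tokens so SentencePiece never splits them.
new_tokens = ["<|source|>", "<|candidate1|>", "<|candidate2|>", "<|candidate|>"]
tokenizer.add_tokens(new_tokens, special_tokens=True)

# Grow the embedding matrix to cover the new ids.
model.resize_token_embeddings(len(tokenizer))

print(tokenizer.convert_tokens_to_ids("<|source|>"))  # 128001, matching the file above
```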
config.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c689ee2a644c7173ab8649bbd6d856c25ebba9825db9951145c1e66e403997e
+size 687
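config.bin is stored with Git LFS, so the diff shows only a three-line pointer: the spec version, the SHA-256 of the real blob, and its size in bytes. A tiny sketch of reading those fields; the helper name is ours, not part of any library.

```python
# Parse a Git LFS pointer file (layout per https://git-lfs.github.com/spec/v1).
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("config.bin")
print(ptr["oid"])   # sha256:2c689ee2a644c...
print(ptr["size"])  # 687 (bytes of the real object)
```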
desktop.ini ADDED
Binary file (244 Bytes).
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b82e8cfba027b8a2f1b7f93f15887527c2aacb103fdebdd230a83073cfe72fe
+size 1786165535
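Another LFS pointer; at roughly 1.8 GB the real object is consistent with full-precision DeBERTa-v3-large weights plus a small ranking head. Once the blob is pulled, a quick look with plain torch (the key names printed depend on the ranker class, so treat the output as illustrative):

```python
# Inspect the checkpoint after `git lfs pull` replaces the pointer stub.
import torch

state_dict = torch.load("pytorch_model.bin", map_location="cpu")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape), tensor.dtype)
```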
ranker_config.json ADDED
@@ -0,0 +1,19 @@
+{
+  "ranker_type": "pairranker",
+  "model_type": "deberta",
+  "model_name": "microsoft/deberta-v3-large",
+  "cache_dir": null,
+  "load_checkpoint": null,
+  "source_max_length": 128,
+  "candidate_max_length": 128,
+  "n_tasks": 1,
+  "num_pos": 5,
+  "num_neg": 5,
+  "sub_sampling_mode": "all_pair",
+  "sub_sampling_ratio": 0.4,
+  "loss_type": "instructgpt",
+  "reduce_type": "linear",
+  "inference_mode": "bubble",
+  "drop_out": 0.05,
+  "fp16": true
+}
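The config declares a PairRanker head over microsoft/deberta-v3-large: 128-token windows for the source and each candidate, all-pair subsampling at a 0.4 ratio during training, an InstructGPT-style pairwise loss, and "bubble" inference. The sketch below illustrates what a bubble-mode pass presumably amounts to: with a pairwise comparator, one bubble-sort-style sweep surfaces the best of n candidates in n - 1 comparisons instead of scoring all O(n^2) pairs. `compare` is a stand-in for the model's pairwise forward, not this repo's API.

```python
# Hedged sketch of bubble-mode inference over ranked candidates.
from typing import Callable, List

def bubble_best(source: str, candidates: List[str],
                compare: Callable[[str, str, str], bool]) -> str:
    """Return the candidate that survives a single bubble pass.

    `compare(source, a, b)` should return True when `a` is judged
    better than `b` for `source`.
    """
    best = candidates[0]
    for challenger in candidates[1:]:
        if compare(source, challenger, best):
            best = challenger
    return best

# Dummy comparator for illustration: prefer the longer candidate.
print(bubble_best("q", ["a", "bb", "c"], lambda s, a, b: len(a) > len(b)))  # -> "bb"
```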
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+{
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
spm.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c679fbf93643d19aab7ee10c0b99e460bdbc02fedf34b92b05af343b4af586fd
+size 2464616
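spm.model is the SentencePiece vocabulary that DebertaV2Tokenizer wraps ("vocab_type": "spm" in tokenizer_config.json below). Assuming the LFS blob has been pulled, it also loads directly with the sentencepiece package:

```python
# Load the SentencePiece model on its own, without transformers.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spm.model")
print(sp.get_piece_size())                     # vocabulary size
print(sp.encode("Hello world", out_type=str))  # subword pieces, e.g. ['▁Hello', '▁world']
```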
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "sp_model_kwargs": {},
+  "split_by_punct": false,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "unk_token": "[UNK]",
+  "vocab_type": "spm"
+}
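Two details worth noting: tokenizer_class pins DebertaV2Tokenizer, and model_max_length is transformers' "unset" sentinel (int(1e30) after a float round-trip), so truncation is governed by the 128-token limits in ranker_config.json instead. A short check, assuming "." is a local clone of this repo with LFS files pulled:

```python
# Load the tokenizer from this repo and confirm the specials stay intact.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(type(tok).__name__)  # DebertaV2Tokenizer (or its fast variant)
ids = tok("<|source|> query <|candidate1|> answer")["input_ids"]
# The separator tokens survive as single pieces; the [CLS]/[SEP] framing
# comes from special_tokens_map.json above.
print(tok.convert_ids_to_tokens(ids))
```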
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3acd68d4fc46ad568d4bba0a878036b85755818a7179a426ec308b4d1d9c2df
+size 3823
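training_args.bin is a pickled object written with torch.save; at 3.8 KB it plausibly holds a Hugging Face TrainingArguments-style object rather than tensors. A hedged way to peek at it:

```python
# Load the pickled training arguments. weights_only=False is required on
# torch >= 2.6, where tensor-only loading became the default; only do this
# for files you trust, since unpickling executes code.
import torch

args = torch.load("training_args.bin", map_location="cpu", weights_only=False)
print(type(args).__name__)
print(vars(args))  # hyperparameters used for this training run
```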