qfrodicio committed on
Commit
2ee4846
1 Parent(s): 2c64a65

Training in progress, epoch 1

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "_name_or_path": "roberta-base",
+   "architectures": [
+     "RobertaForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B-OTHER_PEER",
+     "1": "I-OTHER_PEER",
+     "2": "B-EXPLAIN",
+     "3": "I-EXPLAIN",
+     "4": "B-SELF",
+     "5": "I-SELF",
+     "6": "B-NO",
+     "7": "I-NO",
+     "8": "B-QUESTION",
+     "9": "I-QUESTION",
+     "10": "B-NO_GESTURE",
+     "11": "I-NO_GESTURE",
+     "12": "B-YES",
+     "13": "I-YES",
+     "14": "B-NEUTRAL",
+     "15": "I-NEUTRAL",
+     "16": "B-FRONT",
+     "17": "I-FRONT",
+     "18": "B-EMPHATIC",
+     "19": "I-EMPHATIC",
+     "20": "B-PLEASE",
+     "21": "I-PLEASE",
+     "22": "B-SORRY",
+     "23": "I-SORRY",
+     "24": "B-CALM_DOWN",
+     "25": "I-CALM_DOWN",
+     "26": "B-BUT",
+     "27": "I-BUT",
+     "28": "B-THIRD_PERSON",
+     "29": "I-THIRD_PERSON",
+     "30": "B-COME_ON",
+     "31": "I-COME_ON",
+     "32": "B-GREET",
+     "33": "I-GREET",
+     "34": "B-THANKS",
+     "35": "I-THANKS",
+     "36": "B-ITERATE",
+     "37": "I-ITERATE",
+     "38": "B-ENTHUSIASTIC",
+     "39": "I-ENTHUSIASTIC",
+     "40": "B-THINKING",
+     "41": "I-THINKING"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B-BUT": 26,
+     "B-CALM_DOWN": 24,
+     "B-COME_ON": 30,
+     "B-EMPHATIC": 18,
+     "B-ENTHUSIASTIC": 38,
+     "B-EXPLAIN": 2,
+     "B-FRONT": 16,
+     "B-GREET": 32,
+     "B-ITERATE": 36,
+     "B-NEUTRAL": 14,
+     "B-NO": 6,
+     "B-NO_GESTURE": 10,
+     "B-OTHER_PEER": 0,
+     "B-PLEASE": 20,
+     "B-QUESTION": 8,
+     "B-SELF": 4,
+     "B-SORRY": 22,
+     "B-THANKS": 34,
+     "B-THINKING": 40,
+     "B-THIRD_PERSON": 28,
+     "B-YES": 12,
+     "I-BUT": 27,
+     "I-CALM_DOWN": 25,
+     "I-COME_ON": 31,
+     "I-EMPHATIC": 19,
+     "I-ENTHUSIASTIC": 39,
+     "I-EXPLAIN": 3,
+     "I-FRONT": 17,
+     "I-GREET": 33,
+     "I-ITERATE": 37,
+     "I-NEUTRAL": 15,
+     "I-NO": 7,
+     "I-NO_GESTURE": 11,
+     "I-OTHER_PEER": 1,
+     "I-PLEASE": 21,
+     "I-QUESTION": 9,
+     "I-SELF": 5,
+     "I-SORRY": 23,
+     "I-THANKS": 35,
+     "I-THINKING": 41,
+     "I-THIRD_PERSON": 29,
+     "I-YES": 13
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
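
For orientation: this config wires roberta-base into a RobertaForTokenClassification head with 42 labels, i.e. 21 gesture classes in BIO tagging format. Below is a minimal sketch of loading it with transformers; "qfrodicio/gesture-tagger" is a placeholder repo id, since this diff does not name the Hub repository.

```python
# Minimal sketch of consuming the config above with transformers.
# NOTE: "qfrodicio/gesture-tagger" is a hypothetical repo id, not
# confirmed by this diff -- substitute the real repository name.
from transformers import AutoConfig, AutoModelForTokenClassification

config = AutoConfig.from_pretrained("qfrodicio/gesture-tagger")
print(config.num_labels)   # 42 = 21 gesture classes x {B-, I-} BIO tags
print(config.id2label[0])  # "B-OTHER_PEER"

model = AutoModelForTokenClassification.from_pretrained("qfrodicio/gesture-tagger")
```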
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b39eb6e76be35680aac2506a1d5087db2fbd965dc4d0020d46258c4f03c8031
+ size 496422061
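
The ~496 MB weights themselves are not stored in Git history; this is a Git LFS pointer file recording only the blob's sha256 and size. As a sketch, huggingface_hub resolves such pointers to the real file on download (placeholder repo id again):

```python
# Sketch: fetching the actual LFS blob behind this pointer file.
# hf_hub_download follows the LFS pointer and returns a local cache path.
# "qfrodicio/gesture-tagger" is a hypothetical repo id.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="qfrodicio/gesture-tagger",
    filename="pytorch_model.bin",
)
print(weights_path)  # e.g. ~/.cache/huggingface/hub/.../pytorch_model.bin
```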
runs/Mar07_22-45-03_012dd7a14800/1678229115.7735648/events.out.tfevents.1678229115.012dd7a14800.136.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5ce729a295f24620ee567ab95a24be139d1350ac30ab4557b191b38873e1bf7
+ size 5757
runs/Mar07_22-45-03_012dd7a14800/events.out.tfevents.1678229115.012dd7a14800.136.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d8a9dc1a364e6d8815cf694fe1d1fe9a627a08528dd58942b62ed82cfbe32f0
+ size 6405
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "add_prefix_space": true,
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 512,
+   "name_or_path": "roberta-base",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
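
Two settings here matter for token classification: "add_prefix_space": true, which RoBERTa's byte-level BPE needs when inputs arrive pre-split into words, and "model_max_length": 512. A minimal usage sketch, again with a placeholder repo id:

```python
# Sketch: loading the tokenizer configured above and feeding it pre-split
# words, as a token-classification pipeline would. AutoTokenizer returns
# the fast variant by default, which provides word_ids().
# "qfrodicio/gesture-tagger" is a hypothetical repo id.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("qfrodicio/gesture-tagger")
enc = tokenizer(["hello", "how", "are", "you"],
                is_split_into_words=True,
                truncation=True, max_length=512)
print(enc.word_ids())  # subword-to-word alignment used to propagate BIO labels
```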
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e860e987a01ebe680b844deea4d16274391227c97a69b55fcfd84724e4a51553
+ size 3515
vocab.json ADDED
The diff for this file is too large to render. See raw diff