carrie committed
Commit b315137
1 Parent(s): 36d072a

add model files

config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "_name_or_path": "t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "use_cache": true,
+   "vocab_size": 32100
+ }
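
Note (not part of this commit): a minimal loading sketch for the committed checkpoint. The local directory path is an assumption, and the generation settings shown are the `task_specific_params` defaults this config.json inherits from the base `t5-large`, not necessarily the fine-tuning task itself.

```python
# Sketch only: assumes the files from this commit sit in a local directory
# named after the checkpoint path seen in trainer_state.json (hypothetical).
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_dir = "./t5_large_finetuning_eli5_only_888"  # assumed local path
tokenizer = T5Tokenizer.from_pretrained(model_dir)
model = T5ForConditionalGeneration.from_pretrained(model_dir)

text = "summarize: " + "Example input sentence to run through the model."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
output_ids = model.generate(
    **inputs,
    num_beams=4,            # values below come from task_specific_params["summarization"]
    max_length=200,
    min_length=30,
    length_penalty=2.0,
    no_repeat_ngram_size=3,
    early_stopping=True,
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```
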
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c20aa45fecd1a64f4230b2d340ae2815f05dbc54de2472af4286e8ecc5733bb3
+ size 2950790023
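
Note (not part of this commit): what is committed here is a Git LFS pointer (spec version, `oid sha256:<hex>`, and size in bytes), not the 2.95 GB weight file itself. A small sketch, with hypothetical file names, of checking a downloaded blob against such a pointer:

```python
# Sketch only: parse a Git LFS pointer file and verify a locally downloaded
# blob against its recorded sha256 and byte size. Paths are illustrative.
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    """Return (sha256_hex, size_bytes) from a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        if line.strip():
            key, value = line.split(" ", 1)
            fields[key] = value
    oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    return oid, int(fields["size"])

def verify_blob(blob_path, oid, size):
    """Hash the real file in chunks and compare with the pointer."""
    h = hashlib.sha256()
    total = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            total += len(chunk)
    return total == size and h.hexdigest() == oid

oid, size = parse_lfs_pointer("pytorch_model.bin.pointer")  # hypothetical names
print(verify_blob("pytorch_model.bin", oid, size))
```
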
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
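
Note (not part of this commit): the long `additional_special_tokens` list above is exactly T5's 100 sentinel tokens `<extra_id_0>` through `<extra_id_99>`. A small check, assuming the file is in the current directory:

```python
# Sketch only: regenerate the sentinel-token list and compare it with the
# committed special_tokens_map.json (assumed to be a local copy).
import json

expected = [f"<extra_id_{i}>" for i in range(100)]
with open("special_tokens_map.json") as f:
    special = json.load(f)

assert special["additional_special_tokens"] == expected
assert special["eos_token"] == "</s>" and special["pad_token"] == "<pad>"
print(f"{len(expected)} sentinel tokens verified")
```
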
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "t5-large", "tokenizer_class": "T5Tokenizer"}
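
Note (not part of this commit): this tokenizer_config.json declares `tokenizer_class` T5Tokenizer, 100 `extra_ids`, and `model_max_length` 512, so plain truncation caps inputs at 512 SentencePiece tokens. The path below is hypothetical.

```python
# Sketch only: load the committed tokenizer files from an assumed local
# directory and rely on model_max_length (512) for truncation.
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("./t5_large_finetuning_eli5_only_888")
enc = tok("some long input " * 400, truncation=True)  # truncates to model_max_length
print(tok.model_max_length, len(enc["input_ids"]))    # 512, <= 512
```
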
trainer_state.json ADDED
@@ -0,0 +1,511 @@
+ {
+   "best_metric": 0.6115376353591917,
+   "best_model_checkpoint": "/data2/fxu/lfqa_discourse/t5_large_finetuning_eli5_only_888/checkpoint-504",
+   "epoch": 30.0,
+   "global_step": 540,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_Answer": 0.0,
+       "eval_Answer(Summary)": 0.44135188866799197,
+       "eval_Answer-Example": 0.0,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.0,
+       "eval_Miscellaneous": 0.0,
+       "eval_accuracy": 0.26811594202898553,
+       "eval_loss": 0.27198654413223267,
+       "eval_macro_f1": 0.07355864811133199,
+       "eval_runtime": 13.2965,
+       "eval_samples_per_second": 4.663,
+       "eval_steps_per_second": 0.301,
+       "step": 18
+     },
+     {
+       "epoch": 2.0,
+       "eval_Answer": 0.4731707317073171,
+       "eval_Answer(Summary)": 0.4433497536945813,
+       "eval_Answer-Example": 0.0,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.0,
+       "eval_Miscellaneous": 0.6,
+       "eval_accuracy": 0.4082125603864734,
+       "eval_loss": 0.18863672018051147,
+       "eval_macro_f1": 0.2527534142336497,
+       "eval_runtime": 10.164,
+       "eval_samples_per_second": 6.1,
+       "eval_steps_per_second": 0.394,
+       "step": 36
+     },
+     {
+       "epoch": 3.0,
+       "eval_Answer": 0.375,
+       "eval_Answer(Summary)": 0.49720670391061456,
+       "eval_Answer-Example": 0.15384615384615388,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.0,
+       "eval_Miscellaneous": 0.6923076923076923,
+       "eval_accuracy": 0.4082125603864734,
+       "eval_loss": 0.17385143041610718,
+       "eval_macro_f1": 0.28639342501074344,
+       "eval_runtime": 12.0287,
+       "eval_samples_per_second": 5.154,
+       "eval_steps_per_second": 0.333,
+       "step": 54
+     },
+     {
+       "epoch": 4.0,
+       "eval_Answer": 0.4250871080139373,
+       "eval_Answer(Summary)": 0.4433962264150943,
+       "eval_Answer-Example": 0.5189189189189188,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.0,
+       "eval_Miscellaneous": 0.6842105263157895,
+       "eval_accuracy": 0.4396135265700483,
+       "eval_loss": 0.1758367419242859,
+       "eval_macro_f1": 0.3452687966106233,
+       "eval_runtime": 11.2352,
+       "eval_samples_per_second": 5.518,
+       "eval_steps_per_second": 0.356,
+       "step": 72
+     },
+     {
+       "epoch": 5.0,
+       "eval_Answer": 0.43205574912891986,
+       "eval_Answer(Summary)": 0.4,
+       "eval_Answer-Example": 0.5072463768115941,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.2797202797202797,
+       "eval_Miscellaneous": 0.765432098765432,
+       "eval_accuracy": 0.4420289855072464,
+       "eval_loss": 0.1732899248600006,
+       "eval_macro_f1": 0.3974090840710376,
+       "eval_runtime": 11.2192,
+       "eval_samples_per_second": 5.526,
+       "eval_steps_per_second": 0.357,
+       "step": 90
+     },
+     {
+       "epoch": 6.0,
+       "eval_Answer": 0.475609756097561,
+       "eval_Answer(Summary)": 0.4120603015075377,
+       "eval_Answer-Example": 0.5222929936305732,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.02985074626865672,
+       "eval_Miscellaneous": 0.7123287671232877,
+       "eval_accuracy": 0.45169082125603865,
+       "eval_loss": 0.16830046474933624,
+       "eval_macro_f1": 0.3586904274379361,
+       "eval_runtime": 11.055,
+       "eval_samples_per_second": 5.608,
+       "eval_steps_per_second": 0.362,
+       "step": 108
+     },
+     {
+       "epoch": 7.0,
+       "eval_Answer": 0.4397163120567376,
+       "eval_Answer(Summary)": 0.5263157894736842,
+       "eval_Answer-Example": 0.6206896551724138,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.25999999999999995,
+       "eval_Miscellaneous": 0.6732673267326732,
+       "eval_accuracy": 0.49516908212560384,
+       "eval_loss": 0.15517334640026093,
+       "eval_macro_f1": 0.41999818057258476,
+       "eval_runtime": 11.346,
+       "eval_samples_per_second": 5.464,
+       "eval_steps_per_second": 0.353,
+       "step": 126
+     },
+     {
+       "epoch": 8.0,
+       "eval_Answer": 0.4920127795527157,
+       "eval_Answer(Summary)": 0.5025641025641026,
+       "eval_Answer-Example": 0.6495726495726496,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.32786885245901637,
+       "eval_Miscellaneous": 0.7272727272727273,
+       "eval_accuracy": 0.5120772946859904,
+       "eval_loss": 0.1799505650997162,
+       "eval_macro_f1": 0.44988185190353525,
+       "eval_runtime": 10.9313,
+       "eval_samples_per_second": 5.672,
+       "eval_steps_per_second": 0.366,
+       "step": 144
+     },
+     {
+       "epoch": 9.0,
+       "eval_Answer": 0.5092024539877301,
+       "eval_Answer(Summary)": 0.5235602094240838,
+       "eval_Answer-Example": 0.5636363636363636,
+       "eval_Answer-Organizationalsentence": 0.2857142857142857,
+       "eval_AuxiliaryInformation": 0.22033898305084745,
+       "eval_Miscellaneous": 0.736842105263158,
+       "eval_accuracy": 0.4975845410628019,
+       "eval_loss": 0.18231035768985748,
+       "eval_macro_f1": 0.47321573351274476,
+       "eval_runtime": 10.9336,
+       "eval_samples_per_second": 5.671,
+       "eval_steps_per_second": 0.366,
+       "step": 162
+     },
+     {
+       "epoch": 10.0,
+       "eval_Answer": 0.42704626334519574,
+       "eval_Answer(Summary)": 0.6071428571428571,
+       "eval_Answer-Example": 0.5161290322580646,
+       "eval_Answer-Organizationalsentence": 0.4,
+       "eval_AuxiliaryInformation": 0.33333333333333337,
+       "eval_Miscellaneous": 0.765432098765432,
+       "eval_accuracy": 0.5024154589371981,
+       "eval_loss": 0.20293939113616943,
+       "eval_macro_f1": 0.5081805974741471,
+       "eval_runtime": 11.8591,
+       "eval_samples_per_second": 5.228,
+       "eval_steps_per_second": 0.337,
+       "step": 180
+     },
+     {
+       "epoch": 11.0,
+       "eval_Answer": 0.46583850931677023,
+       "eval_Answer(Summary)": 0.5294117647058824,
+       "eval_Answer-Example": 0.6153846153846154,
+       "eval_Answer-Organizationalsentence": 0.4,
+       "eval_AuxiliaryInformation": 0.28571428571428575,
+       "eval_Miscellaneous": 0.7297297297297297,
+       "eval_accuracy": 0.4975845410628019,
+       "eval_loss": 0.20877273380756378,
+       "eval_macro_f1": 0.5043464841418805,
+       "eval_runtime": 10.9599,
+       "eval_samples_per_second": 5.657,
+       "eval_steps_per_second": 0.365,
+       "step": 198
+     },
+     {
+       "epoch": 12.0,
+       "eval_Answer": 0.42909090909090913,
+       "eval_Answer(Summary)": 0.5714285714285714,
+       "eval_Answer-Example": 0.6611570247933884,
+       "eval_Answer-Organizationalsentence": 0.0,
+       "eval_AuxiliaryInformation": 0.3364485981308411,
+       "eval_Miscellaneous": 0.736842105263158,
+       "eval_accuracy": 0.5193236714975845,
+       "eval_loss": 0.22424167394638062,
+       "eval_macro_f1": 0.45582786811781134,
+       "eval_runtime": 11.5376,
+       "eval_samples_per_second": 5.374,
+       "eval_steps_per_second": 0.347,
+       "step": 216
+     },
+     {
+       "epoch": 13.0,
+       "eval_Answer": 0.4677966101694915,
+       "eval_Answer(Summary)": 0.5739910313901346,
+       "eval_Answer-Example": 0.6721311475409836,
+       "eval_Answer-Organizationalsentence": 0.4,
+       "eval_AuxiliaryInformation": 0.31067961165048547,
+       "eval_Miscellaneous": 0.7749999999999999,
+       "eval_accuracy": 0.5362318840579711,
+       "eval_loss": 0.23804587125778198,
+       "eval_macro_f1": 0.5332664001251824,
+       "eval_runtime": 10.943,
+       "eval_samples_per_second": 5.666,
+       "eval_steps_per_second": 0.366,
+       "step": 234
+     },
+     {
+       "epoch": 14.0,
+       "eval_Answer": 0.4834437086092715,
+       "eval_Answer(Summary)": 0.5517241379310346,
+       "eval_Answer-Example": 0.6666666666666667,
+       "eval_Answer-Organizationalsentence": 0.5,
+       "eval_AuxiliaryInformation": 0.3709677419354839,
+       "eval_Miscellaneous": 0.75,
+       "eval_accuracy": 0.5314009661835749,
+       "eval_loss": 0.2845667898654938,
+       "eval_macro_f1": 0.5538003758570761,
+       "eval_runtime": 11.0141,
+       "eval_samples_per_second": 5.629,
+       "eval_steps_per_second": 0.363,
+       "step": 252
+     },
+     {
+       "epoch": 15.0,
+       "eval_Answer": 0.5100671140939598,
+       "eval_Answer(Summary)": 0.5714285714285714,
+       "eval_Answer-Example": 0.6464646464646465,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4153846153846154,
+       "eval_Miscellaneous": 0.7126436781609196,
+       "eval_accuracy": 0.5483091787439613,
+       "eval_loss": 0.2950053811073303,
+       "eval_macro_f1": 0.5871092153665631,
+       "eval_runtime": 11.553,
+       "eval_samples_per_second": 5.367,
+       "eval_steps_per_second": 0.346,
+       "step": 270
+     },
+     {
+       "epoch": 16.0,
+       "eval_Answer": 0.45637583892617456,
+       "eval_Answer(Summary)": 0.5945945945945946,
+       "eval_Answer-Example": 0.607843137254902,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.38333333333333336,
+       "eval_Miscellaneous": 0.7848101265822784,
+       "eval_accuracy": 0.5314009661835749,
+       "eval_loss": 0.28483256697654724,
+       "eval_macro_f1": 0.5822706162263249,
+       "eval_runtime": 10.9374,
+       "eval_samples_per_second": 5.669,
+       "eval_steps_per_second": 0.366,
+       "step": 288
+     },
+     {
+       "epoch": 17.0,
+       "eval_Answer": 0.4557823129251701,
+       "eval_Answer(Summary)": 0.5952380952380952,
+       "eval_Answer-Example": 0.6923076923076923,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.3125,
+       "eval_Miscellaneous": 0.7848101265822784,
+       "eval_accuracy": 0.5434782608695652,
+       "eval_loss": 0.29017218947410583,
+       "eval_macro_f1": 0.5845508156199838,
+       "eval_runtime": 11.3062,
+       "eval_samples_per_second": 5.484,
+       "eval_steps_per_second": 0.354,
+       "step": 306
+     },
+     {
+       "epoch": 18.0,
+       "eval_Answer": 0.4621212121212122,
+       "eval_Answer(Summary)": 0.5844748858447489,
+       "eval_Answer-Example": 0.6611570247933884,
+       "eval_Answer-Organizationalsentence": 0.33333333333333337,
+       "eval_AuxiliaryInformation": 0.43283582089552236,
+       "eval_Miscellaneous": 0.7380952380952381,
+       "eval_accuracy": 0.5458937198067633,
+       "eval_loss": 0.3154158294200897,
+       "eval_macro_f1": 0.5353362525139073,
+       "eval_runtime": 11.5419,
+       "eval_samples_per_second": 5.372,
+       "eval_steps_per_second": 0.347,
+       "step": 324
+     },
+     {
+       "epoch": 19.0,
+       "eval_Answer": 0.46905537459283386,
+       "eval_Answer(Summary)": 0.5526315789473685,
+       "eval_Answer-Example": 0.6315789473684211,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.38775510204081637,
+       "eval_Miscellaneous": 0.7692307692307693,
+       "eval_accuracy": 0.533816425120773,
+       "eval_loss": 0.3426768183708191,
+       "eval_macro_f1": 0.5794864064744794,
+       "eval_runtime": 11.3895,
+       "eval_samples_per_second": 5.444,
+       "eval_steps_per_second": 0.351,
+       "step": 342
+     },
+     {
+       "epoch": 20.0,
+       "eval_Answer": 0.5089820359281436,
+       "eval_Answer(Summary)": 0.5700934579439253,
+       "eval_Answer-Example": 0.45161290322580644,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.3921568627450981,
+       "eval_Miscellaneous": 0.765432098765432,
+       "eval_accuracy": 0.5289855072463768,
+       "eval_loss": 0.3493908941745758,
+       "eval_macro_f1": 0.5591573375458453,
+       "eval_runtime": 11.3644,
+       "eval_samples_per_second": 5.456,
+       "eval_steps_per_second": 0.352,
+       "step": 360
+     },
+     {
+       "epoch": 21.0,
+       "eval_Answer": 0.4316546762589928,
+       "eval_Answer(Summary)": 0.5701754385964912,
+       "eval_Answer-Example": 0.6666666666666667,
+       "eval_Answer-Organizationalsentence": 0.5,
+       "eval_AuxiliaryInformation": 0.4,
+       "eval_Miscellaneous": 0.7749999999999999,
+       "eval_accuracy": 0.5265700483091788,
+       "eval_loss": 0.34431466460227966,
+       "eval_macro_f1": 0.5572494635870251,
+       "eval_runtime": 11.6127,
+       "eval_samples_per_second": 5.339,
+       "eval_steps_per_second": 0.344,
+       "step": 378
+     },
+     {
+       "epoch": 22.0,
+       "eval_Answer": 0.4968944099378882,
+       "eval_Answer(Summary)": 0.5909090909090909,
+       "eval_Answer-Example": 0.4597701149425288,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4000000000000001,
+       "eval_Miscellaneous": 0.7749999999999999,
+       "eval_accuracy": 0.5314009661835749,
+       "eval_loss": 0.3664790987968445,
+       "eval_macro_f1": 0.5648733804093624,
+       "eval_runtime": 11.1595,
+       "eval_samples_per_second": 5.556,
+       "eval_steps_per_second": 0.358,
+       "step": 396
+     },
+     {
+       "epoch": 23.0,
+       "eval_Answer": 0.48965517241379314,
+       "eval_Answer(Summary)": 0.6079295154185023,
+       "eval_Answer-Example": 0.6666666666666667,
+       "eval_Answer-Organizationalsentence": 0.5,
+       "eval_AuxiliaryInformation": 0.416,
+       "eval_Miscellaneous": 0.7749999999999999,
+       "eval_accuracy": 0.5603864734299517,
+       "eval_loss": 0.36973538994789124,
+       "eval_macro_f1": 0.575875225749827,
+       "eval_runtime": 11.3197,
+       "eval_samples_per_second": 5.477,
+       "eval_steps_per_second": 0.353,
+       "step": 414
+     },
+     {
+       "epoch": 24.0,
+       "eval_Answer": 0.4774193548387097,
+       "eval_Answer(Summary)": 0.5833333333333334,
+       "eval_Answer-Example": 0.5217391304347826,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.40944881889763785,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.5265700483091788,
+       "eval_loss": 0.3806516230106354,
+       "eval_macro_f1": 0.5696834958428677,
+       "eval_runtime": 11.2648,
+       "eval_samples_per_second": 5.504,
+       "eval_steps_per_second": 0.355,
+       "step": 432
+     },
+     {
+       "epoch": 25.0,
+       "eval_Answer": 0.4871794871794871,
+       "eval_Answer(Summary)": 0.588785046728972,
+       "eval_Answer-Example": 0.5833333333333334,
+       "eval_Answer-Organizationalsentence": 0.5,
+       "eval_AuxiliaryInformation": 0.3934426229508197,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.5362318840579711,
+       "eval_loss": 0.39177072048187256,
+       "eval_macro_f1": 0.552039026846448,
+       "eval_runtime": 11.3644,
+       "eval_samples_per_second": 5.456,
+       "eval_steps_per_second": 0.352,
+       "step": 450
+     },
+     {
+       "epoch": 26.0,
+       "eval_Answer": 0.5116279069767442,
+       "eval_Answer(Summary)": 0.5915492957746479,
+       "eval_Answer-Example": 0.7102803738317757,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4032258064516129,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.5652173913043478,
+       "eval_loss": 0.3835026025772095,
+       "eval_macro_f1": 0.6071406200979206,
+       "eval_runtime": 11.3582,
+       "eval_samples_per_second": 5.459,
+       "eval_steps_per_second": 0.352,
+       "step": 468
+     },
+     {
+       "epoch": 27.0,
+       "eval_Answer": 0.5100671140939598,
+       "eval_Answer(Summary)": 0.609090909090909,
+       "eval_Answer-Example": 0.69811320754717,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4132231404958678,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.5700483091787439,
+       "eval_loss": 0.3833402693271637,
+       "eval_macro_f1": 0.6094424514634416,
+       "eval_runtime": 11.4693,
+       "eval_samples_per_second": 5.406,
+       "eval_steps_per_second": 0.349,
+       "step": 486
+     },
+     {
+       "epoch": 27.78,
+       "learning_rate": 7.4074074074074075e-06,
+       "loss": 0.0945,
+       "step": 500
+     },
+     {
+       "epoch": 28.0,
+       "eval_Answer": 0.5167785234899329,
+       "eval_Answer(Summary)": 0.5972850678733032,
+       "eval_Answer-Example": 0.7222222222222223,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4067796610169492,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.572463768115942,
+       "eval_loss": 0.39141198992729187,
+       "eval_macro_f1": 0.6115376353591917,
+       "eval_runtime": 11.4624,
+       "eval_samples_per_second": 5.409,
+       "eval_steps_per_second": 0.349,
+       "step": 504
+     },
+     {
+       "epoch": 29.0,
+       "eval_Answer": 0.5214521452145214,
+       "eval_Answer(Summary)": 0.599078341013825,
+       "eval_Answer-Example": 0.7102803738317757,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4067796610169492,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.572463768115942,
+       "eval_loss": 0.3956356346607208,
+       "eval_macro_f1": 0.610625143104969,
+       "eval_runtime": 11.2747,
+       "eval_samples_per_second": 5.499,
+       "eval_steps_per_second": 0.355,
+       "step": 522
+     },
+     {
+       "epoch": 30.0,
+       "eval_Answer": 0.5197368421052633,
+       "eval_Answer(Summary)": 0.5925925925925926,
+       "eval_Answer-Example": 0.7102803738317757,
+       "eval_Answer-Organizationalsentence": 0.6666666666666666,
+       "eval_AuxiliaryInformation": 0.4067796610169492,
+       "eval_Miscellaneous": 0.759493670886076,
+       "eval_accuracy": 0.5700483091787439,
+       "eval_loss": 0.39962947368621826,
+       "eval_macro_f1": 0.6092583011832206,
+       "eval_runtime": 11.3436,
+       "eval_samples_per_second": 5.466,
+       "eval_steps_per_second": 0.353,
+       "step": 540
+     },
+     {
+       "epoch": 30.0,
+       "step": 540,
+       "total_flos": 1.5886402423296e+16,
+       "train_loss": 0.08773032142608254,
+       "train_runtime": 1826.4445,
+       "train_samples_per_second": 4.714,
+       "train_steps_per_second": 0.296
+     }
+   ],
+   "max_steps": 540,
+   "num_train_epochs": 30,
+   "total_flos": 1.5886402423296e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
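
Note (not part of this commit): `log_history` holds one eval entry per epoch, and `best_metric` (eval_macro_f1 = 0.6115...) corresponds to the checkpoint at step 504 named in `best_model_checkpoint`. A small sketch for pulling the per-epoch macro F1 back out, assuming a local copy of the file:

```python
# Sketch only: summarize eval_macro_f1 per epoch from trainer_state.json
# (assumed local) and recover the best entry, which should match best_metric.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_macro_f1" in e]
for e in evals:
    print(f"epoch {e['epoch']:>5}: macro_f1={e['eval_macro_f1']:.4f} "
          f"acc={e['eval_accuracy']:.4f} loss={e['eval_loss']:.4f}")

best = max(evals, key=lambda e: e["eval_macro_f1"])
print("best step:", best["step"], "macro_f1:", best["eval_macro_f1"],
      "reported best_metric:", state["best_metric"])
```
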
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7df648d0055b2c07133e57699f4fff7e73e5efd0053a82e29c47d6ab72a66349
+ size 3055
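
Note (not part of this commit): training_args.bin is another LFS pointer; the underlying 3 KB file is typically the pickled TrainingArguments object that transformers' Trainer saves next to a checkpoint. A hedged sketch of inspecting it, assuming a compatible transformers install and a local copy:

```python
# Sketch only: unpickle the saved TrainingArguments. On recent torch versions
# weights_only=False is required because this is a pickled object, not tensors.
import torch

args = torch.load("training_args.bin", weights_only=False)  # assumed local copy
print(type(args).__name__)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```
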