gokuls committed on
Commit
032355e
1 Parent(s): bb3354c

End of training

README.md CHANGED
@@ -1,4 +1,6 @@
 ---
+language:
+- en
 tags:
 - generated_from_trainer
 datasets:
@@ -12,7 +14,7 @@ model-index:
       name: Text Classification
       type: text-classification
     dataset:
-      name: glue
+      name: GLUE COLA
       type: glue
       config: cola
       split: validation
@@ -28,9 +30,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # hBERTv2_cola
 
-This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2](https://huggingface.co/gokuls/bert_12_layer_model_v2) on the glue dataset.
+This model is a fine-tuned version of [gokuls/bert_12_layer_model_v2](https://huggingface.co/gokuls/bert_12_layer_model_v2) on the GLUE COLA dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6196
+- Loss: 0.6182
 - Matthews Correlation: 0.0
 
 ## Model description
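Once the updated card is published, the model can be tried out with the standard text-classification pipeline. The sketch below is illustrative only: the repo id `gokuls/hBERTv2_cola` is an assumption based on the committer and model name, and the label names depend on the saved `id2label` mapping.

```python
# Minimal sketch, assuming the card above is published at the hypothetical
# repo id "gokuls/hBERTv2_cola"; adjust to the actual repository path.
from transformers import pipeline

classifier = pipeline("text-classification", model="gokuls/hBERTv2_cola")
print(classifier("The book was written by the student."))
# CoLA is a binary acceptability task, so the output is one of two labels
# (e.g. LABEL_0 / LABEL_1 unless id2label was customized in the config).
```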
all_results.json CHANGED
@@ -2,13 +2,13 @@
     "epoch": 7.0,
     "eval_loss": 0.6181691884994507,
     "eval_matthews_correlation": 0.0,
-    "eval_runtime": 1.2226,
+    "eval_runtime": 1.2209,
     "eval_samples": 1043,
-    "eval_samples_per_second": 853.105,
-    "eval_steps_per_second": 4.09,
-    "train_loss": 0.43535192473595885,
-    "train_runtime": 180.6728,
+    "eval_samples_per_second": 854.273,
+    "eval_steps_per_second": 4.095,
+    "train_loss": 0.6130136441783745,
+    "train_runtime": 303.6084,
     "train_samples": 8551,
-    "train_samples_per_second": 2366.433,
-    "train_steps_per_second": 9.409
+    "train_samples_per_second": 1408.228,
+    "train_steps_per_second": 5.599
 }
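Note that `eval_matthews_correlation` stays at 0.0 before and after this change. For context, the CoLA metric returns exactly zero when the predictions carry no information about the labels, most commonly when the classifier emits a single class for every sentence. A small illustration with made-up labels:

```python
# Illustration only: a Matthews correlation of 0.0, as reported above, is what
# the metric returns when the classifier predicts a single class throughout.
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 0, 1, 0, 1]   # hypothetical CoLA-style acceptability labels
y_pred = [1, 1, 1, 1, 1, 1]   # model always predicts the majority class
print(matthews_corrcoef(y_true, y_pred))  # 0.0
```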
eval_results.json CHANGED
@@ -2,8 +2,8 @@
     "epoch": 7.0,
     "eval_loss": 0.6181691884994507,
     "eval_matthews_correlation": 0.0,
-    "eval_runtime": 1.2226,
+    "eval_runtime": 1.2209,
     "eval_samples": 1043,
-    "eval_samples_per_second": 853.105,
-    "eval_steps_per_second": 4.09
+    "eval_samples_per_second": 854.273,
+    "eval_steps_per_second": 4.095
 }
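The updated eval throughput is consistent with the unchanged sample count; a trivial arithmetic check using the numbers from the file above:

```python
# eval_samples / eval_runtime should reproduce eval_samples_per_second
eval_samples, eval_runtime = 1043, 1.2209
print(eval_samples / eval_runtime)   # ~854.3, matching the reported 854.273
                                     # up to rounding of the stored runtime
```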
logs/events.out.tfevents.1677737520.serv-3317.65116.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dd247652e32e5ef210f9f8b5ce0463fd3e85f724ea97a604da5f5a79ec4128c
+size 375
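The added file is a Git LFS pointer (only the oid and the 375-byte pointer are stored in the repo). After fetching the actual object with `git lfs pull`, the TensorBoard event log can be inspected, for example with TensorBoard's `EventAccumulator`; the tag names are assumptions and should be taken from the printed list.

```python
# Sketch: read the event file above after `git lfs pull` has materialized it.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("logs/events.out.tfevents.1677737520.serv-3317.65116.2")
ea.Reload()
print(ea.Tags()["scalars"])                  # list the scalar tags that were logged
first_tag = ea.Tags()["scalars"][0]          # pick one tag (assumed non-empty)
for event in ea.Scalars(first_tag):
    print(event.step, event.value)
```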
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 7.0,
-    "train_loss": 0.43535192473595885,
-    "train_runtime": 180.6728,
+    "train_loss": 0.6130136441783745,
+    "train_runtime": 303.6084,
     "train_samples": 8551,
-    "train_samples_per_second": 2366.433,
-    "train_steps_per_second": 9.409
+    "train_samples_per_second": 1408.228,
+    "train_steps_per_second": 5.599
 }
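The new train throughput figures look low next to the evaluation throughput. A quick arithmetic check (an assumption about how the Trainer normalizes these values, using `max_steps` = 1700 from trainer_state.json below) suggests they are computed against the full configured schedule rather than the 7 epochs actually run:

```python
# Consistency check for the new train_results.json values (assumed relationship).
train_runtime = 303.6084
print(1408.228 * train_runtime)   # ~427,550 ≈ 8551 samples * 50 configured epochs
print(5.599 * train_runtime)      # ~1,700 ≈ max_steps from trainer_state.json
```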
trainer_state.json CHANGED
@@ -17,9 +17,9 @@
       "epoch": 1.0,
       "eval_loss": 0.618189811706543,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.2309,
-      "eval_samples_per_second": 847.325,
-      "eval_steps_per_second": 4.062,
+      "eval_runtime": 1.2234,
+      "eval_samples_per_second": 852.526,
+      "eval_steps_per_second": 4.087,
       "step": 34
     },
     {
@@ -32,9 +32,9 @@
       "epoch": 2.0,
       "eval_loss": 0.6181691884994507,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.2378,
-      "eval_samples_per_second": 842.642,
-      "eval_steps_per_second": 4.04,
+      "eval_runtime": 1.2214,
+      "eval_samples_per_second": 853.934,
+      "eval_steps_per_second": 4.094,
       "step": 68
     },
     {
@@ -47,9 +47,9 @@
       "epoch": 3.0,
       "eval_loss": 0.6213549971580505,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.218,
-      "eval_samples_per_second": 856.323,
-      "eval_steps_per_second": 4.105,
+      "eval_runtime": 1.2227,
+      "eval_samples_per_second": 853.001,
+      "eval_steps_per_second": 4.089,
       "step": 102
     },
     {
@@ -62,9 +62,9 @@
       "epoch": 4.0,
       "eval_loss": 0.6190556287765503,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.2211,
-      "eval_samples_per_second": 854.173,
-      "eval_steps_per_second": 4.095,
+      "eval_runtime": 1.2244,
+      "eval_samples_per_second": 851.842,
+      "eval_steps_per_second": 4.084,
       "step": 136
     },
     {
@@ -77,9 +77,9 @@
       "epoch": 5.0,
       "eval_loss": 0.6221293210983276,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.2273,
-      "eval_samples_per_second": 849.806,
-      "eval_steps_per_second": 4.074,
+      "eval_runtime": 1.2243,
+      "eval_samples_per_second": 851.906,
+      "eval_steps_per_second": 4.084,
       "step": 170
     },
     {
@@ -92,9 +92,9 @@
       "epoch": 6.0,
       "eval_loss": 0.6182675361633301,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.243,
-      "eval_samples_per_second": 839.13,
-      "eval_steps_per_second": 4.023,
+      "eval_runtime": 1.2259,
+      "eval_samples_per_second": 850.776,
+      "eval_steps_per_second": 4.079,
       "step": 204
     },
     {
@@ -107,19 +107,19 @@
       "epoch": 7.0,
       "eval_loss": 0.6196076273918152,
       "eval_matthews_correlation": 0.0,
-      "eval_runtime": 1.228,
-      "eval_samples_per_second": 849.376,
-      "eval_steps_per_second": 4.072,
+      "eval_runtime": 1.22,
+      "eval_samples_per_second": 854.907,
+      "eval_steps_per_second": 4.098,
       "step": 238
     },
     {
       "epoch": 7.0,
       "step": 238,
       "total_flos": 7455394754461696.0,
-      "train_loss": 0.43535192473595885,
-      "train_runtime": 180.6728,
-      "train_samples_per_second": 2366.433,
-      "train_steps_per_second": 9.409
+      "train_loss": 0.6130136441783745,
+      "train_runtime": 303.6084,
+      "train_samples_per_second": 1408.228,
+      "train_steps_per_second": 5.599
     }
   ],
   "max_steps": 1700,