timewanderer committed
Commit cd5cf65
1 Parent(s): c351164

Training in progress, step 1500

model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ee948a20c89755d7a96becd052e4426cb6cdccf9b8ce076c95e8904c6697ce5
+oid sha256:8c9a63d3004610f74819b6489ca4b580024510d89b7576222160d376a94edd03
 size 268290900
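Each pointer above follows the Git LFS spec: the oid sha256 line records the SHA-256 digest of the binary payload and size records its length in bytes, so a fresh download can be checked locally. A minimal sketch (the local file path is a placeholder), using the new model.safetensors pointer values from the diff above:

import hashlib
import os

def verify_lfs_pointer(path, expected_oid, expected_size):
    # Hash the downloaded binary in chunks and compare against the pointer values.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

# Values copied from the new model.safetensors pointer above.
print(verify_lfs_pointer(
    "model.safetensors",
    "8c9a63d3004610f74819b6489ca4b580024510d89b7576222160d376a94edd03",
    268290900,
))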
run-2/checkpoint-1500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d014109cae35cf2373bc22e98f1aa7e70f39ca877935f2badc32b0b62bea6131
+oid sha256:8c9a63d3004610f74819b6489ca4b580024510d89b7576222160d376a94edd03
 size 268290900
run-2/checkpoint-1500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7fdf8b9d0ab7c662a4ac7746908ed03be93ccfc5c7b8cc749ef376471741bb78
+oid sha256:90b0a266aa6d2c7cee70af38c41264e284f7c2475c5c6c36d6483a6cc0cab912
 size 536643898
run-2/checkpoint-1500/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71453465aad25f4c5a0a948496c64b1f74df850abda497954afe3695c00756ee
+oid sha256:ad4d7d251acf36e559c362893a1fb310c9f46b20e8a330025a14b6829ce4ab07
 size 1064
run-2/checkpoint-1500/trainer_state.json CHANGED
@@ -10,66 +10,66 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.5796774193548387,
-      "eval_loss": 0.1907225400209427,
-      "eval_runtime": 5.6367,
-      "eval_samples_per_second": 549.965,
-      "eval_steps_per_second": 11.532,
+      "eval_accuracy": 0.6445161290322581,
+      "eval_loss": 0.29249975085258484,
+      "eval_runtime": 5.4218,
+      "eval_samples_per_second": 571.762,
+      "eval_steps_per_second": 11.989,
       "step": 318
     },
     {
       "epoch": 1.5723270440251573,
-      "grad_norm": 0.5174282193183899,
-      "learning_rate": 1.685534591194969e-05,
-      "loss": 0.3083,
+      "grad_norm": 0.6633772850036621,
+      "learning_rate": 1.550763701707098e-05,
+      "loss": 0.4626,
       "step": 500
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.817741935483871,
-      "eval_loss": 0.09267137199640274,
-      "eval_runtime": 5.8965,
-      "eval_samples_per_second": 525.732,
-      "eval_steps_per_second": 11.023,
+      "eval_accuracy": 0.8380645161290322,
+      "eval_loss": 0.12248263508081436,
+      "eval_runtime": 5.3956,
+      "eval_samples_per_second": 574.542,
+      "eval_steps_per_second": 12.047,
       "step": 636
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.8780645161290322,
-      "eval_loss": 0.06213448941707611,
-      "eval_runtime": 6.1922,
-      "eval_samples_per_second": 500.633,
-      "eval_steps_per_second": 10.497,
+      "eval_accuracy": 0.8887096774193548,
+      "eval_loss": 0.07343784719705582,
+      "eval_runtime": 5.2977,
+      "eval_samples_per_second": 585.157,
+      "eval_steps_per_second": 12.269,
       "step": 954
     },
     {
       "epoch": 3.1446540880503147,
-      "grad_norm": 0.42976459860801697,
-      "learning_rate": 1.371069182389937e-05,
-      "loss": 0.1074,
+      "grad_norm": 0.59869384765625,
+      "learning_rate": 1.101527403414196e-05,
+      "loss": 0.1438,
       "step": 1000
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.9006451612903226,
-      "eval_loss": 0.047995250672101974,
-      "eval_runtime": 5.9722,
-      "eval_samples_per_second": 519.074,
-      "eval_steps_per_second": 10.884,
+      "eval_accuracy": 0.9045161290322581,
+      "eval_loss": 0.05444410815834999,
+      "eval_runtime": 5.5714,
+      "eval_samples_per_second": 556.417,
+      "eval_steps_per_second": 11.667,
       "step": 1272
     },
     {
       "epoch": 4.716981132075472,
-      "grad_norm": 0.29368260502815247,
-      "learning_rate": 1.0566037735849058e-05,
-      "loss": 0.0685,
+      "grad_norm": 0.4075869023799896,
+      "learning_rate": 6.522911051212939e-06,
+      "loss": 0.0845,
       "step": 1500
     }
   ],
   "logging_steps": 500,
-  "max_steps": 3180,
+  "max_steps": 2226,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 10,
+  "num_train_epochs": 7,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -87,8 +87,8 @@
   "train_batch_size": 48,
   "trial_name": null,
   "trial_params": {
-    "alpha": 0.4529889909540463,
-    "num_train_epochs": 10,
-    "temperature": 17
+    "alpha": 0.444035914041536,
+    "num_train_epochs": 7,
+    "temperature": 3
   }
 }
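The trainer_state.json change above replaces the metrics of the previous trial with those of the new run-2 trial: 7 training epochs (max_steps 2226) and distillation-style trial parameters alpha and temperature. A minimal sketch, assuming the checkpoint directory has been pulled locally, that prints the per-epoch eval metrics recorded in log_history:

import json

with open("run-2/checkpoint-1500/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training logs and eval logs; keep only the eval entries.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")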
run-2/checkpoint-1500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:721f120a8e258fc8c48320ec67ae2a387816a0061225dcba3c70a5da25825846
+oid sha256:22d5df12fd1120947d14fad0a0388bc240150e4fa0277e30f926f1ec12ddb8e9
 size 5240
runs/Oct12_05-09-37_f46fcd247c16/events.out.tfevents.1728711374.f46fcd247c16.522.3 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:230aeae6b604707cf2f31f5ec67e88d5da3547b635ddcd68dcdf649799d72f97
-size 14155
+oid sha256:96655c74898d6436df313562795fa6a996fa4d35e878e19019d1b079d714fdff
+size 14689
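The updated tfevents file carries the same scalars for TensorBoard. A sketch, assuming the tensorboard package is installed and the run directory has been downloaded, that lists the recorded scalar tags (the exact tag names depend on how the Trainer logs them):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Oct12_05-09-37_f46fcd247c16")
acc.Reload()

# Tags() returns a dict; "scalars" holds the logged metric names.
tags = acc.Tags()["scalars"]
print(tags)
if tags:
    for event in acc.Scalars(tags[0]):
        print(event.step, event.value)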