masatochi committed on
Commit d63bb42
1 Parent(s): 00c14b5

Training in progress, step 10, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:746646e7a6a6925ca7ebb29d53fcf0b1c0893b8f20d6362eeb750ff6f122341b
+oid sha256:8602602d27b89a8d5aa06cbaaa7afce4df95670e302824b14d2d995dc4681680
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c85ac0ba301141ca1014d25de18920f7b9670970e107f1a01d438569192cbc6f
+oid sha256:dba108f0cdce82ad3e2cd068b6f849f104457a9e357302d101aa78149d8dd441
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ea4c86c7e8a86ff4c17a1f954f7b8c92b5100baf63d25f32ca35b20d0a8cdf7
+oid sha256:04232fce99c4802dc579b85d4a7938b6bd47a3fd2dae4192a67625460029c733
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16178af669793c21eac8710459f1cc10bcf09af1984c5f2d80e76b26b329c8ba
+oid sha256:67680b8b35ea230d53acee387012f8809825a2c511bd41340f42320f849e349d
 size 1064
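
The four files above are Git LFS pointers (per the spec URL in each diff): the repository stores only a sha256 oid and a byte size, while the actual blobs live in LFS storage. A quick way to confirm that a local copy of the checkpoint holds the new blobs rather than pointer stubs is to hash the files and compare against the values in this commit; a minimal sketch in Python, assuming the checkpoint was downloaded into a local last-checkpoint/ directory (the directory layout is an assumption, the expected values are copied from the diffs above):

import hashlib
from pathlib import Path

# Expected (sha256 oid, size in bytes) taken from the updated pointers in this commit.
EXPECTED = {
    "adapter_model.safetensors": ("8602602d27b89a8d5aa06cbaaa7afce4df95670e302824b14d2d995dc4681680", 59827904),
    "optimizer.pt": ("dba108f0cdce82ad3e2cd068b6f849f104457a9e357302d101aa78149d8dd441", 30875540),
    "rng_state.pth": ("04232fce99c4802dc579b85d4a7938b6bd47a3fd2dae4192a67625460029c733", 14244),
    "scheduler.pt": ("67680b8b35ea230d53acee387012f8809825a2c511bd41340f42320f849e349d", 1064),
}

def verify(checkpoint_dir="last-checkpoint"):
    for name, (oid, size) in EXPECTED.items():
        data = (Path(checkpoint_dir) / name).read_bytes()
        ok = len(data) == size and hashlib.sha256(data).hexdigest() == oid
        print(f"{name}: {'OK' if ok else 'mismatch (missing blob or still an LFS pointer stub)'}")

verify()
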
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0024451372333272204,
+  "epoch": 0.004890274466654441,
   "eval_steps": 34,
-  "global_step": 5,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -50,6 +50,41 @@
       "learning_rate": 3.3333333333333335e-05,
       "loss": 8.8885,
       "step": 5
+    },
+    {
+      "epoch": 0.0029341646799926646,
+      "grad_norm": 8.087092190490132e+18,
+      "learning_rate": 4e-05,
+      "loss": 9.0537,
+      "step": 6
+    },
+    {
+      "epoch": 0.0034231921266581087,
+      "grad_norm": Infinity,
+      "learning_rate": 4.666666666666667e-05,
+      "loss": 8.6808,
+      "step": 7
+    },
+    {
+      "epoch": 0.0039122195733235525,
+      "grad_norm": Infinity,
+      "learning_rate": 5.333333333333333e-05,
+      "loss": 8.8092,
+      "step": 8
+    },
+    {
+      "epoch": 0.004401247019988997,
+      "grad_norm": Infinity,
+      "learning_rate": 6e-05,
+      "loss": 9.7075,
+      "step": 9
+    },
+    {
+      "epoch": 0.004890274466654441,
+      "grad_norm": Infinity,
+      "learning_rate": 6.666666666666667e-05,
+      "loss": 9.2624,
+      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -69,7 +104,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.102239837978624e+16,
+  "total_flos": 2.204479675957248e+16,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null