masatochi committed
Commit 56784c0
1 Parent(s): 33ca120

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b39424892d2b750e22eaa5c0216a3d5de439c4f797acbb4f9c456ba34c8a355e
+oid sha256:adac11fe9b13bf1b76ad183dcdd7e81553c07712d98750b3a009b0fde4a8f61b
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:802ed36b7afa5017c93a6fd3510a67856790006ca5398e785e74ab898f545ef4
+oid sha256:2f08485c8ba652d542301fa3d085478be8bed85616b2bcb26abf83e95f4faea3
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26d6b797df47639e9d56f9b6739dd1a2f36c0e6d770b9652f614b93b6b7b4046
+oid sha256:887c396fe906e451edd5dc1fa4bc857e822a44e9109e8deb2d1acfa642c3f206
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8074483b54111bf60815c86124b01e5486a0c97c3c0bcc7642d6609b17e4381b
+oid sha256:c6c6ef871b14e9c2b6e6517244b08d50c3b172209dbf2c7f2f35e5b70f657975
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.022006235099944985,
+  "epoch": 0.024451372333272204,
   "eval_steps": 34,
-  "global_step": 45,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -338,6 +338,41 @@
       "learning_rate": 0.00019618256431728194,
       "loss": 9.9237,
       "step": 45
+    },
+    {
+      "epoch": 0.02249526254661043,
+      "grad_norm": 9.941431195118076e+18,
+      "learning_rate": 0.0001956604419500441,
+      "loss": 8.1136,
+      "step": 46
+    },
+    {
+      "epoch": 0.022984289993275873,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 9.7949,
+      "step": 47
+    },
+    {
+      "epoch": 0.023473317439941317,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00019451838281608197,
+      "loss": 10.1889,
+      "step": 48
+    },
+    {
+      "epoch": 0.02396234488660676,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00019389883606150566,
+      "loss": 8.871,
+      "step": 49
+    },
+    {
+      "epoch": 0.024451372333272204,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00019324722294043558,
+      "loss": 10.5181,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -357,7 +392,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.920158541807616e+16,
+  "total_flos": 1.102239837978624e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null