masatochi committed
Commit be9b0e0
1 Parent(s): d61f239

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6eecf6c4a4c2a448a4c03e74d76a6c9b0d97d68be2f24e4f95a69c96000eaf02
+oid sha256:4bb76a987567f4914b75cc888ae593ff9f52cad966ca3dbfe6826ccae8af21fe
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a698541df604e5482b4f9ba4406478786a267e2e932d1a0a5a26f57b5326d26
+oid sha256:162f2c34b141d9aada321f716e3e4e6c2796d4291428461ba4003efd1d635bb1
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0dfa4c978e9e36a07b5616ff9458f53247c12dc4ecf4c596cc8b6cb0fe8c5e1
+oid sha256:da87c0fa21811b5f2090e7e71a5104bc8bb10adee2d7d157218e5f725fa79433
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
+oid sha256:ca9a25c72339c898b564e0c464a3f6fc75bbeec408008928b7ed05533156b98c
 size 1064
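
Each of the four checkpoint files above is tracked with Git LFS, so the commit only updates the pointer files: the sha256 oid changes while the payload size stays the same. Below is a minimal sketch, not part of this repository, of how one might check a locally downloaded file against such a pointer; it assumes only the three-line pointer format shown in the hunks (version / oid sha256:<hex> / size <bytes>), and the paths and function names are hypothetical.

import hashlib
import os

def read_lfs_pointer(pointer_path):
    # Parse the three-line pointer ("version", "oid", "size") into a dict.
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def matches_pointer(local_file, pointer_path):
    # True if local_file has the byte size and sha256 digest the pointer records.
    fields = read_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    if os.path.getsize(local_file) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical usage:
# matches_pointer("adapter_model.safetensors", "adapter_model.safetensors.pointer")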
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0953603520997616,
+  "epoch": 0.09780548933308882,
   "eval_steps": 34,
-  "global_step": 195,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1420,6 +1420,41 @@
       "learning_rate": 4.2658237049655323e-07,
       "loss": 9.7404,
       "step": 195
+    },
+    {
+      "epoch": 0.09584937954642704,
+      "grad_norm": Infinity,
+      "learning_rate": 2.7308266142119785e-07,
+      "loss": 8.6763,
+      "step": 196
+    },
+    {
+      "epoch": 0.09633840699309248,
+      "grad_norm": Infinity,
+      "learning_rate": 1.5363960325660565e-07,
+      "loss": 9.8882,
+      "step": 197
+    },
+    {
+      "epoch": 0.09682743443975793,
+      "grad_norm": Infinity,
+      "learning_rate": 6.829398569770939e-08,
+      "loss": 8.5344,
+      "step": 198
+    },
+    {
+      "epoch": 0.09731646188642337,
+      "grad_norm": Infinity,
+      "learning_rate": 1.7074954194729044e-08,
+      "loss": 8.5774,
+      "step": 199
+    },
+    {
+      "epoch": 0.09780548933308882,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0,
+      "loss": 8.7934,
+      "step": 200
     }
   ],
   "logging_steps": 1,
@@ -1434,12 +1469,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4.2987353681166336e+17,
+  "total_flos": 4.408959351914496e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null