masatochi committed
Commit: 10c5dd7
Parent(s): 9e0d96f

Training in progress, step 195, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca3c8a9f00d293dd253e12e2902978c87a7b1732187c713f7dcc56dab3f31196
+oid sha256:1e414d493c7b174cfc9f15e8c8f498a62290ef39418bff71646044ba53766d2f
 size 22573704
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4075100b03b5aee441579d318bde3f874036f3f3703250b3166ed2a132b016a0
+oid sha256:746384d1c74a736559be2e4296fa1ef73756a741e53afa3adbe66178b2abe282
 size 11710970
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c6b8abea532445f7a2b8197f8311b5a4d582f8239dfa11c211ca7d24d646650
+oid sha256:333f2a71b3e34575844f8fd8a45254433fb8a5c1731e62ba395b530a464ea2d4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
+oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
 size 1064
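The four binary files above are stored through Git LFS, so each diff only touches the three-line pointer (version, oid sha256, size in bytes); the new oid values are the hashes of the step-195 tensors. As a minimal sketch for sanity-checking a pulled blob against its pointer (the file paths below are illustrative assumptions, not part of this commit):

import hashlib

# Paths are assumptions; point them at the pointer text and the pulled binary.
POINTER_PATH = "last-checkpoint/adapter_model.safetensors.pointer"
BLOB_PATH = "last-checkpoint/adapter_model.safetensors"

def parse_lfs_pointer(path):
    # A pointer file is three "key value" lines: version, oid, size.
    fields = {}
    with open(path, "r") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer, blob_path):
    # Recompute SHA-256 and byte count of the blob and compare to the pointer.
    expected_oid = pointer["oid"].split(":", 1)[1]
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == int(pointer["size"])

if __name__ == "__main__":
    pointer = parse_lfs_pointer(POINTER_PATH)
    print("matches pointer:", verify_blob(pointer, BLOB_PATH))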
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09291521486643438,
+  "epoch": 0.0953603520997616,
   "eval_steps": 34,
-  "global_step": 190,
+  "global_step": 195,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1385,6 +1385,41 @@
       "learning_rate": 1.7026900316098215e-06,
       "loss": 0.9855,
       "step": 190
+    },
+    {
+      "epoch": 0.09340424231309982,
+      "grad_norm": 1.6108776330947876,
+      "learning_rate": 1.3799252646597426e-06,
+      "loss": 1.2474,
+      "step": 191
+    },
+    {
+      "epoch": 0.09389326975976527,
+      "grad_norm": 1.4905693531036377,
+      "learning_rate": 1.0908391628854041e-06,
+      "loss": 1.0982,
+      "step": 192
+    },
+    {
+      "epoch": 0.09438229720643071,
+      "grad_norm": 1.3788636922836304,
+      "learning_rate": 8.355304489257254e-07,
+      "loss": 1.0791,
+      "step": 193
+    },
+    {
+      "epoch": 0.09487132465309615,
+      "grad_norm": 1.8257007598876953,
+      "learning_rate": 6.140863104726391e-07,
+      "loss": 1.1666,
+      "step": 194
+    },
+    {
+      "epoch": 0.0953603520997616,
+      "grad_norm": 1.4958456754684448,
+      "learning_rate": 4.2658237049655323e-07,
+      "loss": 0.9653,
+      "step": 195
     }
   ],
   "logging_steps": 1,
@@ -1404,7 +1439,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0968875503976448e+17,
+  "total_flos": 1.1257530122502144e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null