joe611 committed on
Commit 9754666
1 Parent(s): 18739b0

Training in progress, epoch 150, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e9790a957a17076b2fd3e81a46a61514d45e3fd83828e7c71e313b269d2d43c
+oid sha256:ebb52cac5f3071a35cd575bf00e75571ed3dd4448f59a5e5a1b8dae1834ca28d
 size 166496880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bda7aaccfa295b45aedc9af5b9bfb50bc9bb8fdffc88c815357e96fd66ca4fd2
+oid sha256:530bfbe04eff8688a65fe4f706c6565c86e05c710f480592b540996802f511a2
 size 330495866
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a396222dff59b097bed0889ab42f3d144ca156561494a7c8362451cc383f6f2
+oid sha256:2369e82f6a6f2e47a7e47e23a9296d17f755e58759c961f78d5cc3d96e6e1325
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d97cfcad85d9182f57a6d2bdf768ab9899061715a0c3f6d191db6d91a27ccfef
+oid sha256:96205cabb1588b7cd4f382979691c79dabb654b1f45a221ef5567a570af3df90
 size 1064
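
The four binary files above are tracked with Git LFS, so this commit only rewrites their pointer files (spec version, sha256 oid, byte size); the weights themselves live in LFS storage. A minimal sketch for checking that a locally fetched checkpoint file matches its updated pointer, assuming the file has already been downloaded into last-checkpoint/ (the path and expected values are copied from the model.safetensors pointer above):

import hashlib
from pathlib import Path

def lfs_sha256(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file so large checkpoints need not fit in memory;
    # Git LFS records exactly this sha256 digest as the pointer's oid.
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = Path("last-checkpoint/model.safetensors")
assert path.stat().st_size == 166496880
assert lfs_sha256(path) == "ebb52cac5f3071a35cd575bf00e75571ed3dd4448f59a5e5a1b8dae1834ca28d"
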
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.29718613624572754,
   "best_model_checkpoint": "chickens-composite-201616161616-150-epochs-wo-transform-metrics-test-shfld/checkpoint-49500",
-  "epoch": 149.0,
+  "epoch": 150.0,
   "eval_steps": 500,
-  "global_step": 74500,
+  "global_step": 75000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -21262,6 +21262,151 @@
       "eval_samples_per_second": 14.146,
       "eval_steps_per_second": 1.839,
       "step": 74500
+    },
+    {
+      "epoch": 149.04,
+      "grad_norm": 65.51058959960938,
+      "learning_rate": 1.0106134441850712e-09,
+      "loss": 0.2223,
+      "step": 74520
+    },
+    {
+      "epoch": 149.1,
+      "grad_norm": 37.981048583984375,
+      "learning_rate": 8.88238095955174e-10,
+      "loss": 0.1933,
+      "step": 74550
+    },
+    {
+      "epoch": 149.16,
+      "grad_norm": 158.58409118652344,
+      "learning_rate": 7.737570275573314e-10,
+      "loss": 0.2096,
+      "step": 74580
+    },
+    {
+      "epoch": 149.22,
+      "grad_norm": 56.425628662109375,
+      "learning_rate": 6.671704197735995e-10,
+      "loss": 0.2107,
+      "step": 74610
+    },
+    {
+      "epoch": 149.28,
+      "grad_norm": 36.22454833984375,
+      "learning_rate": 5.684784409182298e-10,
+      "loss": 0.1885,
+      "step": 74640
+    },
+    {
+      "epoch": 149.34,
+      "grad_norm": 87.21402740478516,
+      "learning_rate": 4.776812468398895e-10,
+      "loss": 0.1967,
+      "step": 74670
+    },
+    {
+      "epoch": 149.4,
+      "grad_norm": 56.854522705078125,
+      "learning_rate": 3.9477898091944135e-10,
+      "loss": 0.194,
+      "step": 74700
+    },
+    {
+      "epoch": 149.46,
+      "grad_norm": 32.06566619873047,
+      "learning_rate": 3.1977177407105376e-10,
+      "loss": 0.1959,
+      "step": 74730
+    },
+    {
+      "epoch": 149.52,
+      "grad_norm": 66.09393310546875,
+      "learning_rate": 2.5265974474109054e-10,
+      "loss": 0.2128,
+      "step": 74760
+    },
+    {
+      "epoch": 149.58,
+      "grad_norm": 60.07960510253906,
+      "learning_rate": 1.9344299890866614e-10,
+      "loss": 0.2127,
+      "step": 74790
+    },
+    {
+      "epoch": 149.64,
+      "grad_norm": 44.7581672668457,
+      "learning_rate": 1.4212163008509028e-10,
+      "loss": 0.1932,
+      "step": 74820
+    },
+    {
+      "epoch": 149.7,
+      "grad_norm": 61.01675033569336,
+      "learning_rate": 9.869571931442334e-11,
+      "loss": 0.2248,
+      "step": 74850
+    },
+    {
+      "epoch": 149.76,
+      "grad_norm": 64.22322082519531,
+      "learning_rate": 6.316533517125578e-11,
+      "loss": 0.2113,
+      "step": 74880
+    },
+    {
+      "epoch": 149.82,
+      "grad_norm": 24.638652801513672,
+      "learning_rate": 3.55305337634837e-11,
+      "loss": 0.1962,
+      "step": 74910
+    },
+    {
+      "epoch": 149.88,
+      "grad_norm": 40.451576232910156,
+      "learning_rate": 1.57913587295333e-11,
+      "loss": 0.2058,
+      "step": 74940
+    },
+    {
+      "epoch": 149.94,
+      "grad_norm": 28.84409523010254,
+      "learning_rate": 3.947841241136452e-12,
+      "loss": 0.2357,
+      "step": 74970
+    },
+    {
+      "epoch": 150.0,
+      "grad_norm": 203.35765075683594,
+      "learning_rate": 0.0,
+      "loss": 0.2344,
+      "step": 75000
+    },
+    {
+      "epoch": 150.0,
+      "eval_loss": 0.30587318539619446,
+      "eval_map": 0.8044,
+      "eval_map_50": 0.9405,
+      "eval_map_75": 0.9024,
+      "eval_map_chicken": 0.7936,
+      "eval_map_duck": 0.7475,
+      "eval_map_large": 0.7843,
+      "eval_map_medium": 0.8141,
+      "eval_map_plant": 0.8722,
+      "eval_map_small": 0.2979,
+      "eval_mar_1": 0.3221,
+      "eval_mar_10": 0.8382,
+      "eval_mar_100": 0.8419,
+      "eval_mar_100_chicken": 0.844,
+      "eval_mar_100_duck": 0.7804,
+      "eval_mar_100_plant": 0.9012,
+      "eval_mar_large": 0.8145,
+      "eval_mar_medium": 0.8546,
+      "eval_mar_small": 0.3829,
+      "eval_runtime": 6.8325,
+      "eval_samples_per_second": 14.636,
+      "eval_steps_per_second": 1.903,
+      "step": 75000
     }
   ],
   "logging_steps": 30,
@@ -21276,12 +21421,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2.56297219117056e+19,
+  "total_flos": 2.580173346816e+19,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null