MarkelFe committed
Commit 680163f
1 Parent(s): 3ce4480

Training in progress, step 110000

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "DeepESP/gpt2-spanish",
+  "_name_or_path": "MarkelFe/PoliticalSpeech2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
last-checkpoint/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "DeepESP/gpt2-spanish",
+  "_name_or_path": "MarkelFe/PoliticalSpeech2",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:62a1421f8e6cb5bb67425311aef9dda9661c917aab4a357a23ec44b2e3daab85
+oid sha256:4a9bc8c12cb2cf6ed3ccb5b585450d138e62bee12142711a3446cb1a0dc2a7c1
 size 995605445
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f16473c3f1ed9f0581444acbd15664c381713c51843177e152cb72b1bd63766
+oid sha256:64b7857fda6aab7245b90375ddb99a6ea560cd4b8c3a5c8e8a357ebcea3a71db
 size 510398013
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:412df950fe881812d4df452ea4ec2fcd612c8449348c26dda4f3e5ddd7d9a49a
+oid sha256:39a74b88b3614b3e58ed0796513c6c2b83a26d8f63a05b20d3ddb63f51d8cfd5
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e61191d318bf68e1245e4fc98bbc55f1b8337c3d4b3f28a8adc2b1ca718b159b
+oid sha256:98d37f2648c88f2e98f12ff7b39e7b8b0f53d4838c5848eaeff572e95b5d5ca3
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.367648451557913,
-  "global_step": 100000,
+  "epoch": 2.604413296713704,
+  "global_step": 110000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1292,11 +1292,139 @@
       "eval_samples_per_second": 166.106,
       "eval_steps_per_second": 20.768,
       "step": 100000
+    },
+    {
+      "epoch": 2.38,
+      "learning_rate": 2.644353947659185e-05,
+      "loss": 2.766,
+      "step": 100500
+    },
+    {
+      "epoch": 2.39,
+      "learning_rate": 2.638040218455031e-05,
+      "loss": 2.7858,
+      "step": 101000
+    },
+    {
+      "epoch": 2.4,
+      "learning_rate": 2.631726489250876e-05,
+      "loss": 2.7735,
+      "step": 101500
+    },
+    {
+      "epoch": 2.42,
+      "learning_rate": 2.6254127600467217e-05,
+      "loss": 2.7908,
+      "step": 102000
+    },
+    {
+      "epoch": 2.43,
+      "learning_rate": 2.6190990308425672e-05,
+      "loss": 2.814,
+      "step": 102500
+    },
+    {
+      "epoch": 2.44,
+      "learning_rate": 2.6127853016384127e-05,
+      "loss": 2.7823,
+      "step": 103000
+    },
+    {
+      "epoch": 2.45,
+      "learning_rate": 2.6064715724342585e-05,
+      "loss": 2.7855,
+      "step": 103500
+    },
+    {
+      "epoch": 2.46,
+      "learning_rate": 2.600157843230104e-05,
+      "loss": 2.7984,
+      "step": 104000
+    },
+    {
+      "epoch": 2.47,
+      "learning_rate": 2.5938441140259496e-05,
+      "loss": 2.8117,
+      "step": 104500
+    },
+    {
+      "epoch": 2.49,
+      "learning_rate": 2.587530384821795e-05,
+      "loss": 2.8052,
+      "step": 105000
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 2.581216655617641e-05,
+      "loss": 2.8137,
+      "step": 105500
+    },
+    {
+      "epoch": 2.51,
+      "learning_rate": 2.5749029264134864e-05,
+      "loss": 2.8106,
+      "step": 106000
+    },
+    {
+      "epoch": 2.52,
+      "learning_rate": 2.568589197209332e-05,
+      "loss": 2.796,
+      "step": 106500
+    },
+    {
+      "epoch": 2.53,
+      "learning_rate": 2.5622754680051775e-05,
+      "loss": 2.8181,
+      "step": 107000
+    },
+    {
+      "epoch": 2.55,
+      "learning_rate": 2.5559617388010233e-05,
+      "loss": 2.806,
+      "step": 107500
+    },
+    {
+      "epoch": 2.56,
+      "learning_rate": 2.5496480095968688e-05,
+      "loss": 2.8292,
+      "step": 108000
+    },
+    {
+      "epoch": 2.57,
+      "learning_rate": 2.5433342803927143e-05,
+      "loss": 2.8104,
+      "step": 108500
+    },
+    {
+      "epoch": 2.58,
+      "learning_rate": 2.53702055118856e-05,
+      "loss": 2.8292,
+      "step": 109000
+    },
+    {
+      "epoch": 2.59,
+      "learning_rate": 2.530706821984405e-05,
+      "loss": 2.8115,
+      "step": 109500
+    },
+    {
+      "epoch": 2.6,
+      "learning_rate": 2.524393092780251e-05,
+      "loss": 2.8272,
+      "step": 110000
+    },
+    {
+      "epoch": 2.6,
+      "eval_loss": 3.1270127296447754,
+      "eval_runtime": 113.6604,
+      "eval_samples_per_second": 165.159,
+      "eval_steps_per_second": 20.649,
+      "step": 110000
     }
   ],
   "max_steps": 633540,
   "num_train_epochs": 15,
-  "total_flos": 2.120856134976e+16,
+  "total_flos": 2.332982396736e+16,
   "trial_name": null,
   "trial_params": null
 }
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e13bd02ff5c40117ddbf942838af2297189fae003bd9b651003f2826488ee929
+oid sha256:2ae549f0bd35c85b2cbb303f165785a4c8cb5d499cfc8e300500dad5f12d79c4
 size 4987
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f16473c3f1ed9f0581444acbd15664c381713c51843177e152cb72b1bd63766
+oid sha256:64b7857fda6aab7245b90375ddb99a6ea560cd4b8c3a5c8e8a357ebcea3a71db
 size 510398013
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e13bd02ff5c40117ddbf942838af2297189fae003bd9b651003f2826488ee929
+oid sha256:2ae549f0bd35c85b2cbb303f165785a4c8cb5d499cfc8e300500dad5f12d79c4
 size 4987
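
As a reading aid for the last-checkpoint/trainer_state.json change above: a minimal sanity-check sketch (not part of this commit) that assumes the reported epoch is simply global_step divided by steps per epoch, with steps per epoch taken as max_steps / num_train_epochs from the same file. All numeric values are copied from the diff; the relation itself is an assumption, not code from this repository.

# Hypothetical check: values copied from the trainer_state.json diff above;
# epoch = global_step / steps_per_epoch is an assumed relation.
max_steps = 633540
num_train_epochs = 15
steps_per_epoch = max_steps / num_train_epochs  # 42236.0

for global_step, reported_epoch in [(100000, 2.367648451557913),
                                    (110000, 2.604413296713704)]:
    derived_epoch = global_step / steps_per_epoch
    assert abs(derived_epoch - reported_epoch) < 1e-6
    print(f"step {global_step}: derived epoch {derived_epoch:.6f} matches {reported_epoch}")

Under the same reading, the 500-step log entries in the diff lower the learning rate by a constant ~6.31e-08 per 500 steps, consistent with a linear decay schedule over this range.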