{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 939,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03194888178913738,
"grad_norm": 4.9730574359401585,
"learning_rate": 5e-06,
"loss": 0.9178,
"step": 10
},
{
"epoch": 0.06389776357827476,
"grad_norm": 2.414364422517365,
"learning_rate": 5e-06,
"loss": 0.8185,
"step": 20
},
{
"epoch": 0.09584664536741214,
"grad_norm": 2.355627570386528,
"learning_rate": 5e-06,
"loss": 0.7955,
"step": 30
},
{
"epoch": 0.12779552715654952,
"grad_norm": 0.9368274495065052,
"learning_rate": 5e-06,
"loss": 0.777,
"step": 40
},
{
"epoch": 0.1597444089456869,
"grad_norm": 1.0303864547714687,
"learning_rate": 5e-06,
"loss": 0.7544,
"step": 50
},
{
"epoch": 0.19169329073482427,
"grad_norm": 0.8799746481470822,
"learning_rate": 5e-06,
"loss": 0.7414,
"step": 60
},
{
"epoch": 0.22364217252396165,
"grad_norm": 2.571145704302205,
"learning_rate": 5e-06,
"loss": 0.7335,
"step": 70
},
{
"epoch": 0.25559105431309903,
"grad_norm": 1.4093624036341388,
"learning_rate": 5e-06,
"loss": 0.7174,
"step": 80
},
{
"epoch": 0.28753993610223644,
"grad_norm": 0.8615478628829397,
"learning_rate": 5e-06,
"loss": 0.7318,
"step": 90
},
{
"epoch": 0.3194888178913738,
"grad_norm": 0.5831162653054314,
"learning_rate": 5e-06,
"loss": 0.72,
"step": 100
},
{
"epoch": 0.3514376996805112,
"grad_norm": 0.6173922126615862,
"learning_rate": 5e-06,
"loss": 0.716,
"step": 110
},
{
"epoch": 0.38338658146964855,
"grad_norm": 0.8888214325930446,
"learning_rate": 5e-06,
"loss": 0.7134,
"step": 120
},
{
"epoch": 0.41533546325878595,
"grad_norm": 0.6979078907766634,
"learning_rate": 5e-06,
"loss": 0.7021,
"step": 130
},
{
"epoch": 0.4472843450479233,
"grad_norm": 0.6491577783946951,
"learning_rate": 5e-06,
"loss": 0.6991,
"step": 140
},
{
"epoch": 0.4792332268370607,
"grad_norm": 0.5395727376776203,
"learning_rate": 5e-06,
"loss": 0.7068,
"step": 150
},
{
"epoch": 0.5111821086261981,
"grad_norm": 0.689753166346745,
"learning_rate": 5e-06,
"loss": 0.6935,
"step": 160
},
{
"epoch": 0.5431309904153354,
"grad_norm": 0.5521859868964065,
"learning_rate": 5e-06,
"loss": 0.705,
"step": 170
},
{
"epoch": 0.5750798722044729,
"grad_norm": 0.5708494730680989,
"learning_rate": 5e-06,
"loss": 0.6961,
"step": 180
},
{
"epoch": 0.6070287539936102,
"grad_norm": 0.7742403437422439,
"learning_rate": 5e-06,
"loss": 0.7045,
"step": 190
},
{
"epoch": 0.6389776357827476,
"grad_norm": 0.6022796802365044,
"learning_rate": 5e-06,
"loss": 0.6911,
"step": 200
},
{
"epoch": 0.670926517571885,
"grad_norm": 0.5364796054217524,
"learning_rate": 5e-06,
"loss": 0.695,
"step": 210
},
{
"epoch": 0.7028753993610224,
"grad_norm": 0.47930189735330775,
"learning_rate": 5e-06,
"loss": 0.6961,
"step": 220
},
{
"epoch": 0.7348242811501597,
"grad_norm": 0.5376571771457028,
"learning_rate": 5e-06,
"loss": 0.7035,
"step": 230
},
{
"epoch": 0.7667731629392971,
"grad_norm": 0.7570921090383397,
"learning_rate": 5e-06,
"loss": 0.6801,
"step": 240
},
{
"epoch": 0.7987220447284346,
"grad_norm": 0.5375423682771385,
"learning_rate": 5e-06,
"loss": 0.6809,
"step": 250
},
{
"epoch": 0.8306709265175719,
"grad_norm": 0.5836884654889642,
"learning_rate": 5e-06,
"loss": 0.6864,
"step": 260
},
{
"epoch": 0.8626198083067093,
"grad_norm": 0.598608895789362,
"learning_rate": 5e-06,
"loss": 0.6902,
"step": 270
},
{
"epoch": 0.8945686900958466,
"grad_norm": 0.5331459973786111,
"learning_rate": 5e-06,
"loss": 0.6879,
"step": 280
},
{
"epoch": 0.9265175718849841,
"grad_norm": 0.5067341091409512,
"learning_rate": 5e-06,
"loss": 0.6881,
"step": 290
},
{
"epoch": 0.9584664536741214,
"grad_norm": 0.5948122722251207,
"learning_rate": 5e-06,
"loss": 0.6868,
"step": 300
},
{
"epoch": 0.9904153354632588,
"grad_norm": 0.6883144495229085,
"learning_rate": 5e-06,
"loss": 0.6818,
"step": 310
},
{
"epoch": 1.0,
"eval_loss": 0.680176854133606,
"eval_runtime": 30.2696,
"eval_samples_per_second": 277.968,
"eval_steps_per_second": 1.09,
"step": 313
},
{
"epoch": 1.0223642172523961,
"grad_norm": 0.7468894293006159,
"learning_rate": 5e-06,
"loss": 0.656,
"step": 320
},
{
"epoch": 1.0543130990415335,
"grad_norm": 0.6942190077761362,
"learning_rate": 5e-06,
"loss": 0.6387,
"step": 330
},
{
"epoch": 1.0862619808306708,
"grad_norm": 0.6242193449068775,
"learning_rate": 5e-06,
"loss": 0.6407,
"step": 340
},
{
"epoch": 1.1182108626198084,
"grad_norm": 0.6329861406612104,
"learning_rate": 5e-06,
"loss": 0.6428,
"step": 350
},
{
"epoch": 1.1501597444089458,
"grad_norm": 0.6142460964851904,
"learning_rate": 5e-06,
"loss": 0.6356,
"step": 360
},
{
"epoch": 1.182108626198083,
"grad_norm": 0.6850789950403501,
"learning_rate": 5e-06,
"loss": 0.636,
"step": 370
},
{
"epoch": 1.2140575079872205,
"grad_norm": 0.5355828508870474,
"learning_rate": 5e-06,
"loss": 0.6382,
"step": 380
},
{
"epoch": 1.2460063897763578,
"grad_norm": 0.5629126693045496,
"learning_rate": 5e-06,
"loss": 0.6344,
"step": 390
},
{
"epoch": 1.2779552715654952,
"grad_norm": 0.5204488937445614,
"learning_rate": 5e-06,
"loss": 0.6401,
"step": 400
},
{
"epoch": 1.3099041533546325,
"grad_norm": 0.5396176523426488,
"learning_rate": 5e-06,
"loss": 0.6382,
"step": 410
},
{
"epoch": 1.34185303514377,
"grad_norm": 0.6023672235090743,
"learning_rate": 5e-06,
"loss": 0.6417,
"step": 420
},
{
"epoch": 1.3738019169329074,
"grad_norm": 0.5459880912136352,
"learning_rate": 5e-06,
"loss": 0.6434,
"step": 430
},
{
"epoch": 1.4057507987220448,
"grad_norm": 0.5092923506095731,
"learning_rate": 5e-06,
"loss": 0.6366,
"step": 440
},
{
"epoch": 1.4376996805111821,
"grad_norm": 0.6382479698360073,
"learning_rate": 5e-06,
"loss": 0.6353,
"step": 450
},
{
"epoch": 1.4696485623003195,
"grad_norm": 0.7533985947305522,
"learning_rate": 5e-06,
"loss": 0.6392,
"step": 460
},
{
"epoch": 1.5015974440894568,
"grad_norm": 0.7229864015275371,
"learning_rate": 5e-06,
"loss": 0.6381,
"step": 470
},
{
"epoch": 1.5335463258785942,
"grad_norm": 0.5758239404648058,
"learning_rate": 5e-06,
"loss": 0.636,
"step": 480
},
{
"epoch": 1.5654952076677318,
"grad_norm": 0.5422584017870065,
"learning_rate": 5e-06,
"loss": 0.6393,
"step": 490
},
{
"epoch": 1.5974440894568689,
"grad_norm": 0.500259651199132,
"learning_rate": 5e-06,
"loss": 0.6441,
"step": 500
},
{
"epoch": 1.6293929712460065,
"grad_norm": 0.8813431746686585,
"learning_rate": 5e-06,
"loss": 0.6308,
"step": 510
},
{
"epoch": 1.6613418530351438,
"grad_norm": 0.49923586083798,
"learning_rate": 5e-06,
"loss": 0.6365,
"step": 520
},
{
"epoch": 1.6932907348242812,
"grad_norm": 0.46329712814558677,
"learning_rate": 5e-06,
"loss": 0.6279,
"step": 530
},
{
"epoch": 1.7252396166134185,
"grad_norm": 0.7606638693711257,
"learning_rate": 5e-06,
"loss": 0.6341,
"step": 540
},
{
"epoch": 1.7571884984025559,
"grad_norm": 0.5490152416615772,
"learning_rate": 5e-06,
"loss": 0.6378,
"step": 550
},
{
"epoch": 1.7891373801916934,
"grad_norm": 0.5845892096604395,
"learning_rate": 5e-06,
"loss": 0.6395,
"step": 560
},
{
"epoch": 1.8210862619808306,
"grad_norm": 0.5147171055711276,
"learning_rate": 5e-06,
"loss": 0.6297,
"step": 570
},
{
"epoch": 1.8530351437699681,
"grad_norm": 0.5052763678219258,
"learning_rate": 5e-06,
"loss": 0.6342,
"step": 580
},
{
"epoch": 1.8849840255591053,
"grad_norm": 0.5390734384968956,
"learning_rate": 5e-06,
"loss": 0.6369,
"step": 590
},
{
"epoch": 1.9169329073482428,
"grad_norm": 0.6413516519865096,
"learning_rate": 5e-06,
"loss": 0.6418,
"step": 600
},
{
"epoch": 1.9488817891373802,
"grad_norm": 0.5239174298332788,
"learning_rate": 5e-06,
"loss": 0.6274,
"step": 610
},
{
"epoch": 1.9808306709265175,
"grad_norm": 0.7039020492664947,
"learning_rate": 5e-06,
"loss": 0.6385,
"step": 620
},
{
"epoch": 2.0,
"eval_loss": 0.6707362532615662,
"eval_runtime": 29.7735,
"eval_samples_per_second": 282.6,
"eval_steps_per_second": 1.108,
"step": 626
},
{
"epoch": 2.012779552715655,
"grad_norm": 0.9555964669975626,
"learning_rate": 5e-06,
"loss": 0.6152,
"step": 630
},
{
"epoch": 2.0447284345047922,
"grad_norm": 0.6929923210576585,
"learning_rate": 5e-06,
"loss": 0.5881,
"step": 640
},
{
"epoch": 2.07667731629393,
"grad_norm": 0.5186537877634542,
"learning_rate": 5e-06,
"loss": 0.5871,
"step": 650
},
{
"epoch": 2.108626198083067,
"grad_norm": 0.5532963474572669,
"learning_rate": 5e-06,
"loss": 0.5896,
"step": 660
},
{
"epoch": 2.1405750798722045,
"grad_norm": 0.5667781850304423,
"learning_rate": 5e-06,
"loss": 0.589,
"step": 670
},
{
"epoch": 2.1725239616613417,
"grad_norm": 0.5285098402420302,
"learning_rate": 5e-06,
"loss": 0.5901,
"step": 680
},
{
"epoch": 2.2044728434504792,
"grad_norm": 0.536646702155888,
"learning_rate": 5e-06,
"loss": 0.5863,
"step": 690
},
{
"epoch": 2.236421725239617,
"grad_norm": 0.5015237529663761,
"learning_rate": 5e-06,
"loss": 0.5846,
"step": 700
},
{
"epoch": 2.268370607028754,
"grad_norm": 0.6152245702781334,
"learning_rate": 5e-06,
"loss": 0.5885,
"step": 710
},
{
"epoch": 2.3003194888178915,
"grad_norm": 0.6897591535184826,
"learning_rate": 5e-06,
"loss": 0.584,
"step": 720
},
{
"epoch": 2.3322683706070286,
"grad_norm": 0.48574756268499075,
"learning_rate": 5e-06,
"loss": 0.5888,
"step": 730
},
{
"epoch": 2.364217252396166,
"grad_norm": 0.7776088479205505,
"learning_rate": 5e-06,
"loss": 0.591,
"step": 740
},
{
"epoch": 2.3961661341853033,
"grad_norm": 0.5643419759040877,
"learning_rate": 5e-06,
"loss": 0.5975,
"step": 750
},
{
"epoch": 2.428115015974441,
"grad_norm": 0.7618894474745995,
"learning_rate": 5e-06,
"loss": 0.5907,
"step": 760
},
{
"epoch": 2.460063897763578,
"grad_norm": 0.566521654030305,
"learning_rate": 5e-06,
"loss": 0.5902,
"step": 770
},
{
"epoch": 2.4920127795527156,
"grad_norm": 0.539754953993636,
"learning_rate": 5e-06,
"loss": 0.5884,
"step": 780
},
{
"epoch": 2.523961661341853,
"grad_norm": 0.5625428853361434,
"learning_rate": 5e-06,
"loss": 0.5818,
"step": 790
},
{
"epoch": 2.5559105431309903,
"grad_norm": 0.6279591943173818,
"learning_rate": 5e-06,
"loss": 0.588,
"step": 800
},
{
"epoch": 2.587859424920128,
"grad_norm": 0.6224175939518917,
"learning_rate": 5e-06,
"loss": 0.5934,
"step": 810
},
{
"epoch": 2.619808306709265,
"grad_norm": 0.5642613523825399,
"learning_rate": 5e-06,
"loss": 0.5865,
"step": 820
},
{
"epoch": 2.6517571884984026,
"grad_norm": 0.6865432310936253,
"learning_rate": 5e-06,
"loss": 0.5926,
"step": 830
},
{
"epoch": 2.68370607028754,
"grad_norm": 0.5373522424367686,
"learning_rate": 5e-06,
"loss": 0.5921,
"step": 840
},
{
"epoch": 2.7156549520766773,
"grad_norm": 0.567457215981538,
"learning_rate": 5e-06,
"loss": 0.5954,
"step": 850
},
{
"epoch": 2.747603833865815,
"grad_norm": 0.6426061724785515,
"learning_rate": 5e-06,
"loss": 0.5988,
"step": 860
},
{
"epoch": 2.779552715654952,
"grad_norm": 0.59442339830324,
"learning_rate": 5e-06,
"loss": 0.6008,
"step": 870
},
{
"epoch": 2.8115015974440896,
"grad_norm": 0.5592579452365836,
"learning_rate": 5e-06,
"loss": 0.6022,
"step": 880
},
{
"epoch": 2.8434504792332267,
"grad_norm": 0.5094979594863669,
"learning_rate": 5e-06,
"loss": 0.592,
"step": 890
},
{
"epoch": 2.8753993610223643,
"grad_norm": 0.5091387458726695,
"learning_rate": 5e-06,
"loss": 0.6,
"step": 900
},
{
"epoch": 2.9073482428115014,
"grad_norm": 0.5094298110498509,
"learning_rate": 5e-06,
"loss": 0.5938,
"step": 910
},
{
"epoch": 2.939297124600639,
"grad_norm": 0.5130578982623665,
"learning_rate": 5e-06,
"loss": 0.586,
"step": 920
},
{
"epoch": 2.9712460063897765,
"grad_norm": 0.4928009047818368,
"learning_rate": 5e-06,
"loss": 0.5911,
"step": 930
},
{
"epoch": 3.0,
"eval_loss": 0.6763173341751099,
"eval_runtime": 29.5519,
"eval_samples_per_second": 284.719,
"eval_steps_per_second": 1.117,
"step": 939
},
{
"epoch": 3.0,
"step": 939,
"total_flos": 1572859973468160.0,
"train_loss": 0.648996304907103,
"train_runtime": 5978.5297,
"train_samples_per_second": 80.212,
"train_steps_per_second": 0.157
}
],
"logging_steps": 10,
"max_steps": 939,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1572859973468160.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}