{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.995488721804511,
"eval_steps": 500,
"global_step": 996,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03007518796992481,
"grad_norm": 5.823914796640075,
"learning_rate": 5e-06,
"loss": 0.801,
"step": 10
},
{
"epoch": 0.06015037593984962,
"grad_norm": 2.3399969132192253,
"learning_rate": 5e-06,
"loss": 0.7172,
"step": 20
},
{
"epoch": 0.09022556390977443,
"grad_norm": 0.8783578572732146,
"learning_rate": 5e-06,
"loss": 0.6923,
"step": 30
},
{
"epoch": 0.12030075187969924,
"grad_norm": 1.7232554735132826,
"learning_rate": 5e-06,
"loss": 0.6834,
"step": 40
},
{
"epoch": 0.15037593984962405,
"grad_norm": 1.6494035164725938,
"learning_rate": 5e-06,
"loss": 0.6655,
"step": 50
},
{
"epoch": 0.18045112781954886,
"grad_norm": 0.9942658093552336,
"learning_rate": 5e-06,
"loss": 0.6568,
"step": 60
},
{
"epoch": 0.21052631578947367,
"grad_norm": 0.8609069727317351,
"learning_rate": 5e-06,
"loss": 0.6548,
"step": 70
},
{
"epoch": 0.24060150375939848,
"grad_norm": 0.8740060444495542,
"learning_rate": 5e-06,
"loss": 0.6454,
"step": 80
},
{
"epoch": 0.2706766917293233,
"grad_norm": 1.0114201009544495,
"learning_rate": 5e-06,
"loss": 0.6331,
"step": 90
},
{
"epoch": 0.3007518796992481,
"grad_norm": 0.782353210012559,
"learning_rate": 5e-06,
"loss": 0.6267,
"step": 100
},
{
"epoch": 0.3308270676691729,
"grad_norm": 0.6061308240718153,
"learning_rate": 5e-06,
"loss": 0.6305,
"step": 110
},
{
"epoch": 0.3609022556390977,
"grad_norm": 0.663886451339362,
"learning_rate": 5e-06,
"loss": 0.6246,
"step": 120
},
{
"epoch": 0.39097744360902253,
"grad_norm": 0.8509285444900951,
"learning_rate": 5e-06,
"loss": 0.6305,
"step": 130
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.650634026198435,
"learning_rate": 5e-06,
"loss": 0.6291,
"step": 140
},
{
"epoch": 0.45112781954887216,
"grad_norm": 0.7050446482120244,
"learning_rate": 5e-06,
"loss": 0.6279,
"step": 150
},
{
"epoch": 0.48120300751879697,
"grad_norm": 0.5260864128397903,
"learning_rate": 5e-06,
"loss": 0.6167,
"step": 160
},
{
"epoch": 0.5112781954887218,
"grad_norm": 0.5159636362174167,
"learning_rate": 5e-06,
"loss": 0.6189,
"step": 170
},
{
"epoch": 0.5413533834586466,
"grad_norm": 0.576027072578746,
"learning_rate": 5e-06,
"loss": 0.6096,
"step": 180
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.6020714354457843,
"learning_rate": 5e-06,
"loss": 0.6207,
"step": 190
},
{
"epoch": 0.6015037593984962,
"grad_norm": 0.8904744763001984,
"learning_rate": 5e-06,
"loss": 0.6143,
"step": 200
},
{
"epoch": 0.631578947368421,
"grad_norm": 0.5645606661034147,
"learning_rate": 5e-06,
"loss": 0.619,
"step": 210
},
{
"epoch": 0.6616541353383458,
"grad_norm": 0.7456282489942,
"learning_rate": 5e-06,
"loss": 0.6229,
"step": 220
},
{
"epoch": 0.6917293233082706,
"grad_norm": 0.47671003637879944,
"learning_rate": 5e-06,
"loss": 0.6141,
"step": 230
},
{
"epoch": 0.7218045112781954,
"grad_norm": 0.5712162016443497,
"learning_rate": 5e-06,
"loss": 0.6148,
"step": 240
},
{
"epoch": 0.7518796992481203,
"grad_norm": 0.5787527818544967,
"learning_rate": 5e-06,
"loss": 0.6112,
"step": 250
},
{
"epoch": 0.7819548872180451,
"grad_norm": 0.7727895820276254,
"learning_rate": 5e-06,
"loss": 0.6124,
"step": 260
},
{
"epoch": 0.8120300751879699,
"grad_norm": 0.6627350412484229,
"learning_rate": 5e-06,
"loss": 0.6101,
"step": 270
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.49307733921025576,
"learning_rate": 5e-06,
"loss": 0.6101,
"step": 280
},
{
"epoch": 0.8721804511278195,
"grad_norm": 0.5299740752648865,
"learning_rate": 5e-06,
"loss": 0.6074,
"step": 290
},
{
"epoch": 0.9022556390977443,
"grad_norm": 0.9821666854504033,
"learning_rate": 5e-06,
"loss": 0.6035,
"step": 300
},
{
"epoch": 0.9323308270676691,
"grad_norm": 0.5537120584191855,
"learning_rate": 5e-06,
"loss": 0.6037,
"step": 310
},
{
"epoch": 0.9624060150375939,
"grad_norm": 0.5346047989400851,
"learning_rate": 5e-06,
"loss": 0.6128,
"step": 320
},
{
"epoch": 0.9924812030075187,
"grad_norm": 0.5177704737274331,
"learning_rate": 5e-06,
"loss": 0.6043,
"step": 330
},
{
"epoch": 0.9984962406015038,
"eval_loss": 0.6096732020378113,
"eval_runtime": 116.9563,
"eval_samples_per_second": 76.593,
"eval_steps_per_second": 0.599,
"step": 332
},
{
"epoch": 1.0225563909774436,
"grad_norm": 0.4996873605367385,
"learning_rate": 5e-06,
"loss": 0.58,
"step": 340
},
{
"epoch": 1.0526315789473684,
"grad_norm": 0.59979801353788,
"learning_rate": 5e-06,
"loss": 0.5596,
"step": 350
},
{
"epoch": 1.0827067669172932,
"grad_norm": 0.645810347625552,
"learning_rate": 5e-06,
"loss": 0.5591,
"step": 360
},
{
"epoch": 1.112781954887218,
"grad_norm": 0.6113621459091085,
"learning_rate": 5e-06,
"loss": 0.5553,
"step": 370
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.6400931476209645,
"learning_rate": 5e-06,
"loss": 0.5564,
"step": 380
},
{
"epoch": 1.1729323308270676,
"grad_norm": 0.6192317007821239,
"learning_rate": 5e-06,
"loss": 0.5605,
"step": 390
},
{
"epoch": 1.2030075187969924,
"grad_norm": 0.5237078678145406,
"learning_rate": 5e-06,
"loss": 0.5519,
"step": 400
},
{
"epoch": 1.2330827067669172,
"grad_norm": 0.5140051147329998,
"learning_rate": 5e-06,
"loss": 0.5574,
"step": 410
},
{
"epoch": 1.263157894736842,
"grad_norm": 0.6369697944320184,
"learning_rate": 5e-06,
"loss": 0.5541,
"step": 420
},
{
"epoch": 1.2932330827067668,
"grad_norm": 0.5985062376730702,
"learning_rate": 5e-06,
"loss": 0.5582,
"step": 430
},
{
"epoch": 1.3233082706766917,
"grad_norm": 0.48850272868134653,
"learning_rate": 5e-06,
"loss": 0.5532,
"step": 440
},
{
"epoch": 1.3533834586466165,
"grad_norm": 0.5920840023317582,
"learning_rate": 5e-06,
"loss": 0.5571,
"step": 450
},
{
"epoch": 1.3834586466165413,
"grad_norm": 0.5287134171137652,
"learning_rate": 5e-06,
"loss": 0.5487,
"step": 460
},
{
"epoch": 1.413533834586466,
"grad_norm": 0.48569117162380887,
"learning_rate": 5e-06,
"loss": 0.5586,
"step": 470
},
{
"epoch": 1.443609022556391,
"grad_norm": 0.5524909003831754,
"learning_rate": 5e-06,
"loss": 0.5528,
"step": 480
},
{
"epoch": 1.4736842105263157,
"grad_norm": 0.45528161895298735,
"learning_rate": 5e-06,
"loss": 0.5624,
"step": 490
},
{
"epoch": 1.5037593984962405,
"grad_norm": 0.5654083015816137,
"learning_rate": 5e-06,
"loss": 0.5565,
"step": 500
},
{
"epoch": 1.5338345864661656,
"grad_norm": 0.5087876058685868,
"learning_rate": 5e-06,
"loss": 0.5587,
"step": 510
},
{
"epoch": 1.5639097744360901,
"grad_norm": 0.4290062049121276,
"learning_rate": 5e-06,
"loss": 0.5493,
"step": 520
},
{
"epoch": 1.5939849624060152,
"grad_norm": 0.6235091434876753,
"learning_rate": 5e-06,
"loss": 0.5603,
"step": 530
},
{
"epoch": 1.6240601503759398,
"grad_norm": 0.5314512113272476,
"learning_rate": 5e-06,
"loss": 0.5564,
"step": 540
},
{
"epoch": 1.6541353383458648,
"grad_norm": 0.5049026603516721,
"learning_rate": 5e-06,
"loss": 0.5605,
"step": 550
},
{
"epoch": 1.6842105263157894,
"grad_norm": 0.5650921119799542,
"learning_rate": 5e-06,
"loss": 0.5601,
"step": 560
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.5193160484708012,
"learning_rate": 5e-06,
"loss": 0.5554,
"step": 570
},
{
"epoch": 1.744360902255639,
"grad_norm": 0.5213313242863221,
"learning_rate": 5e-06,
"loss": 0.5645,
"step": 580
},
{
"epoch": 1.774436090225564,
"grad_norm": 0.6327600595776153,
"learning_rate": 5e-06,
"loss": 0.5536,
"step": 590
},
{
"epoch": 1.8045112781954886,
"grad_norm": 0.5461472827380875,
"learning_rate": 5e-06,
"loss": 0.5625,
"step": 600
},
{
"epoch": 1.8345864661654137,
"grad_norm": 0.6541180795154581,
"learning_rate": 5e-06,
"loss": 0.554,
"step": 610
},
{
"epoch": 1.8646616541353382,
"grad_norm": 0.5572692340571848,
"learning_rate": 5e-06,
"loss": 0.5529,
"step": 620
},
{
"epoch": 1.8947368421052633,
"grad_norm": 0.5724927689201437,
"learning_rate": 5e-06,
"loss": 0.547,
"step": 630
},
{
"epoch": 1.9248120300751879,
"grad_norm": 0.48899030621990486,
"learning_rate": 5e-06,
"loss": 0.5508,
"step": 640
},
{
"epoch": 1.954887218045113,
"grad_norm": 0.4783796037203571,
"learning_rate": 5e-06,
"loss": 0.5626,
"step": 650
},
{
"epoch": 1.9849624060150375,
"grad_norm": 0.4380429796439722,
"learning_rate": 5e-06,
"loss": 0.5579,
"step": 660
},
{
"epoch": 2.0,
"eval_loss": 0.6012639403343201,
"eval_runtime": 116.0368,
"eval_samples_per_second": 77.2,
"eval_steps_per_second": 0.603,
"step": 665
},
{
"epoch": 2.0150375939849625,
"grad_norm": 0.8195061236201225,
"learning_rate": 5e-06,
"loss": 0.5349,
"step": 670
},
{
"epoch": 2.045112781954887,
"grad_norm": 0.5727072462132908,
"learning_rate": 5e-06,
"loss": 0.5023,
"step": 680
},
{
"epoch": 2.075187969924812,
"grad_norm": 0.5716937306059446,
"learning_rate": 5e-06,
"loss": 0.5025,
"step": 690
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.5936120169922254,
"learning_rate": 5e-06,
"loss": 0.502,
"step": 700
},
{
"epoch": 2.1353383458646618,
"grad_norm": 0.5323072505815232,
"learning_rate": 5e-06,
"loss": 0.5005,
"step": 710
},
{
"epoch": 2.1654135338345863,
"grad_norm": 0.5082968360924967,
"learning_rate": 5e-06,
"loss": 0.5037,
"step": 720
},
{
"epoch": 2.1954887218045114,
"grad_norm": 0.5227485772254855,
"learning_rate": 5e-06,
"loss": 0.4989,
"step": 730
},
{
"epoch": 2.225563909774436,
"grad_norm": 0.6533374147188858,
"learning_rate": 5e-06,
"loss": 0.5051,
"step": 740
},
{
"epoch": 2.255639097744361,
"grad_norm": 0.5912234992209703,
"learning_rate": 5e-06,
"loss": 0.5048,
"step": 750
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.5434227115064355,
"learning_rate": 5e-06,
"loss": 0.508,
"step": 760
},
{
"epoch": 2.3157894736842106,
"grad_norm": 0.5720087264790767,
"learning_rate": 5e-06,
"loss": 0.5099,
"step": 770
},
{
"epoch": 2.345864661654135,
"grad_norm": 0.6089476678610672,
"learning_rate": 5e-06,
"loss": 0.5024,
"step": 780
},
{
"epoch": 2.3759398496240602,
"grad_norm": 0.5508640915356684,
"learning_rate": 5e-06,
"loss": 0.5115,
"step": 790
},
{
"epoch": 2.406015037593985,
"grad_norm": 0.5118656252283122,
"learning_rate": 5e-06,
"loss": 0.5051,
"step": 800
},
{
"epoch": 2.43609022556391,
"grad_norm": 0.5065404177593222,
"learning_rate": 5e-06,
"loss": 0.5094,
"step": 810
},
{
"epoch": 2.4661654135338344,
"grad_norm": 0.5181429354716248,
"learning_rate": 5e-06,
"loss": 0.5106,
"step": 820
},
{
"epoch": 2.4962406015037595,
"grad_norm": 0.5322514457543749,
"learning_rate": 5e-06,
"loss": 0.5109,
"step": 830
},
{
"epoch": 2.526315789473684,
"grad_norm": 0.6196654734671412,
"learning_rate": 5e-06,
"loss": 0.5061,
"step": 840
},
{
"epoch": 2.556390977443609,
"grad_norm": 0.5256550356188423,
"learning_rate": 5e-06,
"loss": 0.5096,
"step": 850
},
{
"epoch": 2.5864661654135337,
"grad_norm": 0.4721653082846406,
"learning_rate": 5e-06,
"loss": 0.5091,
"step": 860
},
{
"epoch": 2.6165413533834587,
"grad_norm": 0.5461766886120224,
"learning_rate": 5e-06,
"loss": 0.5125,
"step": 870
},
{
"epoch": 2.6466165413533833,
"grad_norm": 0.8881592761313983,
"learning_rate": 5e-06,
"loss": 0.5133,
"step": 880
},
{
"epoch": 2.6766917293233083,
"grad_norm": 0.5215263169318092,
"learning_rate": 5e-06,
"loss": 0.5137,
"step": 890
},
{
"epoch": 2.706766917293233,
"grad_norm": 0.5970816519010325,
"learning_rate": 5e-06,
"loss": 0.5085,
"step": 900
},
{
"epoch": 2.736842105263158,
"grad_norm": 0.5663757119297533,
"learning_rate": 5e-06,
"loss": 0.5136,
"step": 910
},
{
"epoch": 2.7669172932330826,
"grad_norm": 0.48531820284070437,
"learning_rate": 5e-06,
"loss": 0.5105,
"step": 920
},
{
"epoch": 2.7969924812030076,
"grad_norm": 0.5235848494113168,
"learning_rate": 5e-06,
"loss": 0.5106,
"step": 930
},
{
"epoch": 2.827067669172932,
"grad_norm": 0.6230274670862681,
"learning_rate": 5e-06,
"loss": 0.5109,
"step": 940
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5464639670084257,
"learning_rate": 5e-06,
"loss": 0.502,
"step": 950
},
{
"epoch": 2.887218045112782,
"grad_norm": 0.5388668922788399,
"learning_rate": 5e-06,
"loss": 0.5059,
"step": 960
},
{
"epoch": 2.917293233082707,
"grad_norm": 0.5634658215547509,
"learning_rate": 5e-06,
"loss": 0.5147,
"step": 970
},
{
"epoch": 2.9473684210526314,
"grad_norm": 0.5876554762818471,
"learning_rate": 5e-06,
"loss": 0.507,
"step": 980
},
{
"epoch": 2.9774436090225564,
"grad_norm": 0.5816222818084577,
"learning_rate": 5e-06,
"loss": 0.5116,
"step": 990
},
{
"epoch": 2.995488721804511,
"eval_loss": 0.605595588684082,
"eval_runtime": 114.1196,
"eval_samples_per_second": 78.497,
"eval_steps_per_second": 0.613,
"step": 996
},
{
"epoch": 2.995488721804511,
"step": 996,
"total_flos": 1667918337146880.0,
"train_loss": 0.5664463261045126,
"train_runtime": 17372.9601,
"train_samples_per_second": 29.39,
"train_steps_per_second": 0.057
}
],
"logging_steps": 10,
"max_steps": 996,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1667918337146880.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}