{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3679890560875512,
"eval_steps": 200,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013679890560875513,
"grad_norm": 5.053287595555438,
"learning_rate": 2.73224043715847e-07,
"loss": 0.8932,
"step": 10
},
{
"epoch": 0.027359781121751026,
"grad_norm": 3.45447174521638,
"learning_rate": 5.46448087431694e-07,
"loss": 0.8525,
"step": 20
},
{
"epoch": 0.04103967168262654,
"grad_norm": 2.3893130639694533,
"learning_rate": 8.196721311475409e-07,
"loss": 0.7783,
"step": 30
},
{
"epoch": 0.05471956224350205,
"grad_norm": 1.847028743837757,
"learning_rate": 1.092896174863388e-06,
"loss": 0.7177,
"step": 40
},
{
"epoch": 0.06839945280437756,
"grad_norm": 1.2462393245377212,
"learning_rate": 1.3661202185792352e-06,
"loss": 0.6729,
"step": 50
},
{
"epoch": 0.08207934336525308,
"grad_norm": 1.1362700785141568,
"learning_rate": 1.6393442622950819e-06,
"loss": 0.6565,
"step": 60
},
{
"epoch": 0.09575923392612859,
"grad_norm": 0.9326843187064789,
"learning_rate": 1.912568306010929e-06,
"loss": 0.63,
"step": 70
},
{
"epoch": 0.1094391244870041,
"grad_norm": 1.0028614214412337,
"learning_rate": 2.185792349726776e-06,
"loss": 0.6298,
"step": 80
},
{
"epoch": 0.12311901504787962,
"grad_norm": 1.0100999618069098,
"learning_rate": 2.459016393442623e-06,
"loss": 0.6173,
"step": 90
},
{
"epoch": 0.13679890560875513,
"grad_norm": 0.9681705152676173,
"learning_rate": 2.7322404371584705e-06,
"loss": 0.605,
"step": 100
},
{
"epoch": 0.15047879616963064,
"grad_norm": 0.859523331087589,
"learning_rate": 3.0054644808743173e-06,
"loss": 0.5933,
"step": 110
},
{
"epoch": 0.16415868673050615,
"grad_norm": 0.9272526409254467,
"learning_rate": 3.2786885245901638e-06,
"loss": 0.6145,
"step": 120
},
{
"epoch": 0.17783857729138167,
"grad_norm": 0.92928854698688,
"learning_rate": 3.551912568306011e-06,
"loss": 0.5845,
"step": 130
},
{
"epoch": 0.19151846785225718,
"grad_norm": 0.9785518793547441,
"learning_rate": 3.825136612021858e-06,
"loss": 0.6019,
"step": 140
},
{
"epoch": 0.2051983584131327,
"grad_norm": 0.9109059865561239,
"learning_rate": 4.098360655737705e-06,
"loss": 0.5814,
"step": 150
},
{
"epoch": 0.2188782489740082,
"grad_norm": 0.980213683646779,
"learning_rate": 4.371584699453552e-06,
"loss": 0.5952,
"step": 160
},
{
"epoch": 0.23255813953488372,
"grad_norm": 0.9880924801019324,
"learning_rate": 4.6448087431694e-06,
"loss": 0.5929,
"step": 170
},
{
"epoch": 0.24623803009575923,
"grad_norm": 1.1007085585322145,
"learning_rate": 4.918032786885246e-06,
"loss": 0.5864,
"step": 180
},
{
"epoch": 0.25991792065663477,
"grad_norm": 0.9717512421902434,
"learning_rate": 5.191256830601094e-06,
"loss": 0.5948,
"step": 190
},
{
"epoch": 0.27359781121751026,
"grad_norm": 0.9555482181766941,
"learning_rate": 5.464480874316941e-06,
"loss": 0.5661,
"step": 200
},
{
"epoch": 0.27359781121751026,
"eval_loss": 0.5981519222259521,
"eval_runtime": 159.9655,
"eval_samples_per_second": 32.494,
"eval_steps_per_second": 4.063,
"step": 200
},
{
"epoch": 0.2872777017783858,
"grad_norm": 0.9338696098956073,
"learning_rate": 5.737704918032787e-06,
"loss": 0.569,
"step": 210
},
{
"epoch": 0.3009575923392613,
"grad_norm": 0.9740202140684622,
"learning_rate": 6.010928961748635e-06,
"loss": 0.575,
"step": 220
},
{
"epoch": 0.3146374829001368,
"grad_norm": 0.9288295579333367,
"learning_rate": 6.284153005464482e-06,
"loss": 0.576,
"step": 230
},
{
"epoch": 0.3283173734610123,
"grad_norm": 0.9619629878043822,
"learning_rate": 6.5573770491803276e-06,
"loss": 0.5741,
"step": 240
},
{
"epoch": 0.34199726402188785,
"grad_norm": 0.9040298845571978,
"learning_rate": 6.830601092896175e-06,
"loss": 0.5772,
"step": 250
},
{
"epoch": 0.35567715458276333,
"grad_norm": 0.8786739631066459,
"learning_rate": 7.103825136612022e-06,
"loss": 0.5813,
"step": 260
},
{
"epoch": 0.3693570451436389,
"grad_norm": 0.9576443418934807,
"learning_rate": 7.3770491803278695e-06,
"loss": 0.5719,
"step": 270
},
{
"epoch": 0.38303693570451436,
"grad_norm": 0.9202603458629603,
"learning_rate": 7.650273224043716e-06,
"loss": 0.5628,
"step": 280
},
{
"epoch": 0.3967168262653899,
"grad_norm": 0.942478653508796,
"learning_rate": 7.923497267759564e-06,
"loss": 0.5792,
"step": 290
},
{
"epoch": 0.4103967168262654,
"grad_norm": 0.9168741658706063,
"learning_rate": 8.19672131147541e-06,
"loss": 0.5646,
"step": 300
},
{
"epoch": 0.4240766073871409,
"grad_norm": 0.8558956129931499,
"learning_rate": 8.469945355191259e-06,
"loss": 0.5656,
"step": 310
},
{
"epoch": 0.4377564979480164,
"grad_norm": 0.9526515723477538,
"learning_rate": 8.743169398907103e-06,
"loss": 0.579,
"step": 320
},
{
"epoch": 0.45143638850889195,
"grad_norm": 0.9391628958319846,
"learning_rate": 9.016393442622952e-06,
"loss": 0.5809,
"step": 330
},
{
"epoch": 0.46511627906976744,
"grad_norm": 0.9325548398873699,
"learning_rate": 9.2896174863388e-06,
"loss": 0.5684,
"step": 340
},
{
"epoch": 0.478796169630643,
"grad_norm": 1.0983050300163264,
"learning_rate": 9.562841530054644e-06,
"loss": 0.5619,
"step": 350
},
{
"epoch": 0.49247606019151846,
"grad_norm": 1.0292824667919394,
"learning_rate": 9.836065573770493e-06,
"loss": 0.5705,
"step": 360
},
{
"epoch": 0.506155950752394,
"grad_norm": 0.9901844003150019,
"learning_rate": 9.999963505160212e-06,
"loss": 0.5706,
"step": 370
},
{
"epoch": 0.5198358413132695,
"grad_norm": 1.0258853719887646,
"learning_rate": 9.999552944330875e-06,
"loss": 0.5686,
"step": 380
},
{
"epoch": 0.533515731874145,
"grad_norm": 0.9891878174671198,
"learning_rate": 9.99868624170547e-06,
"loss": 0.5776,
"step": 390
},
{
"epoch": 0.5471956224350205,
"grad_norm": 0.8370829060088154,
"learning_rate": 9.997363476358921e-06,
"loss": 0.5585,
"step": 400
},
{
"epoch": 0.5471956224350205,
"eval_loss": 0.580952525138855,
"eval_runtime": 159.3361,
"eval_samples_per_second": 32.623,
"eval_steps_per_second": 4.079,
"step": 400
},
{
"epoch": 0.560875512995896,
"grad_norm": 0.9735691799963614,
"learning_rate": 9.995584768975735e-06,
"loss": 0.5652,
"step": 410
},
{
"epoch": 0.5745554035567716,
"grad_norm": 0.9521955992581953,
"learning_rate": 9.993350281838978e-06,
"loss": 0.5628,
"step": 420
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.9637326745913726,
"learning_rate": 9.990660218815473e-06,
"loss": 0.5577,
"step": 430
},
{
"epoch": 0.6019151846785226,
"grad_norm": 0.9752756108588176,
"learning_rate": 9.987514825337198e-06,
"loss": 0.5768,
"step": 440
},
{
"epoch": 0.615595075239398,
"grad_norm": 1.0301927608105819,
"learning_rate": 9.983914388378902e-06,
"loss": 0.5725,
"step": 450
},
{
"epoch": 0.6292749658002736,
"grad_norm": 0.9590925028019378,
"learning_rate": 9.979859236431907e-06,
"loss": 0.5626,
"step": 460
},
{
"epoch": 0.6429548563611491,
"grad_norm": 0.9835455217529487,
"learning_rate": 9.975349739474156e-06,
"loss": 0.5779,
"step": 470
},
{
"epoch": 0.6566347469220246,
"grad_norm": 1.062105507229158,
"learning_rate": 9.97038630893644e-06,
"loss": 0.5595,
"step": 480
},
{
"epoch": 0.6703146374829001,
"grad_norm": 0.9569530182519088,
"learning_rate": 9.964969397664874e-06,
"loss": 0.571,
"step": 490
},
{
"epoch": 0.6839945280437757,
"grad_norm": 0.8649735306955498,
"learning_rate": 9.959099499879577e-06,
"loss": 0.5424,
"step": 500
},
{
"epoch": 0.6976744186046512,
"grad_norm": 0.9382216608570519,
"learning_rate": 9.952777151129571e-06,
"loss": 0.5613,
"step": 510
},
{
"epoch": 0.7113543091655267,
"grad_norm": 0.9630521981838124,
"learning_rate": 9.94600292824394e-06,
"loss": 0.5686,
"step": 520
},
{
"epoch": 0.7250341997264022,
"grad_norm": 0.9242290659377395,
"learning_rate": 9.93877744927918e-06,
"loss": 0.5708,
"step": 530
},
{
"epoch": 0.7387140902872777,
"grad_norm": 1.0108351042211203,
"learning_rate": 9.931101373462823e-06,
"loss": 0.5497,
"step": 540
},
{
"epoch": 0.7523939808481532,
"grad_norm": 1.0578262433869685,
"learning_rate": 9.922975401133292e-06,
"loss": 0.5507,
"step": 550
},
{
"epoch": 0.7660738714090287,
"grad_norm": 1.015088571928902,
"learning_rate": 9.914400273675997e-06,
"loss": 0.5685,
"step": 560
},
{
"epoch": 0.7797537619699042,
"grad_norm": 1.0502619605526902,
"learning_rate": 9.905376773455697e-06,
"loss": 0.5611,
"step": 570
},
{
"epoch": 0.7934336525307798,
"grad_norm": 1.008086132136798,
"learning_rate": 9.895905723745123e-06,
"loss": 0.5562,
"step": 580
},
{
"epoch": 0.8071135430916553,
"grad_norm": 1.0470171940467392,
"learning_rate": 9.885987988649857e-06,
"loss": 0.5846,
"step": 590
},
{
"epoch": 0.8207934336525308,
"grad_norm": 0.9491762214187747,
"learning_rate": 9.875624473029508e-06,
"loss": 0.5627,
"step": 600
},
{
"epoch": 0.8207934336525308,
"eval_loss": 0.5711638927459717,
"eval_runtime": 158.8185,
"eval_samples_per_second": 32.729,
"eval_steps_per_second": 4.093,
"step": 600
},
{
"epoch": 0.8344733242134063,
"grad_norm": 0.9239516206253897,
"learning_rate": 9.864816122415139e-06,
"loss": 0.5578,
"step": 610
},
{
"epoch": 0.8481532147742818,
"grad_norm": 1.0112183427616674,
"learning_rate": 9.853563922923015e-06,
"loss": 0.5603,
"step": 620
},
{
"epoch": 0.8618331053351573,
"grad_norm": 0.9922429403728903,
"learning_rate": 9.841868901164621e-06,
"loss": 0.5649,
"step": 630
},
{
"epoch": 0.8755129958960328,
"grad_norm": 0.9650188185711245,
"learning_rate": 9.829732124153012e-06,
"loss": 0.5587,
"step": 640
},
{
"epoch": 0.8891928864569083,
"grad_norm": 0.9100924243083076,
"learning_rate": 9.817154699205438e-06,
"loss": 0.538,
"step": 650
},
{
"epoch": 0.9028727770177839,
"grad_norm": 0.8693475054624618,
"learning_rate": 9.804137773842346e-06,
"loss": 0.5489,
"step": 660
},
{
"epoch": 0.9165526675786594,
"grad_norm": 0.9005541862355255,
"learning_rate": 9.790682535682667e-06,
"loss": 0.5492,
"step": 670
},
{
"epoch": 0.9302325581395349,
"grad_norm": 0.9500349512704993,
"learning_rate": 9.776790212335461e-06,
"loss": 0.5414,
"step": 680
},
{
"epoch": 0.9439124487004104,
"grad_norm": 0.936576575842477,
"learning_rate": 9.762462071287919e-06,
"loss": 0.5534,
"step": 690
},
{
"epoch": 0.957592339261286,
"grad_norm": 0.9125490638893844,
"learning_rate": 9.747699419789724e-06,
"loss": 0.5552,
"step": 700
},
{
"epoch": 0.9712722298221614,
"grad_norm": 0.981568813631045,
"learning_rate": 9.732503604733776e-06,
"loss": 0.5621,
"step": 710
},
{
"epoch": 0.9849521203830369,
"grad_norm": 0.8600448310209287,
"learning_rate": 9.716876012533312e-06,
"loss": 0.5336,
"step": 720
},
{
"epoch": 0.9986320109439124,
"grad_norm": 1.0269642570973099,
"learning_rate": 9.700818068995407e-06,
"loss": 0.5695,
"step": 730
},
{
"epoch": 1.012311901504788,
"grad_norm": 0.932280864433308,
"learning_rate": 9.6843312391909e-06,
"loss": 0.4611,
"step": 740
},
{
"epoch": 1.0259917920656634,
"grad_norm": 0.9022906510351205,
"learning_rate": 9.66741702732071e-06,
"loss": 0.4345,
"step": 750
},
{
"epoch": 1.039671682626539,
"grad_norm": 0.9197395697056543,
"learning_rate": 9.650076976578615e-06,
"loss": 0.4386,
"step": 760
},
{
"epoch": 1.0533515731874146,
"grad_norm": 0.9282255816560907,
"learning_rate": 9.632312669010446e-06,
"loss": 0.4352,
"step": 770
},
{
"epoch": 1.06703146374829,
"grad_norm": 0.8070237110231103,
"learning_rate": 9.614125725369748e-06,
"loss": 0.4363,
"step": 780
},
{
"epoch": 1.0807113543091655,
"grad_norm": 0.8499427387381004,
"learning_rate": 9.595517804969907e-06,
"loss": 0.4395,
"step": 790
},
{
"epoch": 1.094391244870041,
"grad_norm": 0.887896589636451,
"learning_rate": 9.576490605532766e-06,
"loss": 0.4403,
"step": 800
},
{
"epoch": 1.094391244870041,
"eval_loss": 0.5807020664215088,
"eval_runtime": 160.3129,
"eval_samples_per_second": 32.424,
"eval_steps_per_second": 4.055,
"step": 800
},
{
"epoch": 1.1080711354309165,
"grad_norm": 0.9530846973221945,
"learning_rate": 9.55704586303373e-06,
"loss": 0.4459,
"step": 810
},
{
"epoch": 1.121751025991792,
"grad_norm": 0.9027470539677687,
"learning_rate": 9.537185351543367e-06,
"loss": 0.4367,
"step": 820
},
{
"epoch": 1.1354309165526675,
"grad_norm": 0.9278779163587121,
"learning_rate": 9.51691088306557e-06,
"loss": 0.4474,
"step": 830
},
{
"epoch": 1.1491108071135432,
"grad_norm": 0.8962115950716881,
"learning_rate": 9.496224307372225e-06,
"loss": 0.4501,
"step": 840
},
{
"epoch": 1.1627906976744187,
"grad_norm": 0.9005709870286739,
"learning_rate": 9.475127511834438e-06,
"loss": 0.4457,
"step": 850
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.8966783526748298,
"learning_rate": 9.453622421250353e-06,
"loss": 0.4494,
"step": 860
},
{
"epoch": 1.1901504787961696,
"grad_norm": 0.8734193357405506,
"learning_rate": 9.431710997669523e-06,
"loss": 0.4318,
"step": 870
},
{
"epoch": 1.2038303693570451,
"grad_norm": 0.8537412892919725,
"learning_rate": 9.409395240213917e-06,
"loss": 0.4472,
"step": 880
},
{
"epoch": 1.2175102599179206,
"grad_norm": 0.9359765245371868,
"learning_rate": 9.386677184895513e-06,
"loss": 0.4493,
"step": 890
},
{
"epoch": 1.231190150478796,
"grad_norm": 0.9098467093289945,
"learning_rate": 9.363558904430546e-06,
"loss": 0.4499,
"step": 900
},
{
"epoch": 1.2448700410396718,
"grad_norm": 0.9237606264015138,
"learning_rate": 9.3400425080504e-06,
"loss": 0.4493,
"step": 910
},
{
"epoch": 1.2585499316005473,
"grad_norm": 0.9200466683840953,
"learning_rate": 9.316130141309166e-06,
"loss": 0.466,
"step": 920
},
{
"epoch": 1.2722298221614228,
"grad_norm": 0.9058664962972951,
"learning_rate": 9.291823985887896e-06,
"loss": 0.4491,
"step": 930
},
{
"epoch": 1.2859097127222983,
"grad_norm": 0.8714585241942261,
"learning_rate": 9.267126259395545e-06,
"loss": 0.4424,
"step": 940
},
{
"epoch": 1.2995896032831737,
"grad_norm": 0.8974493475078188,
"learning_rate": 9.242039215166652e-06,
"loss": 0.4407,
"step": 950
},
{
"epoch": 1.3132694938440492,
"grad_norm": 0.8849152680465047,
"learning_rate": 9.216565142055745e-06,
"loss": 0.4422,
"step": 960
},
{
"epoch": 1.3269493844049247,
"grad_norm": 0.8974379058326695,
"learning_rate": 9.190706364228528e-06,
"loss": 0.4456,
"step": 970
},
{
"epoch": 1.3406292749658002,
"grad_norm": 0.9379832480977721,
"learning_rate": 9.164465240949814e-06,
"loss": 0.441,
"step": 980
},
{
"epoch": 1.3543091655266757,
"grad_norm": 0.9335214175194183,
"learning_rate": 9.137844166368289e-06,
"loss": 0.448,
"step": 990
},
{
"epoch": 1.3679890560875512,
"grad_norm": 0.8840460969403874,
"learning_rate": 9.110845569298068e-06,
"loss": 0.4474,
"step": 1000
},
{
"epoch": 1.3679890560875512,
"eval_loss": 0.5775927305221558,
"eval_runtime": 159.7467,
"eval_samples_per_second": 32.539,
"eval_steps_per_second": 4.069,
"step": 1000
}
],
"logging_steps": 10,
"max_steps": 3655,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 228443619328000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
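
The file above is the trainer_state.json that the Hugging Face Trainer writes alongside a checkpoint. Below is a minimal Python sketch (not part of the original file) for inspecting it: it assumes the JSON has been saved locally as trainer_state.json, and it only reads the field names actually shown above (log_history, loss, eval_loss, logging_steps, eval_steps, step).

import json

# Hypothetical local path; point this at the downloaded checkpoint folder.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training records (which carry "loss") and evaluation
# records (which carry "eval_loss"); split them by key.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"training loss logged every {state['logging_steps']} steps, "
      f"evaluation run every {state['eval_steps']} steps")
for e in eval_log:
    print(f"step {e['step']:>5}  epoch {e['epoch']:.3f}  eval_loss {e['eval_loss']:.4f}")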