{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 6.0,
"eval_steps": 5,
"global_step": 30936,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0969743987587277,
"grad_norm": 5.50258207321167,
"learning_rate": 2.451704876061911e-05,
"loss": 0.7801,
"step": 500
},
{
"epoch": 0.1939487975174554,
"grad_norm": 2.8141658306121826,
"learning_rate": 2.4032157958027852e-05,
"loss": 0.4736,
"step": 1000
},
{
"epoch": 0.29092319627618307,
"grad_norm": 1.9647445678710938,
"learning_rate": 2.3547267155436597e-05,
"loss": 0.3846,
"step": 1500
},
{
"epoch": 0.3878975950349108,
"grad_norm": 0.9280088543891907,
"learning_rate": 2.306334613445052e-05,
"loss": 0.3573,
"step": 2000
},
{
"epoch": 0.4848719937936385,
"grad_norm": 4.660079479217529,
"learning_rate": 2.2578455331859266e-05,
"loss": 0.3135,
"step": 2500
},
{
"epoch": 0.5818463925523661,
"grad_norm": 2.9539144039154053,
"learning_rate": 2.209356452926801e-05,
"loss": 0.2992,
"step": 3000
},
{
"epoch": 0.6788207913110939,
"grad_norm": 1.4715936183929443,
"learning_rate": 2.1608673726676753e-05,
"loss": 0.2836,
"step": 3500
},
{
"epoch": 0.7757951900698216,
"grad_norm": 1.7886435985565186,
"learning_rate": 2.1123782924085498e-05,
"loss": 0.2698,
"step": 4000
},
{
"epoch": 0.8727695888285493,
"grad_norm": 1.0143980979919434,
"learning_rate": 2.063889212149424e-05,
"loss": 0.2549,
"step": 4500
},
{
"epoch": 0.969743987587277,
"grad_norm": 4.67170524597168,
"learning_rate": 2.0154001318902985e-05,
"loss": 0.255,
"step": 5000
},
{
"epoch": 1.0,
"eval_loss": 0.22126668691635132,
"eval_runtime": 1558.3887,
"eval_samples_per_second": 2.836,
"eval_steps_per_second": 0.355,
"step": 5156
},
{
"epoch": 1.0667183863460046,
"grad_norm": 1.17802894115448,
"learning_rate": 1.966911051631173e-05,
"loss": 0.198,
"step": 5500
},
{
"epoch": 1.1636927851047323,
"grad_norm": 2.5160129070281982,
"learning_rate": 1.9184219713720472e-05,
"loss": 0.195,
"step": 6000
},
{
"epoch": 1.26066718386346,
"grad_norm": 2.1824357509613037,
"learning_rate": 1.8699328911129217e-05,
"loss": 0.1864,
"step": 6500
},
{
"epoch": 1.3576415826221877,
"grad_norm": 1.4108847379684448,
"learning_rate": 1.821443810853796e-05,
"loss": 0.1948,
"step": 7000
},
{
"epoch": 1.4546159813809154,
"grad_norm": 1.5844694375991821,
"learning_rate": 1.7730517087551886e-05,
"loss": 0.1793,
"step": 7500
},
{
"epoch": 1.5515903801396431,
"grad_norm": 0.8616577386856079,
"learning_rate": 1.7245626284960627e-05,
"loss": 0.1804,
"step": 8000
},
{
"epoch": 1.6485647788983708,
"grad_norm": 2.6326045989990234,
"learning_rate": 1.6760735482369372e-05,
"loss": 0.1719,
"step": 8500
},
{
"epoch": 1.7455391776570985,
"grad_norm": 1.3256356716156006,
"learning_rate": 1.6275844679778114e-05,
"loss": 0.1768,
"step": 9000
},
{
"epoch": 1.8425135764158262,
"grad_norm": 2.5661003589630127,
"learning_rate": 1.579192365879204e-05,
"loss": 0.171,
"step": 9500
},
{
"epoch": 1.939487975174554,
"grad_norm": 3.0119805335998535,
"learning_rate": 1.5307032856200783e-05,
"loss": 0.1654,
"step": 10000
},
{
"epoch": 2.0,
"eval_loss": 0.16889868676662445,
"eval_runtime": 1543.9946,
"eval_samples_per_second": 2.863,
"eval_steps_per_second": 0.358,
"step": 10312
},
{
"epoch": 2.0364623739332814,
"grad_norm": 2.474855661392212,
"learning_rate": 1.4825051398425075e-05,
"loss": 0.1434,
"step": 10500
},
{
"epoch": 2.133436772692009,
"grad_norm": 1.4645425081253052,
"learning_rate": 1.4343069940649367e-05,
"loss": 0.1296,
"step": 11000
},
{
"epoch": 2.230411171450737,
"grad_norm": 1.1075003147125244,
"learning_rate": 1.3858179138058112e-05,
"loss": 0.1346,
"step": 11500
},
{
"epoch": 2.3273855702094646,
"grad_norm": 2.3721115589141846,
"learning_rate": 1.3373288335466855e-05,
"loss": 0.1247,
"step": 12000
},
{
"epoch": 2.4243599689681923,
"grad_norm": 0.8129625916481018,
"learning_rate": 1.2888397532875595e-05,
"loss": 0.1184,
"step": 12500
},
{
"epoch": 2.52133436772692,
"grad_norm": 2.1738102436065674,
"learning_rate": 1.240350673028434e-05,
"loss": 0.1227,
"step": 13000
},
{
"epoch": 2.6183087664856477,
"grad_norm": 2.0540452003479004,
"learning_rate": 1.1918615927693085e-05,
"loss": 0.1193,
"step": 13500
},
{
"epoch": 2.7152831652443754,
"grad_norm": 1.0573948621749878,
"learning_rate": 1.1433725125101828e-05,
"loss": 0.1184,
"step": 14000
},
{
"epoch": 2.812257564003103,
"grad_norm": 0.6909541487693787,
"learning_rate": 1.094883432251057e-05,
"loss": 0.1133,
"step": 14500
},
{
"epoch": 2.909231962761831,
"grad_norm": 3.4285881519317627,
"learning_rate": 1.0463943519919314e-05,
"loss": 0.1221,
"step": 15000
},
{
"epoch": 3.0,
"eval_loss": 0.1472880095243454,
"eval_runtime": 1543.9214,
"eval_samples_per_second": 2.863,
"eval_steps_per_second": 0.358,
"step": 15468
},
{
"epoch": 3.0062063615205585,
"grad_norm": 1.0781619548797607,
"learning_rate": 9.979052717328059e-06,
"loss": 0.1055,
"step": 15500
},
{
"epoch": 3.1031807602792862,
"grad_norm": 1.153516411781311,
"learning_rate": 9.494161914736802e-06,
"loss": 0.0921,
"step": 16000
},
{
"epoch": 3.200155159038014,
"grad_norm": 0.8690130114555359,
"learning_rate": 9.009271112145545e-06,
"loss": 0.0914,
"step": 16500
},
{
"epoch": 3.2971295577967417,
"grad_norm": 0.9147763848304749,
"learning_rate": 8.524380309554289e-06,
"loss": 0.0955,
"step": 17000
},
{
"epoch": 3.3941039565554694,
"grad_norm": 0.4283031225204468,
"learning_rate": 8.039489506963032e-06,
"loss": 0.0983,
"step": 17500
},
{
"epoch": 3.491078355314197,
"grad_norm": 1.6501576900482178,
"learning_rate": 7.554598704371776e-06,
"loss": 0.0937,
"step": 18000
},
{
"epoch": 3.588052754072925,
"grad_norm": 1.2180219888687134,
"learning_rate": 7.069707901780519e-06,
"loss": 0.0915,
"step": 18500
},
{
"epoch": 3.6850271528316525,
"grad_norm": 1.027410864830017,
"learning_rate": 6.584817099189262e-06,
"loss": 0.091,
"step": 19000
},
{
"epoch": 3.78200155159038,
"grad_norm": 1.701826810836792,
"learning_rate": 6.100896078203189e-06,
"loss": 0.0938,
"step": 19500
},
{
"epoch": 3.878975950349108,
"grad_norm": 1.8526573181152344,
"learning_rate": 5.616005275611932e-06,
"loss": 0.0946,
"step": 20000
},
{
"epoch": 3.9759503491078356,
"grad_norm": 2.527759075164795,
"learning_rate": 5.131114473020676e-06,
"loss": 0.0879,
"step": 20500
},
{
"epoch": 4.0,
"eval_loss": 0.13828447461128235,
"eval_runtime": 1543.3031,
"eval_samples_per_second": 2.864,
"eval_steps_per_second": 0.358,
"step": 20624
},
{
"epoch": 4.072924747866563,
"grad_norm": 1.2997472286224365,
"learning_rate": 1.482330921856514e-05,
"loss": 0.0851,
"step": 21000
},
{
"epoch": 4.169899146625291,
"grad_norm": 2.8086001873016357,
"learning_rate": 1.4580868519560117e-05,
"loss": 0.092,
"step": 21500
},
{
"epoch": 4.266873545384018,
"grad_norm": 1.3651138544082642,
"learning_rate": 1.4338427820555092e-05,
"loss": 0.091,
"step": 22000
},
{
"epoch": 4.363847944142746,
"grad_norm": 1.5494824647903442,
"learning_rate": 1.409598712155007e-05,
"loss": 0.0916,
"step": 22500
},
{
"epoch": 4.460822342901474,
"grad_norm": 1.5494245290756226,
"learning_rate": 1.3854031303943058e-05,
"loss": 0.0919,
"step": 23000
},
{
"epoch": 4.557796741660201,
"grad_norm": 2.636620044708252,
"learning_rate": 1.3611590604938032e-05,
"loss": 0.0936,
"step": 23500
},
{
"epoch": 4.654771140418929,
"grad_norm": 1.1111247539520264,
"learning_rate": 1.3369149905933009e-05,
"loss": 0.0918,
"step": 24000
},
{
"epoch": 4.751745539177657,
"grad_norm": 1.1152452230453491,
"learning_rate": 1.3126709206927986e-05,
"loss": 0.095,
"step": 24500
},
{
"epoch": 4.848719937936385,
"grad_norm": 1.1507107019424438,
"learning_rate": 1.2884268507922962e-05,
"loss": 0.0933,
"step": 25000
},
{
"epoch": 4.945694336695112,
"grad_norm": 0.9500592350959778,
"learning_rate": 1.2641827808917939e-05,
"loss": 0.0951,
"step": 25500
},
{
"epoch": 5.0,
"eval_loss": 0.14213640987873077,
"eval_runtime": 1544.0177,
"eval_samples_per_second": 2.863,
"eval_steps_per_second": 0.358,
"step": 25780
},
{
"epoch": 5.04266873545384,
"grad_norm": 1.6337387561798096,
"learning_rate": 1.2399871991310926e-05,
"loss": 0.0852,
"step": 26000
},
{
"epoch": 5.139643134212568,
"grad_norm": 0.6484285593032837,
"learning_rate": 1.2157431292305903e-05,
"loss": 0.0751,
"step": 26500
},
{
"epoch": 5.236617532971295,
"grad_norm": 1.9428491592407227,
"learning_rate": 1.1914990593300878e-05,
"loss": 0.0789,
"step": 27000
},
{
"epoch": 5.333591931730023,
"grad_norm": 1.7594366073608398,
"learning_rate": 1.1672549894295855e-05,
"loss": 0.0795,
"step": 27500
},
{
"epoch": 5.430566330488751,
"grad_norm": 0.4171479046344757,
"learning_rate": 1.1430109195290833e-05,
"loss": 0.0775,
"step": 28000
},
{
"epoch": 5.5275407292474785,
"grad_norm": 1.192670226097107,
"learning_rate": 1.118766849628581e-05,
"loss": 0.0822,
"step": 28500
},
{
"epoch": 5.624515128006206,
"grad_norm": 1.016116976737976,
"learning_rate": 1.0945227797280785e-05,
"loss": 0.0811,
"step": 29000
},
{
"epoch": 5.721489526764934,
"grad_norm": 0.925920307636261,
"learning_rate": 1.0702787098275762e-05,
"loss": 0.0767,
"step": 29500
},
{
"epoch": 5.818463925523662,
"grad_norm": 1.05020010471344,
"learning_rate": 1.046083128066875e-05,
"loss": 0.0784,
"step": 30000
},
{
"epoch": 5.915438324282389,
"grad_norm": 1.8532640933990479,
"learning_rate": 1.0218390581663725e-05,
"loss": 0.0777,
"step": 30500
},
{
"epoch": 6.0,
"eval_loss": 0.14130142331123352,
"eval_runtime": 1544.1253,
"eval_samples_per_second": 2.862,
"eval_steps_per_second": 0.358,
"step": 30936
}
],
"logging_steps": 500,
"max_steps": 51560,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.2712794309494047e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}