{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.324324324324325,
"eval_steps": 500,
"global_step": 120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.036036036036036036,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.2548,
"step": 1
},
{
"epoch": 0.07207207207207207,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 1.7083,
"step": 2
},
{
"epoch": 0.10810810810810811,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 1.9566,
"step": 3
},
{
"epoch": 0.14414414414414414,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.2875,
"step": 4
},
{
"epoch": 0.18018018018018017,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.0179,
"step": 5
},
{
"epoch": 0.21621621621621623,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.1465,
"step": 6
},
{
"epoch": 0.25225225225225223,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.0991,
"step": 7
},
{
"epoch": 0.2882882882882883,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 2.0996,
"step": 8
},
{
"epoch": 0.32432432432432434,
"grad_norm": 3.127271890640259,
"learning_rate": 2e-05,
"loss": 2.0748,
"step": 9
},
{
"epoch": 0.36036036036036034,
"grad_norm": 2.1929919719696045,
"learning_rate": 4e-05,
"loss": 1.7543,
"step": 10
},
{
"epoch": 0.3963963963963964,
"grad_norm": 3.9285449981689453,
"learning_rate": 6e-05,
"loss": 2.1182,
"step": 11
},
{
"epoch": 0.43243243243243246,
"grad_norm": 3.9492204189300537,
"learning_rate": 8e-05,
"loss": 2.1398,
"step": 12
},
{
"epoch": 0.46846846846846846,
"grad_norm": 4.697811603546143,
"learning_rate": 0.0001,
"loss": 1.944,
"step": 13
},
{
"epoch": 0.5045045045045045,
"grad_norm": 2.257394313812256,
"learning_rate": 0.00012,
"loss": 1.6118,
"step": 14
},
{
"epoch": 0.5405405405405406,
"grad_norm": 1.470400333404541,
"learning_rate": 0.00014,
"loss": 1.6489,
"step": 15
},
{
"epoch": 0.5765765765765766,
"grad_norm": 0.9114196300506592,
"learning_rate": 0.00016,
"loss": 1.3067,
"step": 16
},
{
"epoch": 0.6126126126126126,
"grad_norm": 1.9045084714889526,
"learning_rate": 0.00018,
"loss": 1.4384,
"step": 17
},
{
"epoch": 0.6486486486486487,
"grad_norm": 1.669812560081482,
"learning_rate": 0.0002,
"loss": 1.3614,
"step": 18
},
{
"epoch": 0.6846846846846847,
"grad_norm": 0.982727587223053,
"learning_rate": 0.00019818181818181821,
"loss": 1.29,
"step": 19
},
{
"epoch": 0.7207207207207207,
"grad_norm": 0.5169259309768677,
"learning_rate": 0.00019636363636363636,
"loss": 1.2386,
"step": 20
},
{
"epoch": 0.7567567567567568,
"grad_norm": 0.3028675317764282,
"learning_rate": 0.00019454545454545457,
"loss": 1.1707,
"step": 21
},
{
"epoch": 0.7927927927927928,
"grad_norm": 0.35564807057380676,
"learning_rate": 0.00019272727272727274,
"loss": 0.9072,
"step": 22
},
{
"epoch": 0.8288288288288288,
"grad_norm": 0.30732882022857666,
"learning_rate": 0.00019090909090909092,
"loss": 0.9289,
"step": 23
},
{
"epoch": 0.8648648648648649,
"grad_norm": 0.31778138875961304,
"learning_rate": 0.0001890909090909091,
"loss": 0.9955,
"step": 24
},
{
"epoch": 0.9009009009009009,
"grad_norm": 0.3151562809944153,
"learning_rate": 0.00018727272727272728,
"loss": 0.8705,
"step": 25
},
{
"epoch": 0.9369369369369369,
"grad_norm": 0.3682255744934082,
"learning_rate": 0.00018545454545454545,
"loss": 1.197,
"step": 26
},
{
"epoch": 0.972972972972973,
"grad_norm": 0.25122687220573425,
"learning_rate": 0.00018363636363636366,
"loss": 1.0997,
"step": 27
},
{
"epoch": 1.009009009009009,
"grad_norm": 0.23811328411102295,
"learning_rate": 0.00018181818181818183,
"loss": 1.0265,
"step": 28
},
{
"epoch": 1.045045045045045,
"grad_norm": 0.23290874063968658,
"learning_rate": 0.00018,
"loss": 0.7358,
"step": 29
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.21789032220840454,
"learning_rate": 0.0001781818181818182,
"loss": 0.7165,
"step": 30
},
{
"epoch": 1.117117117117117,
"grad_norm": 0.3115369975566864,
"learning_rate": 0.00017636363636363637,
"loss": 0.7812,
"step": 31
},
{
"epoch": 1.1531531531531531,
"grad_norm": 0.21534988284111023,
"learning_rate": 0.00017454545454545454,
"loss": 0.706,
"step": 32
},
{
"epoch": 1.1891891891891893,
"grad_norm": 0.21201848983764648,
"learning_rate": 0.00017272727272727275,
"loss": 0.6389,
"step": 33
},
{
"epoch": 1.2252252252252251,
"grad_norm": 0.16613227128982544,
"learning_rate": 0.0001709090909090909,
"loss": 0.7041,
"step": 34
},
{
"epoch": 1.2612612612612613,
"grad_norm": 0.26245370507240295,
"learning_rate": 0.0001690909090909091,
"loss": 0.6188,
"step": 35
},
{
"epoch": 1.2972972972972974,
"grad_norm": 0.17487148940563202,
"learning_rate": 0.00016727272727272728,
"loss": 0.7382,
"step": 36
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.22627073526382446,
"learning_rate": 0.00016545454545454545,
"loss": 0.9381,
"step": 37
},
{
"epoch": 1.3693693693693694,
"grad_norm": 0.19883334636688232,
"learning_rate": 0.00016363636363636366,
"loss": 1.0042,
"step": 38
},
{
"epoch": 1.4054054054054055,
"grad_norm": 0.18648912012577057,
"learning_rate": 0.00016181818181818184,
"loss": 0.7297,
"step": 39
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.2486204355955124,
"learning_rate": 0.00016,
"loss": 1.0105,
"step": 40
},
{
"epoch": 1.4774774774774775,
"grad_norm": 0.19586724042892456,
"learning_rate": 0.0001581818181818182,
"loss": 0.8904,
"step": 41
},
{
"epoch": 1.5135135135135136,
"grad_norm": 0.1361636519432068,
"learning_rate": 0.00015636363636363637,
"loss": 0.7655,
"step": 42
},
{
"epoch": 1.5495495495495497,
"grad_norm": 0.1422913819551468,
"learning_rate": 0.00015454545454545454,
"loss": 0.884,
"step": 43
},
{
"epoch": 1.5855855855855856,
"grad_norm": 0.12737137079238892,
"learning_rate": 0.00015272727272727275,
"loss": 0.4881,
"step": 44
},
{
"epoch": 1.6216216216216215,
"grad_norm": 0.14031246304512024,
"learning_rate": 0.0001509090909090909,
"loss": 0.4856,
"step": 45
},
{
"epoch": 1.6576576576576576,
"grad_norm": 0.152836412191391,
"learning_rate": 0.0001490909090909091,
"loss": 0.5526,
"step": 46
},
{
"epoch": 1.6936936936936937,
"grad_norm": 0.16750769317150116,
"learning_rate": 0.00014727272727272728,
"loss": 0.6862,
"step": 47
},
{
"epoch": 1.7297297297297298,
"grad_norm": 0.15049736201763153,
"learning_rate": 0.00014545454545454546,
"loss": 0.6051,
"step": 48
},
{
"epoch": 1.7657657657657657,
"grad_norm": 0.1748289316892624,
"learning_rate": 0.00014363636363636363,
"loss": 0.6521,
"step": 49
},
{
"epoch": 1.8018018018018018,
"grad_norm": 0.12289358675479889,
"learning_rate": 0.00014181818181818184,
"loss": 0.5631,
"step": 50
},
{
"epoch": 1.8378378378378377,
"grad_norm": 0.14275340735912323,
"learning_rate": 0.00014,
"loss": 0.3934,
"step": 51
},
{
"epoch": 1.8738738738738738,
"grad_norm": 0.14242391288280487,
"learning_rate": 0.0001381818181818182,
"loss": 0.4372,
"step": 52
},
{
"epoch": 1.90990990990991,
"grad_norm": 0.1477702260017395,
"learning_rate": 0.00013636363636363637,
"loss": 0.7059,
"step": 53
},
{
"epoch": 1.945945945945946,
"grad_norm": 0.14974962174892426,
"learning_rate": 0.00013454545454545455,
"loss": 0.7635,
"step": 54
},
{
"epoch": 1.981981981981982,
"grad_norm": 0.14689475297927856,
"learning_rate": 0.00013272727272727275,
"loss": 0.7075,
"step": 55
},
{
"epoch": 2.018018018018018,
"grad_norm": 0.1843816339969635,
"learning_rate": 0.00013090909090909093,
"loss": 0.6481,
"step": 56
},
{
"epoch": 2.054054054054054,
"grad_norm": 0.14116476476192474,
"learning_rate": 0.0001290909090909091,
"loss": 0.3951,
"step": 57
},
{
"epoch": 2.09009009009009,
"grad_norm": 0.15613681077957153,
"learning_rate": 0.00012727272727272728,
"loss": 0.4265,
"step": 58
},
{
"epoch": 2.126126126126126,
"grad_norm": 0.1918690949678421,
"learning_rate": 0.00012545454545454546,
"loss": 0.9761,
"step": 59
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.14524200558662415,
"learning_rate": 0.00012363636363636364,
"loss": 0.4906,
"step": 60
},
{
"epoch": 2.1981981981981984,
"grad_norm": 0.16868562996387482,
"learning_rate": 0.00012181818181818183,
"loss": 0.5676,
"step": 61
},
{
"epoch": 2.234234234234234,
"grad_norm": 0.14121738076210022,
"learning_rate": 0.00012,
"loss": 0.5162,
"step": 62
},
{
"epoch": 2.27027027027027,
"grad_norm": 0.180411696434021,
"learning_rate": 0.0001181818181818182,
"loss": 0.7837,
"step": 63
},
{
"epoch": 2.3063063063063063,
"grad_norm": 0.23065026104450226,
"learning_rate": 0.00011636363636363636,
"loss": 0.4959,
"step": 64
},
{
"epoch": 2.3423423423423424,
"grad_norm": 0.1973959058523178,
"learning_rate": 0.00011454545454545456,
"loss": 0.4391,
"step": 65
},
{
"epoch": 2.3783783783783785,
"grad_norm": 0.17862387001514435,
"learning_rate": 0.00011272727272727272,
"loss": 0.4177,
"step": 66
},
{
"epoch": 2.4144144144144146,
"grad_norm": 0.19349342584609985,
"learning_rate": 0.00011090909090909092,
"loss": 0.7445,
"step": 67
},
{
"epoch": 2.4504504504504503,
"grad_norm": 0.1336154341697693,
"learning_rate": 0.00010909090909090909,
"loss": 0.2191,
"step": 68
},
{
"epoch": 2.4864864864864864,
"grad_norm": 0.21199998259544373,
"learning_rate": 0.00010727272727272728,
"loss": 0.4845,
"step": 69
},
{
"epoch": 2.5225225225225225,
"grad_norm": 0.18532893061637878,
"learning_rate": 0.00010545454545454545,
"loss": 0.7252,
"step": 70
},
{
"epoch": 2.5585585585585586,
"grad_norm": 0.14763948321342468,
"learning_rate": 0.00010363636363636364,
"loss": 0.4816,
"step": 71
},
{
"epoch": 2.5945945945945947,
"grad_norm": 0.19213160872459412,
"learning_rate": 0.00010181818181818181,
"loss": 0.4775,
"step": 72
},
{
"epoch": 2.6306306306306304,
"grad_norm": 0.19077633321285248,
"learning_rate": 0.0001,
"loss": 0.451,
"step": 73
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.15648439526557922,
"learning_rate": 9.818181818181818e-05,
"loss": 0.2877,
"step": 74
},
{
"epoch": 2.7027027027027026,
"grad_norm": 0.15609943866729736,
"learning_rate": 9.636363636363637e-05,
"loss": 0.2944,
"step": 75
},
{
"epoch": 2.7387387387387387,
"grad_norm": 0.3236052989959717,
"learning_rate": 9.454545454545455e-05,
"loss": 0.62,
"step": 76
},
{
"epoch": 2.774774774774775,
"grad_norm": 0.222801074385643,
"learning_rate": 9.272727272727273e-05,
"loss": 0.6515,
"step": 77
},
{
"epoch": 2.810810810810811,
"grad_norm": 0.2324117124080658,
"learning_rate": 9.090909090909092e-05,
"loss": 0.5766,
"step": 78
},
{
"epoch": 2.846846846846847,
"grad_norm": 0.20293325185775757,
"learning_rate": 8.90909090909091e-05,
"loss": 0.4531,
"step": 79
},
{
"epoch": 2.8828828828828827,
"grad_norm": 0.20165590941905975,
"learning_rate": 8.727272727272727e-05,
"loss": 0.5847,
"step": 80
},
{
"epoch": 2.918918918918919,
"grad_norm": 0.3000154197216034,
"learning_rate": 8.545454545454545e-05,
"loss": 0.4334,
"step": 81
},
{
"epoch": 2.954954954954955,
"grad_norm": 0.20399592816829681,
"learning_rate": 8.363636363636364e-05,
"loss": 0.2509,
"step": 82
},
{
"epoch": 2.990990990990991,
"grad_norm": 0.2015918642282486,
"learning_rate": 8.181818181818183e-05,
"loss": 0.3644,
"step": 83
},
{
"epoch": 3.027027027027027,
"grad_norm": 0.16283659636974335,
"learning_rate": 8e-05,
"loss": 0.3619,
"step": 84
},
{
"epoch": 3.063063063063063,
"grad_norm": 0.23384951055049896,
"learning_rate": 7.818181818181818e-05,
"loss": 0.4679,
"step": 85
},
{
"epoch": 3.099099099099099,
"grad_norm": 0.3092138171195984,
"learning_rate": 7.636363636363637e-05,
"loss": 0.504,
"step": 86
},
{
"epoch": 3.135135135135135,
"grad_norm": 0.2725450396537781,
"learning_rate": 7.454545454545455e-05,
"loss": 0.3763,
"step": 87
},
{
"epoch": 3.171171171171171,
"grad_norm": 0.2366735339164734,
"learning_rate": 7.272727272727273e-05,
"loss": 0.3513,
"step": 88
},
{
"epoch": 3.2072072072072073,
"grad_norm": 0.2573714554309845,
"learning_rate": 7.090909090909092e-05,
"loss": 0.3202,
"step": 89
},
{
"epoch": 3.2432432432432434,
"grad_norm": 0.39939162135124207,
"learning_rate": 6.90909090909091e-05,
"loss": 0.4374,
"step": 90
},
{
"epoch": 3.279279279279279,
"grad_norm": 0.3089916408061981,
"learning_rate": 6.727272727272727e-05,
"loss": 0.4418,
"step": 91
},
{
"epoch": 3.315315315315315,
"grad_norm": 0.26053813099861145,
"learning_rate": 6.545454545454546e-05,
"loss": 0.3301,
"step": 92
},
{
"epoch": 3.3513513513513513,
"grad_norm": 0.25036731362342834,
"learning_rate": 6.363636363636364e-05,
"loss": 0.3525,
"step": 93
},
{
"epoch": 3.3873873873873874,
"grad_norm": 0.4556732475757599,
"learning_rate": 6.181818181818182e-05,
"loss": 0.3305,
"step": 94
},
{
"epoch": 3.4234234234234235,
"grad_norm": 0.26551949977874756,
"learning_rate": 6e-05,
"loss": 0.2769,
"step": 95
},
{
"epoch": 3.4594594594594597,
"grad_norm": 0.2707691192626953,
"learning_rate": 5.818181818181818e-05,
"loss": 0.2242,
"step": 96
},
{
"epoch": 3.4954954954954953,
"grad_norm": 0.2323060780763626,
"learning_rate": 5.636363636363636e-05,
"loss": 0.4295,
"step": 97
},
{
"epoch": 3.5315315315315314,
"grad_norm": 0.26477906107902527,
"learning_rate": 5.4545454545454546e-05,
"loss": 0.3623,
"step": 98
},
{
"epoch": 3.5675675675675675,
"grad_norm": 0.2650379538536072,
"learning_rate": 5.272727272727272e-05,
"loss": 0.2648,
"step": 99
},
{
"epoch": 3.6036036036036037,
"grad_norm": 0.33467838168144226,
"learning_rate": 5.090909090909091e-05,
"loss": 0.3563,
"step": 100
},
{
"epoch": 3.6396396396396398,
"grad_norm": 0.2872948944568634,
"learning_rate": 4.909090909090909e-05,
"loss": 0.4959,
"step": 101
},
{
"epoch": 3.6756756756756754,
"grad_norm": 0.29369983077049255,
"learning_rate": 4.7272727272727275e-05,
"loss": 0.2455,
"step": 102
},
{
"epoch": 3.7117117117117115,
"grad_norm": 0.3010379374027252,
"learning_rate": 4.545454545454546e-05,
"loss": 0.4271,
"step": 103
},
{
"epoch": 3.7477477477477477,
"grad_norm": 0.31059783697128296,
"learning_rate": 4.3636363636363636e-05,
"loss": 0.4152,
"step": 104
},
{
"epoch": 3.7837837837837838,
"grad_norm": 0.2542209327220917,
"learning_rate": 4.181818181818182e-05,
"loss": 0.382,
"step": 105
},
{
"epoch": 3.81981981981982,
"grad_norm": 0.2900529205799103,
"learning_rate": 4e-05,
"loss": 0.2371,
"step": 106
},
{
"epoch": 3.855855855855856,
"grad_norm": 0.25713253021240234,
"learning_rate": 3.818181818181819e-05,
"loss": 0.2636,
"step": 107
},
{
"epoch": 3.891891891891892,
"grad_norm": 0.2913016378879547,
"learning_rate": 3.6363636363636364e-05,
"loss": 0.4829,
"step": 108
},
{
"epoch": 3.9279279279279278,
"grad_norm": 0.25630098581314087,
"learning_rate": 3.454545454545455e-05,
"loss": 0.2281,
"step": 109
},
{
"epoch": 3.963963963963964,
"grad_norm": 0.29647207260131836,
"learning_rate": 3.272727272727273e-05,
"loss": 0.3197,
"step": 110
},
{
"epoch": 4.0,
"grad_norm": 0.3527073860168457,
"learning_rate": 3.090909090909091e-05,
"loss": 0.3282,
"step": 111
},
{
"epoch": 4.036036036036036,
"grad_norm": 0.482024610042572,
"learning_rate": 2.909090909090909e-05,
"loss": 0.255,
"step": 112
},
{
"epoch": 4.072072072072072,
"grad_norm": 0.22163565456867218,
"learning_rate": 2.7272727272727273e-05,
"loss": 0.1771,
"step": 113
},
{
"epoch": 4.108108108108108,
"grad_norm": 0.25971901416778564,
"learning_rate": 2.5454545454545454e-05,
"loss": 0.2633,
"step": 114
},
{
"epoch": 4.1441441441441444,
"grad_norm": 0.30024221539497375,
"learning_rate": 2.3636363636363637e-05,
"loss": 0.2706,
"step": 115
},
{
"epoch": 4.18018018018018,
"grad_norm": 0.26607680320739746,
"learning_rate": 2.1818181818181818e-05,
"loss": 0.2513,
"step": 116
},
{
"epoch": 4.216216216216216,
"grad_norm": 0.2651140093803406,
"learning_rate": 2e-05,
"loss": 0.2306,
"step": 117
},
{
"epoch": 4.252252252252252,
"grad_norm": 0.29040372371673584,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.348,
"step": 118
},
{
"epoch": 4.288288288288288,
"grad_norm": 0.3690294623374939,
"learning_rate": 1.6363636363636366e-05,
"loss": 0.1826,
"step": 119
},
{
"epoch": 4.324324324324325,
"grad_norm": 0.22308969497680664,
"learning_rate": 1.4545454545454545e-05,
"loss": 0.1656,
"step": 120
}
],
"logging_steps": 1,
"max_steps": 120,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1955401487160934e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}