{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 9760,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010245901639344262,
"grad_norm": 0.055161599069833755,
"learning_rate": 2.5618035096708087e-07,
"loss": 2.2315,
"step": 100
},
{
"epoch": 0.020491803278688523,
"grad_norm": 0.05540880188345909,
"learning_rate": 5.123607019341617e-07,
"loss": 2.2296,
"step": 200
},
{
"epoch": 0.030737704918032786,
"grad_norm": 0.0638064593076706,
"learning_rate": 7.685410529012426e-07,
"loss": 2.237,
"step": 300
},
{
"epoch": 0.040983606557377046,
"grad_norm": 0.0683213621377945,
"learning_rate": 1.0247214038683235e-06,
"loss": 2.2204,
"step": 400
},
{
"epoch": 0.05122950819672131,
"grad_norm": 0.08443646132946014,
"learning_rate": 1.2809017548354041e-06,
"loss": 2.2304,
"step": 500
},
{
"epoch": 0.06147540983606557,
"grad_norm": 0.07605498284101486,
"learning_rate": 1.5370821058024852e-06,
"loss": 2.2239,
"step": 600
},
{
"epoch": 0.07172131147540983,
"grad_norm": 0.08266434073448181,
"learning_rate": 1.7932624567695659e-06,
"loss": 2.2219,
"step": 700
},
{
"epoch": 0.08196721311475409,
"grad_norm": 0.10109508037567139,
"learning_rate": 2.049442807736647e-06,
"loss": 2.2004,
"step": 800
},
{
"epoch": 0.09221311475409837,
"grad_norm": 0.11055849492549896,
"learning_rate": 2.3056231587037276e-06,
"loss": 2.1945,
"step": 900
},
{
"epoch": 0.10245901639344263,
"grad_norm": 0.1108393445611,
"learning_rate": 2.5618035096708083e-06,
"loss": 2.1769,
"step": 1000
},
{
"epoch": 0.11270491803278689,
"grad_norm": 0.14474108815193176,
"learning_rate": 2.8179838606378894e-06,
"loss": 2.1904,
"step": 1100
},
{
"epoch": 0.12295081967213115,
"grad_norm": 0.13787466287612915,
"learning_rate": 3.0741642116049704e-06,
"loss": 2.1765,
"step": 1200
},
{
"epoch": 0.13319672131147542,
"grad_norm": 0.14868797361850739,
"learning_rate": 3.330344562572051e-06,
"loss": 2.1609,
"step": 1300
},
{
"epoch": 0.14344262295081966,
"grad_norm": 0.15633544325828552,
"learning_rate": 3.5865249135391318e-06,
"loss": 2.1708,
"step": 1400
},
{
"epoch": 0.15368852459016394,
"grad_norm": 0.17003007233142853,
"learning_rate": 3.842705264506213e-06,
"loss": 2.1521,
"step": 1500
},
{
"epoch": 0.16393442622950818,
"grad_norm": 0.1722685843706131,
"learning_rate": 4.098885615473294e-06,
"loss": 2.1417,
"step": 1600
},
{
"epoch": 0.17418032786885246,
"grad_norm": 0.18862290680408478,
"learning_rate": 4.3550659664403746e-06,
"loss": 2.1328,
"step": 1700
},
{
"epoch": 0.18442622950819673,
"grad_norm": 0.211067795753479,
"learning_rate": 4.611246317407455e-06,
"loss": 2.1222,
"step": 1800
},
{
"epoch": 0.19467213114754098,
"grad_norm": 0.21424412727355957,
"learning_rate": 4.867426668374536e-06,
"loss": 2.1448,
"step": 1900
},
{
"epoch": 0.20491803278688525,
"grad_norm": 0.21230654418468475,
"learning_rate": 5.1236070193416165e-06,
"loss": 2.1189,
"step": 2000
},
{
"epoch": 0.2151639344262295,
"grad_norm": 0.2188699096441269,
"learning_rate": 5.379787370308698e-06,
"loss": 2.1208,
"step": 2100
},
{
"epoch": 0.22540983606557377,
"grad_norm": 0.23276151716709137,
"learning_rate": 5.635967721275779e-06,
"loss": 2.0994,
"step": 2200
},
{
"epoch": 0.23565573770491804,
"grad_norm": 0.25028660893440247,
"learning_rate": 5.892148072242859e-06,
"loss": 2.0921,
"step": 2300
},
{
"epoch": 0.2459016393442623,
"grad_norm": 0.23129107058048248,
"learning_rate": 6.148328423209941e-06,
"loss": 2.0986,
"step": 2400
},
{
"epoch": 0.25614754098360654,
"grad_norm": 0.30324786901474,
"learning_rate": 6.404508774177021e-06,
"loss": 2.0998,
"step": 2500
},
{
"epoch": 0.26639344262295084,
"grad_norm": 0.3137739896774292,
"learning_rate": 6.660689125144102e-06,
"loss": 2.0831,
"step": 2600
},
{
"epoch": 0.2766393442622951,
"grad_norm": 0.29233041405677795,
"learning_rate": 6.916869476111183e-06,
"loss": 2.072,
"step": 2700
},
{
"epoch": 0.28688524590163933,
"grad_norm": 0.2947608232498169,
"learning_rate": 7.1730498270782635e-06,
"loss": 2.0713,
"step": 2800
},
{
"epoch": 0.29713114754098363,
"grad_norm": 0.2987135350704193,
"learning_rate": 7.429230178045345e-06,
"loss": 2.0745,
"step": 2900
},
{
"epoch": 0.3073770491803279,
"grad_norm": 0.3173224627971649,
"learning_rate": 7.685410529012427e-06,
"loss": 2.0552,
"step": 3000
},
{
"epoch": 0.3176229508196721,
"grad_norm": 0.3080308139324188,
"learning_rate": 7.941590879979506e-06,
"loss": 2.058,
"step": 3100
},
{
"epoch": 0.32786885245901637,
"grad_norm": 0.3344276249408722,
"learning_rate": 8.197771230946588e-06,
"loss": 2.0335,
"step": 3200
},
{
"epoch": 0.33811475409836067,
"grad_norm": 0.35821136832237244,
"learning_rate": 8.453951581913668e-06,
"loss": 2.0389,
"step": 3300
},
{
"epoch": 0.3483606557377049,
"grad_norm": 0.34500202536582947,
"learning_rate": 8.710131932880749e-06,
"loss": 2.0473,
"step": 3400
},
{
"epoch": 0.35860655737704916,
"grad_norm": 0.3924405872821808,
"learning_rate": 8.96631228384783e-06,
"loss": 2.0413,
"step": 3500
},
{
"epoch": 0.36885245901639346,
"grad_norm": 0.3596484065055847,
"learning_rate": 9.22249263481491e-06,
"loss": 2.0408,
"step": 3600
},
{
"epoch": 0.3790983606557377,
"grad_norm": 0.3991853892803192,
"learning_rate": 9.478672985781992e-06,
"loss": 2.0143,
"step": 3700
},
{
"epoch": 0.38934426229508196,
"grad_norm": 0.36749976873397827,
"learning_rate": 9.734853336749072e-06,
"loss": 2.0248,
"step": 3800
},
{
"epoch": 0.39959016393442626,
"grad_norm": 0.41294941306114197,
"learning_rate": 9.991033687716153e-06,
"loss": 2.0214,
"step": 3900
},
{
"epoch": 0.4098360655737705,
"grad_norm": 0.40261194109916687,
"learning_rate": 1.0247214038683233e-05,
"loss": 2.0089,
"step": 4000
},
{
"epoch": 0.42008196721311475,
"grad_norm": 0.3836148977279663,
"learning_rate": 1.0503394389650315e-05,
"loss": 2.0109,
"step": 4100
},
{
"epoch": 0.430327868852459,
"grad_norm": 0.38613057136535645,
"learning_rate": 1.0759574740617396e-05,
"loss": 1.9881,
"step": 4200
},
{
"epoch": 0.4405737704918033,
"grad_norm": 0.39355090260505676,
"learning_rate": 1.1015755091584478e-05,
"loss": 1.9919,
"step": 4300
},
{
"epoch": 0.45081967213114754,
"grad_norm": 0.5257683992385864,
"learning_rate": 1.1271935442551557e-05,
"loss": 1.9929,
"step": 4400
},
{
"epoch": 0.4610655737704918,
"grad_norm": 0.43797537684440613,
"learning_rate": 1.1528115793518637e-05,
"loss": 2.0092,
"step": 4500
},
{
"epoch": 0.4713114754098361,
"grad_norm": 0.5497995018959045,
"learning_rate": 1.1784296144485719e-05,
"loss": 1.9967,
"step": 4600
},
{
"epoch": 0.48155737704918034,
"grad_norm": 0.4768752157688141,
"learning_rate": 1.20404764954528e-05,
"loss": 1.9921,
"step": 4700
},
{
"epoch": 0.4918032786885246,
"grad_norm": 0.43218129873275757,
"learning_rate": 1.2296656846419882e-05,
"loss": 1.9883,
"step": 4800
},
{
"epoch": 0.5020491803278688,
"grad_norm": 0.47497090697288513,
"learning_rate": 1.2552837197386962e-05,
"loss": 1.9799,
"step": 4900
},
{
"epoch": 0.5122950819672131,
"grad_norm": 0.4906615912914276,
"learning_rate": 1.2809017548354041e-05,
"loss": 1.9762,
"step": 5000
},
{
"epoch": 0.5225409836065574,
"grad_norm": 0.5236986875534058,
"learning_rate": 1.3065197899321123e-05,
"loss": 1.9736,
"step": 5100
},
{
"epoch": 0.5327868852459017,
"grad_norm": 0.5295742750167847,
"learning_rate": 1.3318816446778533e-05,
"loss": 1.9762,
"step": 5200
},
{
"epoch": 0.5430327868852459,
"grad_norm": 0.4924796521663666,
"learning_rate": 1.3574996797745615e-05,
"loss": 1.9709,
"step": 5300
},
{
"epoch": 0.5532786885245902,
"grad_norm": 0.5359669327735901,
"learning_rate": 1.3831177148712696e-05,
"loss": 1.9594,
"step": 5400
},
{
"epoch": 0.5635245901639344,
"grad_norm": 0.49581173062324524,
"learning_rate": 1.4087357499679775e-05,
"loss": 1.9787,
"step": 5500
},
{
"epoch": 0.5737704918032787,
"grad_norm": 0.49295076727867126,
"learning_rate": 1.4343537850646856e-05,
"loss": 1.9628,
"step": 5600
},
{
"epoch": 0.5840163934426229,
"grad_norm": 0.6119683980941772,
"learning_rate": 1.4599718201613938e-05,
"loss": 1.9622,
"step": 5700
},
{
"epoch": 0.5942622950819673,
"grad_norm": 0.5446974039077759,
"learning_rate": 1.4855898552581019e-05,
"loss": 1.9547,
"step": 5800
},
{
"epoch": 0.6045081967213115,
"grad_norm": 0.5105884671211243,
"learning_rate": 1.5112078903548099e-05,
"loss": 1.9477,
"step": 5900
},
{
"epoch": 0.6147540983606558,
"grad_norm": 0.5642023682594299,
"learning_rate": 1.536825925451518e-05,
"loss": 1.9433,
"step": 6000
},
{
"epoch": 0.625,
"grad_norm": 0.4802306294441223,
"learning_rate": 1.562443960548226e-05,
"loss": 1.984,
"step": 6100
},
{
"epoch": 0.6352459016393442,
"grad_norm": 0.5581754446029663,
"learning_rate": 1.588061995644934e-05,
"loss": 1.9341,
"step": 6200
},
{
"epoch": 0.6454918032786885,
"grad_norm": 0.5883158445358276,
"learning_rate": 1.6136800307416423e-05,
"loss": 1.9394,
"step": 6300
},
{
"epoch": 0.6557377049180327,
"grad_norm": 0.5291092395782471,
"learning_rate": 1.6392980658383503e-05,
"loss": 1.9421,
"step": 6400
},
{
"epoch": 0.6659836065573771,
"grad_norm": 0.6235113739967346,
"learning_rate": 1.6649161009350586e-05,
"loss": 1.9602,
"step": 6500
},
{
"epoch": 0.6762295081967213,
"grad_norm": 0.49288830161094666,
"learning_rate": 1.6905341360317663e-05,
"loss": 1.9346,
"step": 6600
},
{
"epoch": 0.6864754098360656,
"grad_norm": 0.6099859476089478,
"learning_rate": 1.7161521711284746e-05,
"loss": 1.9431,
"step": 6700
},
{
"epoch": 0.6967213114754098,
"grad_norm": 0.5682905316352844,
"learning_rate": 1.7417702062251826e-05,
"loss": 1.9137,
"step": 6800
},
{
"epoch": 0.7069672131147541,
"grad_norm": 0.7166417241096497,
"learning_rate": 1.767388241321891e-05,
"loss": 1.9391,
"step": 6900
},
{
"epoch": 0.7172131147540983,
"grad_norm": 0.5477021336555481,
"learning_rate": 1.793006276418599e-05,
"loss": 1.9429,
"step": 7000
},
{
"epoch": 0.7274590163934426,
"grad_norm": 0.54074627161026,
"learning_rate": 1.818624311515307e-05,
"loss": 1.9384,
"step": 7100
},
{
"epoch": 0.7377049180327869,
"grad_norm": 0.5290340185165405,
"learning_rate": 1.8442423466120148e-05,
"loss": 1.9356,
"step": 7200
},
{
"epoch": 0.7479508196721312,
"grad_norm": 0.5777207612991333,
"learning_rate": 1.869860381708723e-05,
"loss": 1.9184,
"step": 7300
},
{
"epoch": 0.7581967213114754,
"grad_norm": 0.6420373320579529,
"learning_rate": 1.895478416805431e-05,
"loss": 1.9236,
"step": 7400
},
{
"epoch": 0.7684426229508197,
"grad_norm": 0.5742615461349487,
"learning_rate": 1.9210964519021394e-05,
"loss": 1.9222,
"step": 7500
},
{
"epoch": 0.7786885245901639,
"grad_norm": 0.613657534122467,
"learning_rate": 1.946714486998847e-05,
"loss": 1.9362,
"step": 7600
},
{
"epoch": 0.7889344262295082,
"grad_norm": 0.5610764622688293,
"learning_rate": 1.9723325220955554e-05,
"loss": 1.9035,
"step": 7700
},
{
"epoch": 0.7991803278688525,
"grad_norm": 0.6575483083724976,
"learning_rate": 1.9979505571922634e-05,
"loss": 1.9312,
"step": 7800
},
{
"epoch": 0.8094262295081968,
"grad_norm": 0.5548086166381836,
"learning_rate": 1.9890692959274893e-05,
"loss": 1.9221,
"step": 7900
},
{
"epoch": 0.819672131147541,
"grad_norm": 0.6601373553276062,
"learning_rate": 1.952683486776797e-05,
"loss": 1.9076,
"step": 8000
},
{
"epoch": 0.8299180327868853,
"grad_norm": 0.5061026215553284,
"learning_rate": 1.891699257502426e-05,
"loss": 1.9173,
"step": 8100
},
{
"epoch": 0.8401639344262295,
"grad_norm": 0.5650314688682556,
"learning_rate": 1.807691229385665e-05,
"loss": 1.9175,
"step": 8200
},
{
"epoch": 0.8504098360655737,
"grad_norm": 0.7669892311096191,
"learning_rate": 1.7028285013959614e-05,
"loss": 1.9074,
"step": 8300
},
{
"epoch": 0.860655737704918,
"grad_norm": 0.6210095286369324,
"learning_rate": 1.5798186437538642e-05,
"loss": 1.9109,
"step": 8400
},
{
"epoch": 0.8709016393442623,
"grad_norm": 0.6777961254119873,
"learning_rate": 1.4418377880913716e-05,
"loss": 1.9079,
"step": 8500
},
{
"epoch": 0.8811475409836066,
"grad_norm": 0.5773517489433289,
"learning_rate": 1.2924486192914705e-05,
"loss": 1.9298,
"step": 8600
},
{
"epoch": 0.8913934426229508,
"grad_norm": 0.5826138257980347,
"learning_rate": 1.1355083864689736e-05,
"loss": 1.9094,
"step": 8700
},
{
"epoch": 0.9016393442622951,
"grad_norm": 0.6318073868751526,
"learning_rate": 9.750693082619274e-06,
"loss": 1.9103,
"step": 8800
},
{
"epoch": 0.9118852459016393,
"grad_norm": 0.6698086261749268,
"learning_rate": 8.15273943982811e-06,
"loss": 1.9159,
"step": 8900
},
{
"epoch": 0.9221311475409836,
"grad_norm": 0.612259566783905,
"learning_rate": 6.602482321609596e-06,
"loss": 1.9195,
"step": 9000
},
{
"epoch": 0.9323770491803278,
"grad_norm": 0.5766026973724365,
"learning_rate": 5.139949582359195e-06,
"loss": 1.9105,
"step": 9100
},
{
"epoch": 0.9426229508196722,
"grad_norm": 0.6413909196853638,
"learning_rate": 3.8029040208070833e-06,
"loss": 1.8875,
"step": 9200
},
{
"epoch": 0.9528688524590164,
"grad_norm": 0.651620090007782,
"learning_rate": 2.6258683393030516e-06,
"loss": 1.9158,
"step": 9300
},
{
"epoch": 0.9631147540983607,
"grad_norm": 0.6111190319061279,
"learning_rate": 1.6392337628405675e-06,
"loss": 1.9242,
"step": 9400
},
{
"epoch": 0.9733606557377049,
"grad_norm": 0.6417702436447144,
"learning_rate": 8.68475333400769e-07,
"loss": 1.891,
"step": 9500
},
{
"epoch": 0.9836065573770492,
"grad_norm": 0.5989698767662048,
"learning_rate": 3.3762624823906577e-07,
"loss": 1.9093,
"step": 9600
},
{
"epoch": 0.9938524590163934,
"grad_norm": 0.7152329683303833,
"learning_rate": 4.9692246345985905e-08,
"loss": 1.8982,
"step": 9700
},
{
"epoch": 1.0,
"step": 9760,
"total_flos": 1.421397042852266e+18,
"train_loss": 2.0159098046724915,
"train_runtime": 3573.2428,
"train_samples_per_second": 43.7,
"train_steps_per_second": 2.731
}
],
"logging_steps": 100,
"max_steps": 9760,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.421397042852266e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}