Mistral 7B Instruct v02 Function Calling Adapter
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 4.780884742736816,
"learning_rate": 4.9985889393594345e-05,
"loss": 1.1252,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 3.6875321865081787,
"learning_rate": 4.992859235371958e-05,
"loss": 0.9595,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 3.3574156761169434,
"learning_rate": 4.9827327946335875e-05,
"loss": 0.5891,
"step": 15
},
{
"epoch": 0.05,
"grad_norm": 1.8378690481185913,
"learning_rate": 4.968227477476554e-05,
"loss": 0.8209,
"step": 20
},
{
"epoch": 0.07,
"grad_norm": 1.445030927658081,
"learning_rate": 4.949368867399567e-05,
"loss": 0.6718,
"step": 25
},
{
"epoch": 0.08,
"grad_norm": 1.878016710281372,
"learning_rate": 4.9261902259453614e-05,
"loss": 0.4986,
"step": 30
},
{
"epoch": 0.09,
"grad_norm": 1.1748273372650146,
"learning_rate": 4.898732434036244e-05,
"loss": 0.5459,
"step": 35
},
{
"epoch": 0.11,
"grad_norm": 2.1013641357421875,
"learning_rate": 4.867043919871076e-05,
"loss": 0.436,
"step": 40
},
{
"epoch": 0.12,
"grad_norm": 0.9981414675712585,
"learning_rate": 4.8311805735108894e-05,
"loss": 0.4354,
"step": 45
},
{
"epoch": 0.13,
"grad_norm": 1.1620482206344604,
"learning_rate": 4.791205648303775e-05,
"loss": 0.3934,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 0.8525057435035706,
"learning_rate": 4.747189649322894e-05,
"loss": 0.3605,
"step": 55
},
{
"epoch": 0.16,
"grad_norm": 0.8548981547355652,
"learning_rate": 4.699210209014394e-05,
"loss": 0.4358,
"step": 60
},
{
"epoch": 0.17,
"grad_norm": 1.0279943943023682,
"learning_rate": 4.6473519502745476e-05,
"loss": 0.2643,
"step": 65
},
{
"epoch": 0.19,
"grad_norm": 1.06588613986969,
"learning_rate": 4.591706337197597e-05,
"loss": 0.2999,
"step": 70
},
{
"epoch": 0.2,
"grad_norm": 1.4997105598449707,
"learning_rate": 4.532371513757564e-05,
"loss": 0.3291,
"step": 75
},
{
"epoch": 0.21,
"grad_norm": 0.7532416582107544,
"learning_rate": 4.469452130708543e-05,
"loss": 0.3229,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 1.3268669843673706,
"learning_rate": 4.4030591610087624e-05,
"loss": 0.2938,
"step": 85
},
{
"epoch": 0.24,
"grad_norm": 2.0165152549743652,
"learning_rate": 4.3333097040939764e-05,
"loss": 0.2999,
"step": 90
},
{
"epoch": 0.25,
"grad_norm": 1.5449421405792236,
"learning_rate": 4.260326779345393e-05,
"loss": 0.3433,
"step": 95
},
{
"epoch": 0.27,
"grad_norm": 1.3084044456481934,
"learning_rate": 4.184239109116393e-05,
"loss": 0.3539,
"step": 100
},
{
"epoch": 0.28,
"grad_norm": 1.125744342803955,
"learning_rate": 4.105180891700746e-05,
"loss": 0.3158,
"step": 105
},
{
"epoch": 0.29,
"grad_norm": 1.8335057497024536,
"learning_rate": 4.023291564642711e-05,
"loss": 0.2642,
"step": 110
},
{
"epoch": 0.31,
"grad_norm": 1.0940334796905518,
"learning_rate": 3.938715558806525e-05,
"loss": 0.3195,
"step": 115
},
{
"epoch": 0.32,
"grad_norm": 1.5591059923171997,
"learning_rate": 3.851602043638994e-05,
"loss": 0.3332,
"step": 120
},
{
"epoch": 0.33,
"grad_norm": 1.333927035331726,
"learning_rate": 3.7621046640744975e-05,
"loss": 0.3035,
"step": 125
},
{
"epoch": 0.35,
"grad_norm": 2.269136905670166,
"learning_rate": 3.670381269546429e-05,
"loss": 0.2444,
"step": 130
},
{
"epoch": 0.36,
"grad_norm": 1.834069013595581,
"learning_rate": 3.5765936355830356e-05,
"loss": 0.2488,
"step": 135
},
{
"epoch": 0.37,
"grad_norm": 1.8295272588729858,
"learning_rate": 3.480907178478654e-05,
"loss": 0.3412,
"step": 140
},
{
"epoch": 0.39,
"grad_norm": 1.2500354051589966,
"learning_rate": 3.383490663543635e-05,
"loss": 0.2906,
"step": 145
},
{
"epoch": 0.4,
"grad_norm": 0.8438131213188171,
"learning_rate": 3.2845159074474806e-05,
"loss": 0.3942,
"step": 150
},
{
"epoch": 0.41,
"grad_norm": 1.1667203903198242,
"learning_rate": 3.1841574751802076e-05,
"loss": 0.3231,
"step": 155
},
{
"epoch": 0.43,
"grad_norm": 1.511820912361145,
"learning_rate": 3.082592372166412e-05,
"loss": 0.3024,
"step": 160
},
{
"epoch": 0.44,
"grad_norm": 1.2225658893585205,
"learning_rate": 2.9799997320750507e-05,
"loss": 0.2341,
"step": 165
},
{
"epoch": 0.45,
"grad_norm": 1.8434339761734009,
"learning_rate": 2.87656050087558e-05,
"loss": 0.2748,
"step": 170
},
{
"epoch": 0.47,
"grad_norm": 1.3489751815795898,
"learning_rate": 2.7724571176976732e-05,
"loss": 0.334,
"step": 175
},
{
"epoch": 0.48,
"grad_norm": 1.535201907157898,
"learning_rate": 2.667873193057407e-05,
"loss": 0.2567,
"step": 180
},
{
"epoch": 0.49,
"grad_norm": 1.008681058883667,
"learning_rate": 2.562993185017431e-05,
"loss": 0.2683,
"step": 185
},
{
"epoch": 0.51,
"grad_norm": 1.9973254203796387,
"learning_rate": 2.4580020738523e-05,
"loss": 0.239,
"step": 190
},
{
"epoch": 0.52,
"grad_norm": 1.8730356693267822,
"learning_rate": 2.353085035792756e-05,
"loss": 0.3383,
"step": 195
},
{
"epoch": 0.53,
"grad_norm": 2.763739824295044,
"learning_rate": 2.2484271164243996e-05,
"loss": 0.2523,
"step": 200
},
{
"epoch": 0.55,
"grad_norm": 1.719117522239685,
"learning_rate": 2.1442129043167874e-05,
"loss": 0.3107,
"step": 205
},
{
"epoch": 0.56,
"grad_norm": 1.4132226705551147,
"learning_rate": 2.0406262054585738e-05,
"loss": 0.3588,
"step": 210
},
{
"epoch": 0.57,
"grad_norm": 1.4880174398422241,
"learning_rate": 1.937849719072931e-05,
"loss": 0.2298,
"step": 215
},
{
"epoch": 0.59,
"grad_norm": 1.2804921865463257,
"learning_rate": 1.836064715384989e-05,
"loss": 0.312,
"step": 220
},
{
"epoch": 0.6,
"grad_norm": 1.978888988494873,
"learning_rate": 1.7354507159096647e-05,
"loss": 0.2606,
"step": 225
},
{
"epoch": 0.61,
"grad_norm": 2.160645008087158,
"learning_rate": 1.6361851768237324e-05,
"loss": 0.2912,
"step": 230
},
{
"epoch": 0.63,
"grad_norm": 0.8796546459197998,
"learning_rate": 1.5384431759806083e-05,
"loss": 0.2589,
"step": 235
},
{
"epoch": 0.64,
"grad_norm": 1.5918033123016357,
"learning_rate": 1.4423971041198556e-05,
"loss": 0.3087,
"step": 240
},
{
"epoch": 0.65,
"grad_norm": 1.169569969177246,
"learning_rate": 1.3482163608160408e-05,
"loss": 0.2531,
"step": 245
},
{
"epoch": 0.67,
"grad_norm": 1.3483657836914062,
"learning_rate": 1.2560670557032108e-05,
"loss": 0.2408,
"step": 250
},
{
"epoch": 0.68,
"grad_norm": 1.515302300453186,
"learning_rate": 1.1661117155019293e-05,
"loss": 0.2372,
"step": 255
},
{
"epoch": 0.69,
"grad_norm": 1.2919508218765259,
"learning_rate": 1.0785089973656337e-05,
"loss": 0.3127,
"step": 260
},
{
"epoch": 0.71,
"grad_norm": 2.6820788383483887,
"learning_rate": 9.934134090518593e-06,
"loss": 0.3022,
"step": 265
},
{
"epoch": 0.72,
"grad_norm": 0.8853113055229187,
"learning_rate": 9.109750364118924e-06,
"loss": 0.2447,
"step": 270
},
{
"epoch": 0.73,
"grad_norm": 0.7025986909866333,
"learning_rate": 8.313392786794833e-06,
"loss": 0.26,
"step": 275
},
{
"epoch": 0.75,
"grad_norm": 1.3302998542785645,
"learning_rate": 7.546465920254974e-06,
"loss": 0.3202,
"step": 280
},
{
"epoch": 0.76,
"grad_norm": 2.354292392730713,
"learning_rate": 6.810322418308085e-06,
"loss": 0.2997,
"step": 285
},
{
"epoch": 0.77,
"grad_norm": 2.33747935295105,
"learning_rate": 6.106260641143546e-06,
"loss": 0.2958,
"step": 290
},
{
"epoch": 0.79,
"grad_norm": 1.9870978593826294,
"learning_rate": 5.435522365371376e-06,
"loss": 0.2598,
"step": 295
},
{
"epoch": 0.8,
"grad_norm": 0.8262547254562378,
"learning_rate": 4.799290593860525e-06,
"loss": 0.2752,
"step": 300
},
{
"epoch": 0.81,
"grad_norm": 1.7228032350540161,
"learning_rate": 4.198687469238297e-06,
"loss": 0.2258,
"step": 305
},
{
"epoch": 0.83,
"grad_norm": 1.789536952972412,
"learning_rate": 3.6347722947309843e-06,
"loss": 0.2819,
"step": 310
},
{
"epoch": 0.84,
"grad_norm": 2.036774158477783,
"learning_rate": 3.108539665836388e-06,
"loss": 0.3051,
"step": 315
},
{
"epoch": 0.85,
"grad_norm": 0.786209762096405,
"learning_rate": 2.6209177161234445e-06,
"loss": 0.3378,
"step": 320
},
{
"epoch": 0.87,
"grad_norm": 0.6244500875473022,
"learning_rate": 2.1727664802529216e-06,
"loss": 0.2508,
"step": 325
},
{
"epoch": 0.88,
"grad_norm": 1.9054006338119507,
"learning_rate": 1.7648763771063837e-06,
"loss": 0.2559,
"step": 330
},
{
"epoch": 0.89,
"grad_norm": 2.387683868408203,
"learning_rate": 1.3979668156987425e-06,
"loss": 0.2678,
"step": 335
},
{
"epoch": 0.91,
"grad_norm": 1.6146109104156494,
"learning_rate": 1.0726849263332256e-06,
"loss": 0.1965,
"step": 340
},
{
"epoch": 0.92,
"grad_norm": 3.5136632919311523,
"learning_rate": 7.896044192366586e-07,
"loss": 0.1523,
"step": 345
},
{
"epoch": 0.93,
"grad_norm": 1.1758712530136108,
"learning_rate": 5.492245726881201e-07,
"loss": 0.2632,
"step": 350
},
{
"epoch": 0.95,
"grad_norm": 1.7988349199295044,
"learning_rate": 3.51969352425624e-07,
"loss": 0.2846,
"step": 355
},
{
"epoch": 0.96,
"grad_norm": 2.6087231636047363,
"learning_rate": 1.981866638839952e-07,
"loss": 0.3034,
"step": 360
},
{
"epoch": 0.97,
"grad_norm": 2.0585777759552,
"learning_rate": 8.814773858275004e-08,
"loss": 0.2232,
"step": 365
},
{
"epoch": 0.99,
"grad_norm": 2.155041217803955,
"learning_rate": 2.2046655746280064e-08,
"loss": 0.2931,
"step": 370
},
{
"epoch": 1.0,
"grad_norm": 1.2778571844100952,
"learning_rate": 0.0,
"loss": 0.2683,
"step": 375
},
{
"epoch": 1.0,
"step": 375,
"total_flos": 6.39535360058327e+16,
"train_loss": 0.33599581162134806,
"train_runtime": 7953.7418,
"train_samples_per_second": 0.377,
"train_steps_per_second": 0.047
}
],
"logging_steps": 5,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"total_flos": 6.39535360058327e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
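
The JSON above is a Hugging Face Trainer state log: `log_history` holds one record per logging interval (`logging_steps: 5`, so steps 5, 10, ..., 375), each with `loss`, `grad_norm`, and `learning_rate`, and a final summary record with `train_loss`, `train_runtime`, and throughput figures. The sketch below is not part of the checkpoint; it shows one way to load the file and pull out the loss and learning-rate curves, assuming it is saved locally under the usual name `trainer_state.json` (adjust the path to your checkout).

```python
# Minimal sketch: inspect the training log in this trainer_state.json.
# The file path is an assumption; point it at wherever you saved the file.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Records that carry a "loss" key are the per-interval log entries
# (every 5 steps here); the last entry is the run summary instead.
log_entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in log_entries]
losses = [e["loss"] for e in log_entries]
lrs = [e["learning_rate"] for e in log_entries]

summary = state["log_history"][-1]
print(f"logged intervals: {len(steps)}, final step: {state['global_step']}")
print(f"reported mean train loss: {summary['train_loss']:.4f}")
print(f"last logged loss: {losses[-1]:.4f}, last learning rate: {lrs[-1]:.2e}")
```

With the values recorded above, this prints 75 logged intervals ending at step 375, a reported mean train loss of about 0.336, and a final logged loss of 0.2683 at a learning rate of 0.0 (the schedule has fully decayed by the last step).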