{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 144,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.000000000000001e-06,
      "loss": 2.065,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.0734,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.2e-05,
      "loss": 2.0027,
      "step": 3
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.8999,
      "step": 4
    },
    {
      "epoch": 0.03,
      "learning_rate": 2e-05,
      "loss": 2.0094,
      "step": 5
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.999744599547812e-05,
      "loss": 2.0019,
      "step": 6
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.9989785286500294e-05,
      "loss": 1.979,
      "step": 7
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.99770217861636e-05,
      "loss": 1.9602,
      "step": 8
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9959162014075553e-05,
      "loss": 1.886,
      "step": 9
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.9936215093023884e-05,
      "loss": 1.9153,
      "step": 10
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.990819274431662e-05,
      "loss": 1.9168,
      "step": 11
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.9875109281794828e-05,
      "loss": 1.899,
      "step": 12
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9836981604521077e-05,
      "loss": 1.884,
      "step": 13
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.9793829188147406e-05,
      "loss": 1.8718,
      "step": 14
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.974567407496712e-05,
      "loss": 1.8484,
      "step": 15
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.9692540862655587e-05,
      "loss": 1.8129,
      "step": 16
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9634456691705705e-05,
      "loss": 1.8475,
      "step": 17
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9571451231564523e-05,
      "loss": 1.8583,
      "step": 18
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9503556665478066e-05,
      "loss": 1.8149,
      "step": 19
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.9430807674052092e-05,
      "loss": 1.8637,
      "step": 20
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9353241417537216e-05,
      "loss": 1.8277,
      "step": 21
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9270897516847406e-05,
      "loss": 1.8263,
      "step": 22
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.9183818033321612e-05,
      "loss": 1.8175,
      "step": 23
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.9092047447238775e-05,
      "loss": 1.8434,
      "step": 24
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.899563263509725e-05,
      "loss": 1.8388,
      "step": 25
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8894622845670282e-05,
      "loss": 1.8461,
      "step": 26
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.878906967484966e-05,
      "loss": 1.7963,
      "step": 27
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.86790270392905e-05,
      "loss": 1.7976,
      "step": 28
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.856455114887056e-05,
      "loss": 1.805,
      "step": 29
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.8445700477978207e-05,
      "loss": 1.8068,
      "step": 30
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.8322535735643604e-05,
      "loss": 1.793,
      "step": 31
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.8195119834528535e-05,
      "loss": 1.8285,
      "step": 32
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.8063517858790517e-05,
      "loss": 1.8104,
      "step": 33
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.792779703083777e-05,
      "loss": 1.8032,
      "step": 34
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.778802667699196e-05,
      "loss": 1.7777,
      "step": 35
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.764427819207624e-05,
      "loss": 1.7989,
      "step": 36
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.7496625002946702e-05,
      "loss": 1.7938,
      "step": 37
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.734514253098589e-05,
      "loss": 1.8199,
      "step": 38
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.7189908153577473e-05,
      "loss": 1.7558,
      "step": 39
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.7031001164581828e-05,
      "loss": 1.763,
      "step": 40
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.6868502733832647e-05,
      "loss": 1.8022,
      "step": 41
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.670249586567531e-05,
      "loss": 1.7876,
      "step": 42
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.6533065356568206e-05,
      "loss": 1.7981,
      "step": 43
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.636029775176862e-05,
      "loss": 1.7688,
      "step": 44
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.618428130112533e-05,
      "loss": 1.7708,
      "step": 45
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.6005105914000508e-05,
      "loss": 1.7715,
      "step": 46
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.5822863113343934e-05,
      "loss": 1.7705,
      "step": 47
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.5637645988943008e-05,
      "loss": 1.7814,
      "step": 48
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.544954914987238e-05,
      "loss": 1.7909,
      "step": 49
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.5258668676167548e-05,
      "loss": 1.7873,
      "step": 50
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.5065102069747117e-05,
      "loss": 1.7985,
      "step": 51
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.48689482046087e-05,
      "loss": 1.7832,
      "step": 52
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.467030727632401e-05,
      "loss": 1.7282,
      "step": 53
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.4469280750858854e-05,
      "loss": 1.7772,
      "step": 54
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.4265971312744252e-05,
      "loss": 1.8001,
      "step": 55
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.4060482812625055e-05,
      "loss": 1.8,
      "step": 56
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.3852920214212966e-05,
      "loss": 1.7753,
      "step": 57
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.3643389540670963e-05,
      "loss": 1.7986,
      "step": 58
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.3431997820456592e-05,
      "loss": 1.7699,
      "step": 59
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.3218853032651719e-05,
      "loss": 1.7632,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.3004064051806712e-05,
      "loss": 1.7823,
      "step": 61
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.2787740592327232e-05,
      "loss": 1.7726,
      "step": 62
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.2569993152432028e-05,
      "loss": 1.7821,
      "step": 63
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.2350932957710322e-05,
      "loss": 1.7243,
      "step": 64
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.2130671904307692e-05,
      "loss": 1.7454,
      "step": 65
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.1909322501769407e-05,
      "loss": 1.779,
      "step": 66
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.1686997815570473e-05,
      "loss": 1.7664,
      "step": 67
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.1463811409361667e-05,
      "loss": 1.7372,
      "step": 68
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.1239877286961123e-05,
      "loss": 1.7553,
      "step": 69
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.1015309834121083e-05,
      "loss": 1.7569,
      "step": 70
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.079022376009955e-05,
      "loss": 1.6978,
      "step": 71
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.05647340390667e-05,
      "loss": 1.7539,
      "step": 72
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.0338955851375962e-05,
      "loss": 1.7037,
      "step": 73
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.01130045247298e-05,
      "loss": 1.7486,
      "step": 74
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.886995475270205e-06,
      "loss": 1.7398,
      "step": 75
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.661044148624038e-06,
      "loss": 1.7263,
      "step": 76
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.435265960933304e-06,
      "loss": 1.6939,
      "step": 77
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.209776239900453e-06,
      "loss": 1.7249,
      "step": 78
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.98469016587892e-06,
      "loss": 1.704,
      "step": 79
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.76012271303888e-06,
      "loss": 1.7639,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.536188590638334e-06,
      "loss": 1.7387,
      "step": 81
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.313002184429529e-06,
      "loss": 1.7466,
      "step": 82
    },
    {
      "epoch": 0.58,
      "learning_rate": 8.090677498230598e-06,
      "loss": 1.7444,
      "step": 83
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.869328095692313e-06,
      "loss": 1.77,
      "step": 84
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.649067042289681e-06,
      "loss": 1.7414,
      "step": 85
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.430006847567972e-06,
      "loss": 1.7505,
      "step": 86
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.2122594076727705e-06,
      "loss": 1.7738,
      "step": 87
    },
    {
      "epoch": 0.61,
      "learning_rate": 6.995935948193294e-06,
      "loss": 1.754,
      "step": 88
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.781146967348283e-06,
      "loss": 1.6997,
      "step": 89
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.568002179543409e-06,
      "loss": 1.7367,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.356610459329038e-06,
      "loss": 1.7594,
      "step": 91
    },
    {
      "epoch": 0.64,
      "learning_rate": 6.147079785787038e-06,
      "loss": 1.7171,
      "step": 92
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.93951718737495e-06,
      "loss": 1.7117,
      "step": 93
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.7340286872557515e-06,
      "loss": 1.7322,
      "step": 94
    },
    {
      "epoch": 0.66,
      "learning_rate": 5.530719249141148e-06,
      "loss": 1.7317,
      "step": 95
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.329692723675994e-06,
      "loss": 1.6866,
      "step": 96
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.131051795391302e-06,
      "loss": 1.7128,
      "step": 97
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.934897930252887e-06,
      "loss": 1.7262,
      "step": 98
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.7413313238324556e-06,
      "loss": 1.7017,
      "step": 99
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.550450850127626e-06,
      "loss": 1.6793,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.3623540110569935e-06,
      "loss": 1.7353,
      "step": 101
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.177136886656067e-06,
      "loss": 1.7116,
      "step": 102
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.9948940859994964e-06,
      "loss": 1.73,
      "step": 103
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.815718698874672e-06,
      "loss": 1.6757,
      "step": 104
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.6397022482313804e-06,
      "loss": 1.7118,
      "step": 105
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.466934643431795e-06,
      "loss": 1.6858,
      "step": 106
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.2975041343246937e-06,
      "loss": 1.7531,
      "step": 107
    },
    {
      "epoch": 0.75,
      "learning_rate": 3.1314972661673572e-06,
      "loss": 1.7156,
      "step": 108
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.9689988354181742e-06,
      "loss": 1.7362,
      "step": 109
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.8100918464225304e-06,
      "loss": 1.7195,
      "step": 110
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.654857469014113e-06,
      "loss": 1.6935,
      "step": 111
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.5033749970533015e-06,
      "loss": 1.707,
      "step": 112
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.3557218079237608e-06,
      "loss": 1.7413,
      "step": 113
    },
    {
      "epoch": 0.79,
      "learning_rate": 2.211973323008041e-06,
      "loss": 1.6888,
      "step": 114
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.072202969162234e-06,
      "loss": 1.7327,
      "step": 115
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.936482141209486e-06,
      "loss": 1.7122,
      "step": 116
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.8048801654714687e-06,
      "loss": 1.7263,
      "step": 117
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.6774642643563955e-06,
      "loss": 1.6941,
      "step": 118
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.5542995220217961e-06,
      "loss": 1.6844,
      "step": 119
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.4354488511294418e-06,
      "loss": 1.7062,
      "step": 120
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.3209729607095022e-06,
      "loss": 1.7114,
      "step": 121
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.2109303251503434e-06,
      "loss": 1.6995,
      "step": 122
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.1053771543297198e-06,
      "loss": 1.7501,
      "step": 123
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.0043673649027519e-06,
      "loss": 1.7375,
      "step": 124
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.079525527612321e-07,
      "loss": 1.7252,
      "step": 125
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.161819666783888e-07,
      "loss": 1.6858,
      "step": 126
    },
    {
      "epoch": 0.88,
      "learning_rate": 7.291024831525961e-07,
      "loss": 1.7107,
      "step": 127
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.467585824627886e-07,
      "loss": 1.6991,
      "step": 128
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.691923259479093e-07,
      "loss": 1.7223,
      "step": 129
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.964433345219354e-07,
      "loss": 1.691,
      "step": 130
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.285487684354772e-07,
      "loss": 1.7522,
      "step": 131
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.6554330829429716e-07,
      "loss": 1.7165,
      "step": 132
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.0745913734441357e-07,
      "loss": 1.7106,
      "step": 133
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.5432592503288e-07,
      "loss": 1.7197,
      "step": 134
    },
    {
      "epoch": 0.94,
      "learning_rate": 2.0617081185259512e-07,
      "loss": 1.6987,
      "step": 135
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.630183954789233e-07,
      "loss": 1.6969,
      "step": 136
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.2489071820517394e-07,
      "loss": 1.6961,
      "step": 137
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.180725568338045e-08,
      "loss": 1.6844,
      "step": 138
    },
    {
      "epoch": 0.97,
      "learning_rate": 6.378490697611761e-08,
      "loss": 1.6977,
      "step": 139
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.083798592444899e-08,
      "loss": 1.6904,
      "step": 140
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.2978213836400974e-08,
      "loss": 1.6904,
      "step": 141
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.0214713499706596e-08,
      "loss": 1.7426,
      "step": 142
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.5540045218819256e-09,
      "loss": 1.7422,
      "step": 143
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 1.7323,
      "step": 144
    },
    {
      "epoch": 1.0,
      "step": 144,
      "total_flos": 0.0,
      "train_loss": 1.770974467198054,
      "train_runtime": 4572.546,
      "train_samples_per_second": 3.535,
      "train_steps_per_second": 0.031
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 144,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}