{
  "best_metric": 0.34317871928215027,
  "best_model_checkpoint": "../../saves/LLaMA3-70B-qlora-bnb/lora/sft/A61K/checkpoint-500",
  "epoch": 2.9925925925925925,
  "eval_steps": 100,
  "global_step": 606,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04938271604938271,
      "grad_norm": 15.14680004119873,
      "learning_rate": 9e-07,
      "loss": 12.2914,
      "step": 10
    },
    {
      "epoch": 0.09876543209876543,
      "grad_norm": 16.71466636657715,
      "learning_rate": 3.6e-06,
      "loss": 12.285,
      "step": 20
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": NaN,
      "learning_rate": 6.3e-06,
      "loss": 12.0986,
      "step": 30
    },
    {
      "epoch": 0.19753086419753085,
      "grad_norm": 15.837638854980469,
      "learning_rate": 9.3e-06,
      "loss": 11.846,
      "step": 40
    },
    {
      "epoch": 0.24691358024691357,
      "grad_norm": 19.359682083129883,
      "learning_rate": 1.2299999999999999e-05,
      "loss": 10.9695,
      "step": 50
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 33.46400451660156,
      "learning_rate": 1.53e-05,
      "loss": 9.6625,
      "step": 60
    },
    {
      "epoch": 0.345679012345679,
      "grad_norm": 18.000560760498047,
      "learning_rate": 1.83e-05,
      "loss": 7.6763,
      "step": 70
    },
    {
      "epoch": 0.3950617283950617,
      "grad_norm": 23.49736976623535,
      "learning_rate": 2.13e-05,
      "loss": 5.599,
      "step": 80
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 24.056060791015625,
      "learning_rate": 2.43e-05,
      "loss": 2.2361,
      "step": 90
    },
    {
      "epoch": 0.49382716049382713,
      "grad_norm": 9.21530532836914,
      "learning_rate": 2.7300000000000003e-05,
      "loss": 0.5658,
      "step": 100
    },
    {
      "epoch": 0.49382716049382713,
      "eval_loss": 0.43361854553222656,
      "eval_runtime": 640.3211,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 100
    },
    {
      "epoch": 0.5432098765432098,
      "grad_norm": 11.147759437561035,
      "learning_rate": 2.9999710893018065e-05,
      "loss": 0.3497,
      "step": 110
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 8.447771072387695,
      "learning_rate": 2.9965031537858088e-05,
      "loss": 0.3664,
      "step": 120
    },
    {
      "epoch": 0.6419753086419753,
      "grad_norm": 5.72174596786499,
      "learning_rate": 2.9872683924428126e-05,
      "loss": 0.3909,
      "step": 130
    },
    {
      "epoch": 0.691358024691358,
      "grad_norm": 8.332149505615234,
      "learning_rate": 2.9723023917398982e-05,
      "loss": 0.391,
      "step": 140
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 6.015285491943359,
      "learning_rate": 2.9516628236700468e-05,
      "loss": 0.3285,
      "step": 150
    },
    {
      "epoch": 0.7901234567901234,
      "grad_norm": 8.486276626586914,
      "learning_rate": 2.9254292235111508e-05,
      "loss": 0.3513,
      "step": 160
    },
    {
      "epoch": 0.8395061728395061,
      "grad_norm": 19.51106834411621,
      "learning_rate": 2.8937026833341146e-05,
      "loss": 0.382,
      "step": 170
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 9.629849433898926,
      "learning_rate": 2.85660546244112e-05,
      "loss": 0.3862,
      "step": 180
    },
    {
      "epoch": 0.9382716049382716,
      "grad_norm": 6.940722942352295,
      "learning_rate": 2.814280516235244e-05,
      "loss": 0.4923,
      "step": 190
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 7.83641242980957,
      "learning_rate": 2.7668909453369486e-05,
      "loss": 0.361,
      "step": 200
    },
    {
      "epoch": 0.9876543209876543,
      "eval_loss": 0.3438129127025604,
      "eval_runtime": 641.0521,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 200
    },
    {
      "epoch": 1.037037037037037,
      "grad_norm": 8.031428337097168,
      "learning_rate": 2.7146193670702906e-05,
      "loss": 0.3862,
      "step": 210
    },
    {
      "epoch": 1.0864197530864197,
      "grad_norm": 4.103025436401367,
      "learning_rate": 2.6576672117408486e-05,
      "loss": 0.3276,
      "step": 220
    },
    {
      "epoch": 1.1358024691358024,
      "grad_norm": 6.102336883544922,
      "learning_rate": 2.5962539464171864e-05,
      "loss": 0.3845,
      "step": 230
    },
    {
      "epoch": 1.1851851851851851,
      "grad_norm": 11.080580711364746,
      "learning_rate": 2.530616229207034e-05,
      "loss": 0.3431,
      "step": 240
    },
    {
      "epoch": 1.2345679012345678,
      "grad_norm": 7.723226547241211,
      "learning_rate": 2.4610069972872206e-05,
      "loss": 0.3262,
      "step": 250
    },
    {
      "epoch": 1.2839506172839505,
      "grad_norm": 5.732240200042725,
      "learning_rate": 2.3876944922016517e-05,
      "loss": 0.3465,
      "step": 260
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 6.469529151916504,
      "learning_rate": 2.3109612261833967e-05,
      "loss": 0.3217,
      "step": 270
    },
    {
      "epoch": 1.382716049382716,
      "grad_norm": 3.8795225620269775,
      "learning_rate": 2.2311028934841804e-05,
      "loss": 0.3276,
      "step": 280
    },
    {
      "epoch": 1.4320987654320987,
      "grad_norm": 4.8721466064453125,
      "learning_rate": 2.1484272309065257e-05,
      "loss": 0.3561,
      "step": 290
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 6.701916217803955,
      "learning_rate": 2.0632528319295113e-05,
      "loss": 0.3343,
      "step": 300
    },
    {
      "epoch": 1.4814814814814814,
      "eval_loss": 0.3502769470214844,
      "eval_runtime": 641.2195,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 300
    },
    {
      "epoch": 1.5308641975308643,
      "grad_norm": 4.695794582366943,
      "learning_rate": 1.9759079189979598e-05,
      "loss": 0.3471,
      "step": 310
    },
    {
      "epoch": 1.5802469135802468,
      "grad_norm": 4.3271870613098145,
      "learning_rate": 1.8867290787060762e-05,
      "loss": 0.3365,
      "step": 320
    },
    {
      "epoch": 1.6296296296296298,
      "grad_norm": 4.225152969360352,
      "learning_rate": 1.7960599647495552e-05,
      "loss": 0.3332,
      "step": 330
    },
    {
      "epoch": 1.6790123456790123,
      "grad_norm": 5.201157093048096,
      "learning_rate": 1.713472257409928e-05,
      "loss": 0.3339,
      "step": 340
    },
    {
      "epoch": 1.7283950617283952,
      "grad_norm": 5.604823589324951,
      "learning_rate": 1.6209378726328168e-05,
      "loss": 0.3225,
      "step": 350
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 4.6840715408325195,
      "learning_rate": 1.5279374496481708e-05,
      "loss": 0.3359,
      "step": 360
    },
    {
      "epoch": 1.8271604938271606,
      "grad_norm": 7.939080715179443,
      "learning_rate": 1.434829368750633e-05,
      "loss": 0.3253,
      "step": 370
    },
    {
      "epoch": 1.876543209876543,
      "grad_norm": 9.724854469299316,
      "learning_rate": 1.3419724250982795e-05,
      "loss": 0.3561,
      "step": 380
    },
    {
      "epoch": 1.925925925925926,
      "grad_norm": 5.289661407470703,
      "learning_rate": 1.2497244460832644e-05,
      "loss": 0.3276,
      "step": 390
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 5.043463230133057,
      "learning_rate": 1.1584409124317906e-05,
      "loss": 0.3275,
      "step": 400
    },
    {
      "epoch": 1.9753086419753085,
      "eval_loss": 0.3461529314517975,
      "eval_runtime": 641.2613,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 400
    },
    {
      "epoch": 2.0246913580246915,
      "grad_norm": 7.275540828704834,
      "learning_rate": 1.0684735883470333e-05,
      "loss": 0.3025,
      "step": 410
    },
    {
      "epoch": 2.074074074074074,
      "grad_norm": 9.59693717956543,
      "learning_rate": 9.80169165973814e-06,
      "loss": 0.2947,
      "step": 420
    },
    {
      "epoch": 2.123456790123457,
      "grad_norm": 11.817838668823242,
      "learning_rate": 8.938679294086226e-06,
      "loss": 0.2596,
      "step": 430
    },
    {
      "epoch": 2.1728395061728394,
      "grad_norm": 5.468794822692871,
      "learning_rate": 8.099024434032719e-06,
      "loss": 0.3097,
      "step": 440
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 8.117734909057617,
      "learning_rate": 7.285962718153099e-06,
      "loss": 0.306,
      "step": 450
    },
    {
      "epoch": 2.271604938271605,
      "grad_norm": 5.816110134124756,
      "learning_rate": 6.5026273074368575e-06,
      "loss": 0.313,
      "step": 460
    },
    {
      "epoch": 2.3209876543209877,
      "grad_norm": 7.187597751617432,
      "learning_rate": 5.752036811544973e-06,
      "loss": 0.3078,
      "step": 470
    },
    {
      "epoch": 2.3703703703703702,
      "grad_norm": 17.781335830688477,
      "learning_rate": 5.03708365649491e-06,
      "loss": 0.3034,
      "step": 480
    },
    {
      "epoch": 2.419753086419753,
      "grad_norm": 6.449460506439209,
      "learning_rate": 4.3605229385984915e-06,
      "loss": 0.3052,
      "step": 490
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 8.452071189880371,
      "learning_rate": 3.7249618076045316e-06,
      "loss": 0.2916,
      "step": 500
    },
    {
      "epoch": 2.4691358024691357,
      "eval_loss": 0.34317871928215027,
      "eval_runtime": 641.0511,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 500
    },
    {
      "epoch": 2.5185185185185186,
      "grad_norm": 7.553008079528809,
      "learning_rate": 3.1328494199585704e-06,
      "loss": 0.2849,
      "step": 510
    },
    {
      "epoch": 2.567901234567901,
      "grad_norm": 7.099052906036377,
      "learning_rate": 2.5864675008951393e-06,
      "loss": 0.2827,
      "step": 520
    },
    {
      "epoch": 2.617283950617284,
      "grad_norm": 11.348811149597168,
      "learning_rate": 2.0879215517319634e-06,
      "loss": 0.3025,
      "step": 530
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 11.900992393493652,
      "learning_rate": 1.639132736248945e-06,
      "loss": 0.2994,
      "step": 540
    },
    {
      "epoch": 2.7160493827160495,
      "grad_norm": 8.492768287658691,
      "learning_rate": 1.2418304774182077e-06,
      "loss": 0.3024,
      "step": 550
    },
    {
      "epoch": 2.765432098765432,
      "grad_norm": 4.38970422744751,
      "learning_rate": 8.975457930137787e-07,
      "loss": 0.3267,
      "step": 560
    },
    {
      "epoch": 2.814814814814815,
      "grad_norm": 5.739566326141357,
      "learning_rate": 6.076053957825411e-07,
      "loss": 0.2997,
      "step": 570
    },
    {
      "epoch": 2.8641975308641974,
      "grad_norm": 7.524597644805908,
      "learning_rate": 3.7312658091147976e-07,
      "loss": 0.2949,
      "step": 580
    },
    {
      "epoch": 2.9135802469135803,
      "grad_norm": 5.539340019226074,
      "learning_rate": 1.9501292049268593e-07,
      "loss": 0.282,
      "step": 590
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 7.615926742553711,
      "learning_rate": 7.395078157753921e-08,
      "loss": 0.291,
      "step": 600
    },
    {
      "epoch": 2.962962962962963,
      "eval_loss": 0.34410154819488525,
      "eval_runtime": 641.306,
      "eval_samples_per_second": 0.281,
      "eval_steps_per_second": 0.281,
      "step": 600
    },
    {
      "epoch": 2.9925925925925925,
      "step": 606,
      "total_flos": 1.4871334589840622e+19,
      "train_loss": 1.6824197243935992,
      "train_runtime": 45546.3315,
      "train_samples_per_second": 0.107,
      "train_steps_per_second": 0.013
    }
  ],
  "logging_steps": 10,
  "max_steps": 606,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.4871334589840622e+19,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}