{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 6411,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 6479046246400.0,
"learning_rate": 3.3255451713395644e-05,
"loss": 31699221108.3091,
"step": 427
},
{
"epoch": 0.4,
"grad_norm": 8700652158976.0,
"learning_rate": 4.8162593170393484e-05,
"loss": 14378762930.6604,
"step": 854
},
{
"epoch": 0.6,
"grad_norm": 16.28143310546875,
"learning_rate": 4.446177847113885e-05,
"loss": 3635725784.4309,
"step": 1281
},
{
"epoch": 0.8,
"grad_norm": 6.706409454345703,
"learning_rate": 4.076096377188421e-05,
"loss": 1017393142.4075,
"step": 1708
},
{
"epoch": 1.0,
"grad_norm": 9.295268058776855,
"learning_rate": 3.7060149072629574e-05,
"loss": 1230655574.3326,
"step": 2135
},
{
"epoch": 1.2,
"grad_norm": 7.9164958000183105,
"learning_rate": 3.335933437337493e-05,
"loss": 387735187.4848,
"step": 2562
},
{
"epoch": 1.4,
"grad_norm": 18.9678897857666,
"learning_rate": 2.96585196741203e-05,
"loss": 610998305.5738,
"step": 2989
},
{
"epoch": 1.6,
"grad_norm": 22.521636962890625,
"learning_rate": 2.595770497486566e-05,
"loss": 286763645.9016,
"step": 3416
},
{
"epoch": 1.8,
"grad_norm": 6.2225751876831055,
"learning_rate": 2.2256890275611026e-05,
"loss": 146726173.377,
"step": 3843
},
{
"epoch": 2.0,
"grad_norm": 4.1397271156311035,
"learning_rate": 1.8556075576356388e-05,
"loss": 153057112.1311,
"step": 4270
},
{
"epoch": 2.2,
"grad_norm": 20.3540096282959,
"learning_rate": 1.4855260877101752e-05,
"loss": 167597749.0585,
"step": 4697
},
{
"epoch": 2.4,
"grad_norm": 2.9089903831481934,
"learning_rate": 1.1154446177847114e-05,
"loss": 31491796.2342,
"step": 5124
},
{
"epoch": 2.6,
"grad_norm": 11.952803611755371,
"learning_rate": 7.453631478592478e-06,
"loss": 228096436.459,
"step": 5551
},
{
"epoch": 2.8,
"grad_norm": 8.380680084228516,
"learning_rate": 3.7528167793378402e-06,
"loss": 170669039.2131,
"step": 5978
},
{
"epoch": 3.0,
"grad_norm": 11.055083274841309,
"learning_rate": 5.2002080083203335e-08,
"loss": 77525903.2881,
"step": 6405
}
],
"logging_steps": 427,
"max_steps": 6411,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.089435891661996e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}