qwen-14b-neurips-a100 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9656004828002414,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 1.9325842696629215e-05,
"loss": 1.8646,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 1.8426966292134835e-05,
"loss": 1.9283,
"step": 40
},
{
"epoch": 0.14,
"learning_rate": 1.752808988764045e-05,
"loss": 1.9027,
"step": 60
},
{
"epoch": 0.19,
"learning_rate": 1.662921348314607e-05,
"loss": 1.8583,
"step": 80
},
{
"epoch": 0.24,
"learning_rate": 1.5730337078651687e-05,
"loss": 1.8618,
"step": 100
},
{
"epoch": 0.29,
"learning_rate": 1.4831460674157305e-05,
"loss": 1.8233,
"step": 120
},
{
"epoch": 0.34,
"learning_rate": 1.3932584269662923e-05,
"loss": 1.9046,
"step": 140
},
{
"epoch": 0.39,
"learning_rate": 1.303370786516854e-05,
"loss": 1.8505,
"step": 160
},
{
"epoch": 0.43,
"learning_rate": 1.213483146067416e-05,
"loss": 1.8608,
"step": 180
},
{
"epoch": 0.48,
"learning_rate": 1.1235955056179778e-05,
"loss": 1.7992,
"step": 200
},
{
"epoch": 0.53,
"learning_rate": 1.0337078651685396e-05,
"loss": 1.8869,
"step": 220
},
{
"epoch": 0.58,
"learning_rate": 9.438202247191012e-06,
"loss": 1.839,
"step": 240
},
{
"epoch": 0.63,
"learning_rate": 8.53932584269663e-06,
"loss": 1.8787,
"step": 260
},
{
"epoch": 0.68,
"learning_rate": 7.640449438202247e-06,
"loss": 1.8828,
"step": 280
},
{
"epoch": 0.72,
"learning_rate": 6.741573033707865e-06,
"loss": 1.8479,
"step": 300
},
{
"epoch": 0.77,
"learning_rate": 5.842696629213483e-06,
"loss": 1.9063,
"step": 320
},
{
"epoch": 0.82,
"learning_rate": 4.943820224719101e-06,
"loss": 1.8693,
"step": 340
},
{
"epoch": 0.87,
"learning_rate": 4.04494382022472e-06,
"loss": 1.827,
"step": 360
},
{
"epoch": 0.92,
"learning_rate": 3.146067415730337e-06,
"loss": 1.8259,
"step": 380
},
{
"epoch": 0.97,
"learning_rate": 2.2471910112359554e-06,
"loss": 1.8419,
"step": 400
}
],
"logging_steps": 20,
"max_steps": 450,
"num_train_epochs": 2,
"save_steps": 20,
"total_flos": 2.786099516669952e+17,
"trial_name": null,
"trial_params": null
}
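
A minimal sketch of how this log can be inspected, assuming Python's standard library and that the file above is saved locally as trainer_state.json (the path is an assumption for illustration). It reads the log_history entries written every logging_steps (20) optimizer steps and prints the training loss and learning-rate schedule recorded at each logging point.

import json

# Load the trainer state exported by the Hugging Face Trainer
# (file path is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# One entry per logging event: step, loss, and learning rate.
for entry in state["log_history"]:
    print(f"step {entry['step']:4d}  "
          f"loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.3e}")

# Run-level fields such as max_steps, num_train_epochs, and total_flos
# sit at the top level of the same JSON object.
print("max_steps:", state["max_steps"], "epochs:", state["num_train_epochs"])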