{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1156136192843517,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 4e-05,
      "loss": 1.0185,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 8e-05,
      "loss": 0.8649,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00012,
      "loss": 0.7784,
      "step": 60
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016,
      "loss": 0.7386,
      "step": 80
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7037,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001990530303030303,
      "loss": 0.7019,
      "step": 120
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001981060606060606,
      "loss": 0.7117,
      "step": 140
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019715909090909094,
      "loss": 0.672,
      "step": 160
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019621212121212123,
      "loss": 0.664,
      "step": 180
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019526515151515152,
      "loss": 0.6666,
      "step": 200
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001943181818181818,
      "loss": 0.6685,
      "step": 220
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019337121212121213,
      "loss": 0.6788,
      "step": 240
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019242424242424245,
      "loss": 0.6673,
      "step": 260
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019147727272727274,
      "loss": 0.6628,
      "step": 280
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019053030303030303,
      "loss": 0.6643,
      "step": 300
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018958333333333332,
      "loss": 0.6607,
      "step": 320
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018863636363636364,
      "loss": 0.6706,
      "step": 340
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018768939393939396,
      "loss": 0.6709,
      "step": 360
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018674242424242425,
      "loss": 0.6616,
      "step": 380
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018579545454545454,
      "loss": 0.6566,
      "step": 400
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018484848484848484,
      "loss": 0.6513,
      "step": 420
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018390151515151518,
      "loss": 0.6797,
      "step": 440
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018295454545454547,
      "loss": 0.6599,
      "step": 460
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018200757575757577,
      "loss": 0.6561,
      "step": 480
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00018106060606060606,
      "loss": 0.662,
      "step": 500
    }
  ],
  "logging_steps": 20,
  "max_steps": 4324,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 7.92890273426473e+16,
  "trial_name": null,
  "trial_params": null
}