{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.04024954719259408,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 5.2467,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 3.0896,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.3592,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 6.2712,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.2501,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 3.0925,
"step": 6
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.5175,
"step": 7
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 3.9675,
"step": 8
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.4691,
"step": 9
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 5.3552,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.2402,
"step": 11
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 4.1104,
"step": 12
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 6.099,
"step": 13
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 4.1068,
"step": 14
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 4.0469,
"step": 15
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.9073,
"step": 16
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 4.8441,
"step": 17
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.8277,
"step": 18
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.7621,
"step": 19
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.6197,
"step": 20
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.2376,
"step": 21
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 4.3267,
"step": 22
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.2608,
"step": 23
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.0709,
"step": 24
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.9506,
"step": 25
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 4.192,
"step": 26
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.0657,
"step": 27
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.5416,
"step": 28
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.6437,
"step": 29
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.4752,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 1.8193,
"step": 31
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.3356,
"step": 32
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.628,
"step": 33
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 3.773,
"step": 34
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.4319,
"step": 35
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.4583,
"step": 36
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 2.2557,
"step": 37
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.4415,
"step": 38
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.4824,
"step": 39
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.1092,
"step": 40
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.3055,
"step": 41
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.1746,
"step": 42
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.1699,
"step": 43
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.0121,
"step": 44
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.0935,
"step": 45
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 3.2258,
"step": 46
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.3181,
"step": 47
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 3.8383,
"step": 48
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.4102,
"step": 49
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 3.0456,
"step": 50
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.7232,
"step": 51
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.7194,
"step": 52
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.4388,
"step": 53
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 3.0284,
"step": 54
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 3.9142,
"step": 55
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.5082,
"step": 56
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.0731,
"step": 57
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.1933,
"step": 58
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.1817,
"step": 59
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.183,
"step": 60
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 2.2929,
"step": 61
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 1.5283,
"step": 62
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.1672,
"step": 63
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.7731,
"step": 64
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.0256,
"step": 65
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.0887,
"step": 66
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.9872,
"step": 67
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.5686,
"step": 68
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.0255,
"step": 69
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.3044,
"step": 70
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 3.9479,
"step": 71
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.9349,
"step": 72
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.8491,
"step": 73
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.104,
"step": 74
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.2356,
"step": 75
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 1.7054,
"step": 76
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.1982,
"step": 77
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.4007,
"step": 78
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.2993,
"step": 79
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.3089,
"step": 80
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.085,
"step": 81
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.0267,
"step": 82
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.1438,
"step": 83
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.2504,
"step": 84
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.9708,
"step": 85
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 2.2815,
"step": 86
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.1335,
"step": 87
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.0487,
"step": 88
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.0415,
"step": 89
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.9862,
"step": 90
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.2236,
"step": 91
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.1593,
"step": 92
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.1641,
"step": 93
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.5803,
"step": 94
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.2719,
"step": 95
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.3083,
"step": 96
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.5848,
"step": 97
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 2.4667,
"step": 98
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 3.2805,
"step": 99
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 5.3795,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 120,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10,
"total_flos": 974777579077632.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}