{
  "best_metric": 2.1534345149993896,
  "best_model_checkpoint": "gpt2-open-instruct-v1-Anthropic/hh-rlhf/checkpoint-30000",
  "epoch": 4.0,
  "global_step": 30000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.009867224080267559,
      "loss": 2.4868,
      "step": 500
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0097,
      "loss": 2.3663,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.009532775919732442,
      "loss": 2.3524,
      "step": 1500
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.009365551839464883,
      "loss": 2.3374,
      "step": 2000
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.009198327759197324,
      "loss": 2.3286,
      "step": 2500
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.009031103678929767,
      "loss": 2.3082,
      "step": 3000
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.008863879598662208,
      "loss": 2.3161,
      "step": 3500
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00869665551839465,
      "loss": 2.308,
      "step": 4000
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00852943143812709,
      "loss": 2.3175,
      "step": 4500
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.008362541806020068,
      "loss": 2.3023,
      "step": 5000
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.008195317725752507,
      "loss": 2.3155,
      "step": 5500
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00802809364548495,
      "loss": 2.314,
      "step": 6000
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.007860869565217391,
      "loss": 2.2981,
      "step": 6500
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.007693645484949833,
      "loss": 2.3153,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.007526755852842809,
      "loss": 2.3108,
      "step": 7500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.1798622608184814,
      "eval_runtime": 12.4843,
      "eval_samples_per_second": 40.05,
      "eval_steps_per_second": 40.05,
      "step": 7500
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.007359531772575251,
      "loss": 2.2738,
      "step": 8000
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.007192307692307692,
      "loss": 2.2885,
      "step": 8500
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.007025083612040134,
      "loss": 2.313,
      "step": 9000
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.006858193979933111,
      "loss": 2.2854,
      "step": 9500
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.006690969899665552,
      "loss": 2.2524,
      "step": 10000
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.006523745819397994,
      "loss": 2.2767,
      "step": 10500
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.006356521739130435,
      "loss": 2.2627,
      "step": 11000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.006189297658862877,
      "loss": 2.2779,
      "step": 11500
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0060224080267558535,
      "loss": 2.2778,
      "step": 12000
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.005855183946488294,
      "loss": 2.2588,
      "step": 12500
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.005687959866220735,
      "loss": 2.296,
      "step": 13000
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.005520735785953177,
      "loss": 2.2451,
      "step": 13500
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.0053538461538461535,
      "loss": 2.2831,
      "step": 14000
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.005186622073578596,
      "loss": 2.2801,
      "step": 14500
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.005019397993311037,
      "loss": 2.265,
      "step": 15000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.1632304191589355,
      "eval_runtime": 12.092,
      "eval_samples_per_second": 41.35,
      "eval_steps_per_second": 41.35,
      "step": 15000
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.004852173913043478,
      "loss": 2.2483,
      "step": 15500
    },
    {
      "epoch": 2.13,
      "learning_rate": 0.00468494983277592,
      "loss": 2.2928,
      "step": 16000
    },
    {
      "epoch": 2.2,
      "learning_rate": 0.004518060200668896,
      "loss": 2.2555,
      "step": 16500
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.004350836120401338,
      "loss": 2.2761,
      "step": 17000
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00418361204013378,
      "loss": 2.2515,
      "step": 17500
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.004016387959866221,
      "loss": 2.2636,
      "step": 18000
    },
    {
      "epoch": 2.47,
      "learning_rate": 0.003849498327759197,
      "loss": 2.2569,
      "step": 18500
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.003682274247491639,
      "loss": 2.2728,
      "step": 19000
    },
    {
      "epoch": 2.6,
      "learning_rate": 0.0035150501672240805,
      "loss": 2.2561,
      "step": 19500
    },
    {
      "epoch": 2.67,
      "learning_rate": 0.0033478260869565218,
      "loss": 2.2396,
      "step": 20000
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.0031809364548494984,
      "loss": 2.2601,
      "step": 20500
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00301371237458194,
      "loss": 2.2609,
      "step": 21000
    },
    {
      "epoch": 2.87,
      "learning_rate": 0.002846488294314381,
      "loss": 2.2654,
      "step": 21500
    },
    {
      "epoch": 2.93,
      "learning_rate": 0.0026792642140468226,
      "loss": 2.2381,
      "step": 22000
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.0025123745819397992,
      "loss": 2.2507,
      "step": 22500
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.156663179397583,
      "eval_runtime": 12.0987,
      "eval_samples_per_second": 41.327,
      "eval_steps_per_second": 41.327,
      "step": 22500
    },
    {
      "epoch": 3.07,
      "learning_rate": 0.002345150501672241,
      "loss": 2.2693,
      "step": 23000
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.002177926421404682,
      "loss": 2.2605,
      "step": 23500
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.002010702341137124,
      "loss": 2.2272,
      "step": 24000
    },
    {
      "epoch": 3.27,
      "learning_rate": 0.0018434782608695653,
      "loss": 2.2394,
      "step": 24500
    },
    {
      "epoch": 3.33,
      "learning_rate": 0.0016765886287625417,
      "loss": 2.231,
      "step": 25000
    },
    {
      "epoch": 3.4,
      "learning_rate": 0.0015093645484949834,
      "loss": 2.225,
      "step": 25500
    },
    {
      "epoch": 3.47,
      "learning_rate": 0.0013421404682274248,
      "loss": 2.2503,
      "step": 26000
    },
    {
      "epoch": 3.53,
      "learning_rate": 0.0011749163879598663,
      "loss": 2.2597,
      "step": 26500
    },
    {
      "epoch": 3.6,
      "learning_rate": 0.001008026755852843,
      "loss": 2.228,
      "step": 27000
    },
    {
      "epoch": 3.67,
      "learning_rate": 0.0008408026755852843,
      "loss": 2.2338,
      "step": 27500
    },
    {
      "epoch": 3.73,
      "learning_rate": 0.0006735785953177258,
      "loss": 2.2478,
      "step": 28000
    },
    {
      "epoch": 3.8,
      "learning_rate": 0.0005063545150501672,
      "loss": 2.2672,
      "step": 28500
    },
    {
      "epoch": 3.87,
      "learning_rate": 0.0003391304347826087,
      "loss": 2.2513,
      "step": 29000
    },
    {
      "epoch": 3.93,
      "learning_rate": 0.0001722408026755853,
      "loss": 2.2776,
      "step": 29500
    },
    {
      "epoch": 4.0,
      "learning_rate": 5.016722408026756e-06,
      "loss": 2.2519,
      "step": 30000
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.1534345149993896,
      "eval_runtime": 11.8174,
      "eval_samples_per_second": 42.31,
      "eval_steps_per_second": 42.31,
      "step": 30000
    }
  ],
  "max_steps": 30000,
  "num_train_epochs": 4,
  "total_flos": 3.135504384e+16,
  "trial_name": null,
  "trial_params": null
}