{
  "best_metric": 0.8677751385589866,
  "best_model_checkpoint": "/scratch/mrahma45/pixel/finetuned_models/mbert/mbert-base-finetuned-pos-ud-Tamil-TTB/checkpoint-2000",
  "epoch": 346.15384615384613,
  "global_step": 4500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.69,
      "learning_rate": 7.92e-05,
      "loss": 0.9585,
      "step": 100
    },
    {
      "epoch": 15.38,
      "learning_rate": 7.946845637583894e-05,
      "loss": 0.0575,
      "step": 200
    },
    {
      "epoch": 23.08,
      "learning_rate": 7.893691275167786e-05,
      "loss": 0.0242,
      "step": 300
    },
    {
      "epoch": 30.77,
      "learning_rate": 7.840000000000001e-05,
      "loss": 0.0156,
      "step": 400
    },
    {
      "epoch": 38.46,
      "learning_rate": 7.786308724832216e-05,
      "loss": 0.0149,
      "step": 500
    },
    {
      "epoch": 38.46,
      "eval_accuracy": 0.8661916072842438,
      "eval_loss": 0.9775320887565613,
      "eval_runtime": 0.4044,
      "eval_samples_per_second": 197.82,
      "eval_steps_per_second": 24.727,
      "step": 500
    },
    {
      "epoch": 46.15,
      "learning_rate": 7.73261744966443e-05,
      "loss": 0.0111,
      "step": 600
    },
    {
      "epoch": 53.85,
      "learning_rate": 7.678926174496645e-05,
      "loss": 0.0117,
      "step": 700
    },
    {
      "epoch": 61.54,
      "learning_rate": 7.62523489932886e-05,
      "loss": 0.0094,
      "step": 800
    },
    {
      "epoch": 69.23,
      "learning_rate": 7.571543624161075e-05,
      "loss": 0.0111,
      "step": 900
    },
    {
      "epoch": 76.92,
      "learning_rate": 7.51785234899329e-05,
      "loss": 0.0091,
      "step": 1000
    },
    {
      "epoch": 76.92,
      "eval_accuracy": 0.8638163103721298,
      "eval_loss": 0.9691746830940247,
      "eval_runtime": 0.4022,
      "eval_samples_per_second": 198.893,
      "eval_steps_per_second": 24.862,
      "step": 1000
    },
    {
      "epoch": 84.62,
      "learning_rate": 7.464161073825505e-05,
      "loss": 0.0079,
      "step": 1100
    },
    {
      "epoch": 92.31,
      "learning_rate": 7.410469798657718e-05,
      "loss": 0.0065,
      "step": 1200
    },
    {
      "epoch": 100.0,
      "learning_rate": 7.356778523489933e-05,
      "loss": 0.0043,
      "step": 1300
    },
    {
      "epoch": 107.69,
      "learning_rate": 7.303087248322148e-05,
      "loss": 0.0047,
      "step": 1400
    },
    {
      "epoch": 115.38,
      "learning_rate": 7.249395973154363e-05,
      "loss": 0.0055,
      "step": 1500
    },
    {
      "epoch": 115.38,
      "eval_accuracy": 0.8582739509105305,
      "eval_loss": 1.1557462215423584,
      "eval_runtime": 0.4044,
      "eval_samples_per_second": 197.81,
      "eval_steps_per_second": 24.726,
      "step": 1500
    },
    {
      "epoch": 123.08,
      "learning_rate": 7.195704697986577e-05,
      "loss": 0.0051,
      "step": 1600
    },
    {
      "epoch": 130.77,
      "learning_rate": 7.142550335570471e-05,
      "loss": 0.0055,
      "step": 1700
    },
    {
      "epoch": 138.46,
      "learning_rate": 7.088859060402686e-05,
      "loss": 0.0056,
      "step": 1800
    },
    {
      "epoch": 146.15,
      "learning_rate": 7.0351677852349e-05,
      "loss": 0.0055,
      "step": 1900
    },
    {
      "epoch": 153.85,
      "learning_rate": 6.981476510067114e-05,
      "loss": 0.0061,
      "step": 2000
    },
    {
      "epoch": 153.85,
      "eval_accuracy": 0.8677751385589866,
      "eval_loss": 1.1852538585662842,
      "eval_runtime": 0.4041,
      "eval_samples_per_second": 197.965,
      "eval_steps_per_second": 24.746,
      "step": 2000
    },
    {
      "epoch": 161.54,
      "learning_rate": 6.927785234899329e-05,
      "loss": 0.0069,
      "step": 2100
    },
    {
      "epoch": 169.23,
      "learning_rate": 6.874093959731543e-05,
      "loss": 0.0055,
      "step": 2200
    },
    {
      "epoch": 176.92,
      "learning_rate": 6.820402684563758e-05,
      "loss": 0.0043,
      "step": 2300
    },
    {
      "epoch": 184.62,
      "learning_rate": 6.766711409395973e-05,
      "loss": 0.0043,
      "step": 2400
    },
    {
      "epoch": 192.31,
      "learning_rate": 6.713020134228188e-05,
      "loss": 0.0041,
      "step": 2500
    },
    {
      "epoch": 192.31,
      "eval_accuracy": 0.8622327790973872,
      "eval_loss": 1.1900646686553955,
      "eval_runtime": 0.4087,
      "eval_samples_per_second": 195.729,
      "eval_steps_per_second": 24.466,
      "step": 2500
    },
    {
      "epoch": 200.0,
      "learning_rate": 6.659328859060403e-05,
      "loss": 0.0029,
      "step": 2600
    },
    {
      "epoch": 207.69,
      "learning_rate": 6.605637583892618e-05,
      "loss": 0.0021,
      "step": 2700
    },
    {
      "epoch": 215.38,
      "learning_rate": 6.551946308724832e-05,
      "loss": 0.0047,
      "step": 2800
    },
    {
      "epoch": 223.08,
      "learning_rate": 6.498255033557047e-05,
      "loss": 0.0058,
      "step": 2900
    },
    {
      "epoch": 230.77,
      "learning_rate": 6.444563758389262e-05,
      "loss": 0.0045,
      "step": 3000
    },
    {
      "epoch": 230.77,
      "eval_accuracy": 0.8566904196357878,
      "eval_loss": 1.263396978378296,
      "eval_runtime": 0.4055,
      "eval_samples_per_second": 197.27,
      "eval_steps_per_second": 24.659,
      "step": 3000
    },
    {
      "epoch": 238.46,
      "learning_rate": 6.390872483221477e-05,
      "loss": 0.0043,
      "step": 3100
    },
    {
      "epoch": 246.15,
      "learning_rate": 6.337181208053692e-05,
      "loss": 0.0028,
      "step": 3200
    },
    {
      "epoch": 253.85,
      "learning_rate": 6.283489932885907e-05,
      "loss": 0.0025,
      "step": 3300
    },
    {
      "epoch": 261.54,
      "learning_rate": 6.229798657718121e-05,
      "loss": 0.0055,
      "step": 3400
    },
    {
      "epoch": 269.23,
      "learning_rate": 6.176107382550336e-05,
      "loss": 0.0031,
      "step": 3500
    },
    {
      "epoch": 269.23,
      "eval_accuracy": 0.8574821852731591,
      "eval_loss": 1.2649881839752197,
      "eval_runtime": 0.4055,
      "eval_samples_per_second": 197.278,
      "eval_steps_per_second": 24.66,
      "step": 3500
    },
    {
      "epoch": 276.92,
      "learning_rate": 6.122416107382551e-05,
      "loss": 0.0057,
      "step": 3600
    },
    {
      "epoch": 284.62,
      "learning_rate": 6.068724832214766e-05,
      "loss": 0.0036,
      "step": 3700
    },
    {
      "epoch": 292.31,
      "learning_rate": 6.015033557046981e-05,
      "loss": 0.0017,
      "step": 3800
    },
    {
      "epoch": 300.0,
      "learning_rate": 5.9613422818791955e-05,
      "loss": 0.003,
      "step": 3900
    },
    {
      "epoch": 307.69,
      "learning_rate": 5.90765100671141e-05,
      "loss": 0.0052,
      "step": 4000
    },
    {
      "epoch": 307.69,
      "eval_accuracy": 0.8661916072842438,
      "eval_loss": 1.126146674156189,
      "eval_runtime": 0.4039,
      "eval_samples_per_second": 198.092,
      "eval_steps_per_second": 24.761,
      "step": 4000
    },
    {
      "epoch": 315.38,
      "learning_rate": 5.853959731543625e-05,
      "loss": 0.0029,
      "step": 4100
    },
    {
      "epoch": 323.08,
      "learning_rate": 5.80026845637584e-05,
      "loss": 0.0048,
      "step": 4200
    },
    {
      "epoch": 330.77,
      "learning_rate": 5.7465771812080534e-05,
      "loss": 0.0037,
      "step": 4300
    },
    {
      "epoch": 338.46,
      "learning_rate": 5.692885906040268e-05,
      "loss": 0.0018,
      "step": 4400
    },
    {
      "epoch": 346.15,
      "learning_rate": 5.6397315436241616e-05,
      "loss": 0.0021,
      "step": 4500
    },
    {
      "epoch": 346.15,
      "eval_accuracy": 0.8630245447347585,
      "eval_loss": 1.288573980331421,
      "eval_runtime": 0.4065,
      "eval_samples_per_second": 196.825,
      "eval_steps_per_second": 24.603,
      "step": 4500
    },
    {
      "epoch": 346.15,
      "step": 4500,
      "total_flos": 1.809255033426739e+16,
      "train_loss": 0.028387814574771457,
      "train_runtime": 1178.9995,
      "train_samples_per_second": 407.125,
      "train_steps_per_second": 12.723
    }
  ],
  "max_steps": 15000,
  "num_train_epochs": 1154,
  "total_flos": 1.809255033426739e+16,
  "trial_name": null,
  "trial_params": null
}