full-train-openai / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 486,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06172839506172839,
"grad_norm": 10.125808206289982,
"learning_rate": 2.0408163265306125e-06,
"loss": 1.0772,
"step": 10
},
{
"epoch": 0.12345679012345678,
"grad_norm": 2.5386203860784136,
"learning_rate": 4.081632653061225e-06,
"loss": 0.5972,
"step": 20
},
{
"epoch": 0.18518518518518517,
"grad_norm": 2.0806228443763923,
"learning_rate": 6.122448979591837e-06,
"loss": 0.3747,
"step": 30
},
{
"epoch": 0.24691358024691357,
"grad_norm": 1.5238040517024116,
"learning_rate": 8.16326530612245e-06,
"loss": 0.3125,
"step": 40
},
{
"epoch": 0.30864197530864196,
"grad_norm": 1.0455816905588915,
"learning_rate": 9.999870796282452e-06,
"loss": 0.3415,
"step": 50
},
{
"epoch": 0.37037037037037035,
"grad_norm": 1.4397690389950966,
"learning_rate": 9.984374428250894e-06,
"loss": 0.3261,
"step": 60
},
{
"epoch": 0.43209876543209874,
"grad_norm": 1.4784124049840264,
"learning_rate": 9.943129053516176e-06,
"loss": 0.2905,
"step": 70
},
{
"epoch": 0.49382716049382713,
"grad_norm": 1.4073358258964512,
"learning_rate": 9.876347743436758e-06,
"loss": 0.3551,
"step": 80
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.3358444097895485,
"learning_rate": 9.78437548662167e-06,
"loss": 0.3212,
"step": 90
},
{
"epoch": 0.6172839506172839,
"grad_norm": 1.196559054025763,
"learning_rate": 9.66768740673815e-06,
"loss": 0.3022,
"step": 100
},
{
"epoch": 0.6172839506172839,
"eval_loss": 0.28118011355400085,
"eval_runtime": 33.8848,
"eval_samples_per_second": 16.999,
"eval_steps_per_second": 8.499,
"step": 100
},
{
"epoch": 0.6790123456790124,
"grad_norm": 1.4169927804373623,
"learning_rate": 9.52688630804867e-06,
"loss": 0.3117,
"step": 110
},
{
"epoch": 0.7407407407407407,
"grad_norm": 1.2415828541125944,
"learning_rate": 9.362699561356957e-06,
"loss": 0.3195,
"step": 120
},
{
"epoch": 0.8024691358024691,
"grad_norm": 1.2836969169427264,
"learning_rate": 9.175975346450063e-06,
"loss": 0.333,
"step": 130
},
{
"epoch": 0.8641975308641975,
"grad_norm": 1.2566606887109202,
"learning_rate": 8.9676782704478e-06,
"loss": 0.2938,
"step": 140
},
{
"epoch": 0.9259259259259259,
"grad_norm": 1.3623865418664496,
"learning_rate": 8.738884384694905e-06,
"loss": 0.2986,
"step": 150
},
{
"epoch": 0.9876543209876543,
"grad_norm": 1.4233923905829797,
"learning_rate": 8.490775625938452e-06,
"loss": 0.2574,
"step": 160
},
{
"epoch": 1.0493827160493827,
"grad_norm": 1.190370194761653,
"learning_rate": 8.224633710506997e-06,
"loss": 0.1849,
"step": 170
},
{
"epoch": 1.1111111111111112,
"grad_norm": 1.099933924299963,
"learning_rate": 7.941833513033873e-06,
"loss": 0.2084,
"step": 180
},
{
"epoch": 1.1728395061728394,
"grad_norm": 1.3411794107712403,
"learning_rate": 7.643835963929747e-06,
"loss": 0.239,
"step": 190
},
{
"epoch": 1.2345679012345678,
"grad_norm": 1.006333035037764,
"learning_rate": 7.332180502295729e-06,
"loss": 0.2176,
"step": 200
},
{
"epoch": 1.2345679012345678,
"eval_loss": 0.28219473361968994,
"eval_runtime": 33.6239,
"eval_samples_per_second": 17.131,
"eval_steps_per_second": 8.565,
"step": 200
},
{
"epoch": 1.2962962962962963,
"grad_norm": 1.139264768106286,
"learning_rate": 7.008477123264849e-06,
"loss": 0.2283,
"step": 210
},
{
"epoch": 1.3580246913580247,
"grad_norm": 1.1371107931484845,
"learning_rate": 6.674398060854931e-06,
"loss": 0.1902,
"step": 220
},
{
"epoch": 1.4197530864197532,
"grad_norm": 1.3730452135228395,
"learning_rate": 6.331669149298781e-06,
"loss": 0.2333,
"step": 230
},
{
"epoch": 1.4814814814814814,
"grad_norm": 1.1201678992731283,
"learning_rate": 5.982060907478568e-06,
"loss": 0.2069,
"step": 240
},
{
"epoch": 1.5432098765432098,
"grad_norm": 1.3356992773689496,
"learning_rate": 5.627379392521758e-06,
"loss": 0.2263,
"step": 250
},
{
"epoch": 1.6049382716049383,
"grad_norm": 1.4337310673277455,
"learning_rate": 5.2694568698084085e-06,
"loss": 0.2573,
"step": 260
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.5488778192587145,
"learning_rate": 4.910142347588041e-06,
"loss": 0.2173,
"step": 270
},
{
"epoch": 1.7283950617283952,
"grad_norm": 1.0832492592974947,
"learning_rate": 4.551292025103789e-06,
"loss": 0.1861,
"step": 280
},
{
"epoch": 1.7901234567901234,
"grad_norm": 1.2664457162444653,
"learning_rate": 4.1947597035682355e-06,
"loss": 0.2185,
"step": 290
},
{
"epoch": 1.8518518518518519,
"grad_norm": 1.5475480527449588,
"learning_rate": 3.842387209527374e-06,
"loss": 0.2157,
"step": 300
},
{
"epoch": 1.8518518518518519,
"eval_loss": 0.27247950434684753,
"eval_runtime": 33.6482,
"eval_samples_per_second": 17.118,
"eval_steps_per_second": 8.559,
"step": 300
},
{
"epoch": 1.9135802469135803,
"grad_norm": 1.1103498380478658,
"learning_rate": 3.4959948800850253e-06,
"loss": 0.1885,
"step": 310
},
{
"epoch": 1.9753086419753085,
"grad_norm": 1.4161875006518951,
"learning_rate": 3.1573721591405405e-06,
"loss": 0.2,
"step": 320
},
{
"epoch": 2.037037037037037,
"grad_norm": 1.1185504039182512,
"learning_rate": 2.8282683532191333e-06,
"loss": 0.1642,
"step": 330
},
{
"epoch": 2.0987654320987654,
"grad_norm": 1.498197806201236,
"learning_rate": 2.5103835946496846e-06,
"loss": 0.1336,
"step": 340
},
{
"epoch": 2.1604938271604937,
"grad_norm": 1.3374025404689762,
"learning_rate": 2.205360058773764e-06,
"loss": 0.103,
"step": 350
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.1254711504767878,
"learning_rate": 1.914773480557304e-06,
"loss": 0.1322,
"step": 360
},
{
"epoch": 2.2839506172839505,
"grad_norm": 1.556059015124062,
"learning_rate": 1.6401250144296239e-06,
"loss": 0.1179,
"step": 370
},
{
"epoch": 2.3456790123456788,
"grad_norm": 1.335036517801684,
"learning_rate": 1.382833479401438e-06,
"loss": 0.1292,
"step": 380
},
{
"epoch": 2.4074074074074074,
"grad_norm": 1.0684099709675043,
"learning_rate": 1.1442280295231656e-06,
"loss": 0.1332,
"step": 390
},
{
"epoch": 2.4691358024691357,
"grad_norm": 1.0579913108425518,
"learning_rate": 9.255412875475256e-07,
"loss": 0.126,
"step": 400
},
{
"epoch": 2.4691358024691357,
"eval_loss": 0.30315688252449036,
"eval_runtime": 33.6087,
"eval_samples_per_second": 17.138,
"eval_steps_per_second": 8.569,
"step": 400
},
{
"epoch": 2.5308641975308643,
"grad_norm": 1.2810598384548977,
"learning_rate": 7.279029772675572e-07,
"loss": 0.1301,
"step": 410
},
{
"epoch": 2.5925925925925926,
"grad_norm": 1.0566255697761466,
"learning_rate": 5.523340874250704e-07,
"loss": 0.1266,
"step": 420
},
{
"epoch": 2.6543209876543212,
"grad_norm": 1.2053538383503994,
"learning_rate": 3.997415973384311e-07,
"loss": 0.1176,
"step": 430
},
{
"epoch": 2.7160493827160495,
"grad_norm": 1.032781331569874,
"learning_rate": 2.7091379149682683e-07,
"loss": 0.1289,
"step": 440
},
{
"epoch": 2.7777777777777777,
"grad_norm": 1.1729691522785313,
"learning_rate": 1.6651618732554774e-07,
"loss": 0.1032,
"step": 450
},
{
"epoch": 2.8395061728395063,
"grad_norm": 1.3896397201247754,
"learning_rate": 8.708809715922973e-08,
"loss": 0.118,
"step": 460
},
{
"epoch": 2.9012345679012346,
"grad_norm": 1.2233166514446852,
"learning_rate": 3.303984218372136e-08,
"loss": 0.1521,
"step": 470
},
{
"epoch": 2.962962962962963,
"grad_norm": 1.3019829879665807,
"learning_rate": 4.650632739194305e-09,
"loss": 0.1211,
"step": 480
},
{
"epoch": 3.0,
"step": 486,
"total_flos": 11979215732736.0,
"train_loss": 0.23958166477120954,
"train_runtime": 3326.588,
"train_samples_per_second": 4.672,
"train_steps_per_second": 0.146
}
],
"logging_steps": 10,
"max_steps": 486,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 11979215732736.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}