Llama-2-7b-hf-LoRA-wikitext2-raw-MIA-ArXiv2311.06062-settings-r32-dropout0.05/checkpoint-833/trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 833,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012004801920768308,
      "grad_norm": 0.3538109064102173,
      "learning_rate": 9.879951980792317e-05,
      "loss": 2.3075,
      "step": 10
    },
    {
      "epoch": 0.024009603841536616,
      "grad_norm": 0.3431814908981323,
      "learning_rate": 9.759903961584634e-05,
      "loss": 2.1468,
      "step": 20
    },
    {
      "epoch": 0.03601440576230492,
      "grad_norm": 0.6265680193901062,
      "learning_rate": 9.639855942376951e-05,
      "loss": 2.0353,
      "step": 30
    },
    {
      "epoch": 0.04801920768307323,
      "grad_norm": 0.3244292736053467,
      "learning_rate": 9.519807923169268e-05,
      "loss": 2.0219,
      "step": 40
    },
    {
      "epoch": 0.060024009603841535,
      "grad_norm": 0.6210302114486694,
      "learning_rate": 9.399759903961585e-05,
      "loss": 1.9809,
      "step": 50
    },
    {
      "epoch": 0.07202881152460984,
      "grad_norm": 0.4695661962032318,
      "learning_rate": 9.279711884753903e-05,
      "loss": 1.9376,
      "step": 60
    },
    {
      "epoch": 0.08403361344537816,
      "grad_norm": 0.27379557490348816,
      "learning_rate": 9.159663865546218e-05,
      "loss": 1.923,
      "step": 70
    },
    {
      "epoch": 0.09603841536614646,
      "grad_norm": 0.4497829079627991,
      "learning_rate": 9.039615846338536e-05,
      "loss": 1.9875,
      "step": 80
    },
    {
      "epoch": 0.10804321728691477,
      "grad_norm": 0.3675350844860077,
      "learning_rate": 8.919567827130852e-05,
      "loss": 1.953,
      "step": 90
    },
    {
      "epoch": 0.12004801920768307,
      "grad_norm": 0.293821781873703,
      "learning_rate": 8.79951980792317e-05,
      "loss": 1.8549,
      "step": 100
    },
    {
      "epoch": 0.12004801920768307,
      "eval_loss": 1.889587640762329,
      "eval_runtime": 39.6181,
      "eval_samples_per_second": 25.241,
      "eval_steps_per_second": 4.215,
      "step": 100
    },
    {
      "epoch": 0.13205282112845138,
      "grad_norm": 0.25892049074172974,
      "learning_rate": 8.679471788715487e-05,
      "loss": 1.8372,
      "step": 110
    },
    {
      "epoch": 0.14405762304921968,
      "grad_norm": 0.33091431856155396,
      "learning_rate": 8.559423769507804e-05,
      "loss": 1.9107,
      "step": 120
    },
    {
      "epoch": 0.15606242496998798,
      "grad_norm": 0.3052417039871216,
      "learning_rate": 8.43937575030012e-05,
      "loss": 1.8123,
      "step": 130
    },
    {
      "epoch": 0.16806722689075632,
      "grad_norm": 0.3017849028110504,
      "learning_rate": 8.319327731092437e-05,
      "loss": 1.8966,
      "step": 140
    },
    {
      "epoch": 0.18007202881152462,
      "grad_norm": 0.28657111525535583,
      "learning_rate": 8.199279711884754e-05,
      "loss": 1.8395,
      "step": 150
    },
    {
      "epoch": 0.19207683073229292,
      "grad_norm": 0.3324660062789917,
      "learning_rate": 8.079231692677071e-05,
      "loss": 1.8841,
      "step": 160
    },
    {
      "epoch": 0.20408163265306123,
      "grad_norm": 0.28194233775138855,
      "learning_rate": 7.959183673469388e-05,
      "loss": 1.8627,
      "step": 170
    },
    {
      "epoch": 0.21608643457382953,
      "grad_norm": 0.45277833938598633,
      "learning_rate": 7.839135654261706e-05,
      "loss": 1.8989,
      "step": 180
    },
    {
      "epoch": 0.22809123649459784,
      "grad_norm": 0.3656202256679535,
      "learning_rate": 7.719087635054022e-05,
      "loss": 1.911,
      "step": 190
    },
    {
      "epoch": 0.24009603841536614,
      "grad_norm": 0.33644187450408936,
      "learning_rate": 7.599039615846338e-05,
      "loss": 1.7805,
      "step": 200
    },
    {
      "epoch": 0.24009603841536614,
      "eval_loss": 1.8699440956115723,
      "eval_runtime": 39.0269,
      "eval_samples_per_second": 25.623,
      "eval_steps_per_second": 4.279,
      "step": 200
    },
    {
      "epoch": 0.25210084033613445,
      "grad_norm": 0.24649439752101898,
      "learning_rate": 7.478991596638657e-05,
      "loss": 1.841,
      "step": 210
    },
    {
      "epoch": 0.26410564225690275,
      "grad_norm": 0.30980348587036133,
      "learning_rate": 7.358943577430972e-05,
      "loss": 1.9065,
      "step": 220
    },
    {
      "epoch": 0.27611044417767105,
      "grad_norm": 0.2731610834598541,
      "learning_rate": 7.23889555822329e-05,
      "loss": 1.8713,
      "step": 230
    },
    {
      "epoch": 0.28811524609843936,
      "grad_norm": 0.3211459517478943,
      "learning_rate": 7.118847539015606e-05,
      "loss": 1.8719,
      "step": 240
    },
    {
      "epoch": 0.30012004801920766,
      "grad_norm": 0.3356248736381531,
      "learning_rate": 6.998799519807924e-05,
      "loss": 1.8211,
      "step": 250
    },
    {
      "epoch": 0.31212484993997597,
      "grad_norm": 0.35724079608917236,
      "learning_rate": 6.878751500600241e-05,
      "loss": 1.8773,
      "step": 260
    },
    {
      "epoch": 0.3241296518607443,
      "grad_norm": 0.5718697905540466,
      "learning_rate": 6.758703481392558e-05,
      "loss": 1.8231,
      "step": 270
    },
    {
      "epoch": 0.33613445378151263,
      "grad_norm": 0.32961568236351013,
      "learning_rate": 6.638655462184874e-05,
      "loss": 1.9494,
      "step": 280
    },
    {
      "epoch": 0.34813925570228094,
      "grad_norm": 0.3693414628505707,
      "learning_rate": 6.518607442977191e-05,
      "loss": 1.835,
      "step": 290
    },
    {
      "epoch": 0.36014405762304924,
      "grad_norm": 0.4198884665966034,
      "learning_rate": 6.398559423769508e-05,
      "loss": 1.8722,
      "step": 300
    },
    {
      "epoch": 0.36014405762304924,
      "eval_loss": 1.8641259670257568,
      "eval_runtime": 39.2717,
      "eval_samples_per_second": 25.464,
      "eval_steps_per_second": 4.252,
      "step": 300
    },
    {
      "epoch": 0.37214885954381755,
      "grad_norm": 0.3070141077041626,
      "learning_rate": 6.278511404561825e-05,
      "loss": 1.8771,
      "step": 310
    },
    {
      "epoch": 0.38415366146458585,
      "grad_norm": 0.43982160091400146,
      "learning_rate": 6.158463385354142e-05,
      "loss": 1.8954,
      "step": 320
    },
    {
      "epoch": 0.39615846338535415,
      "grad_norm": 0.2642657458782196,
      "learning_rate": 6.038415366146459e-05,
      "loss": 1.8977,
      "step": 330
    },
    {
      "epoch": 0.40816326530612246,
      "grad_norm": 0.25595635175704956,
      "learning_rate": 5.918367346938776e-05,
      "loss": 1.8949,
      "step": 340
    },
    {
      "epoch": 0.42016806722689076,
      "grad_norm": 0.27742770314216614,
      "learning_rate": 5.7983193277310935e-05,
      "loss": 1.8223,
      "step": 350
    },
    {
      "epoch": 0.43217286914765907,
      "grad_norm": 0.2831798791885376,
      "learning_rate": 5.6782713085234096e-05,
      "loss": 1.8477,
      "step": 360
    },
    {
      "epoch": 0.44417767106842737,
      "grad_norm": 0.3077690005302429,
      "learning_rate": 5.558223289315727e-05,
      "loss": 1.9376,
      "step": 370
    },
    {
      "epoch": 0.4561824729891957,
      "grad_norm": 0.5014916658401489,
      "learning_rate": 5.438175270108043e-05,
      "loss": 1.9241,
      "step": 380
    },
    {
      "epoch": 0.468187274909964,
      "grad_norm": 0.7313567399978638,
      "learning_rate": 5.31812725090036e-05,
      "loss": 1.8995,
      "step": 390
    },
    {
      "epoch": 0.4801920768307323,
      "grad_norm": 0.26589518785476685,
      "learning_rate": 5.1980792316926776e-05,
      "loss": 1.95,
      "step": 400
    },
    {
      "epoch": 0.4801920768307323,
      "eval_loss": 1.8611316680908203,
      "eval_runtime": 39.4525,
      "eval_samples_per_second": 25.347,
      "eval_steps_per_second": 4.233,
      "step": 400
    },
    {
      "epoch": 0.4921968787515006,
      "grad_norm": 0.293312668800354,
      "learning_rate": 5.078031212484994e-05,
      "loss": 1.8592,
      "step": 410
    },
    {
      "epoch": 0.5042016806722689,
      "grad_norm": 0.35831350088119507,
      "learning_rate": 4.957983193277311e-05,
      "loss": 1.9295,
      "step": 420
    },
    {
      "epoch": 0.5162064825930373,
      "grad_norm": 0.33949533104896545,
      "learning_rate": 4.837935174069628e-05,
      "loss": 1.8581,
      "step": 430
    },
    {
      "epoch": 0.5282112845138055,
      "grad_norm": 0.5358735918998718,
      "learning_rate": 4.717887154861945e-05,
      "loss": 1.8824,
      "step": 440
    },
    {
      "epoch": 0.5402160864345739,
      "grad_norm": 0.3915724456310272,
      "learning_rate": 4.5978391356542624e-05,
      "loss": 1.8699,
      "step": 450
    },
    {
      "epoch": 0.5522208883553421,
      "grad_norm": 0.3336695730686188,
      "learning_rate": 4.477791116446579e-05,
      "loss": 1.9749,
      "step": 460
    },
    {
      "epoch": 0.5642256902761105,
      "grad_norm": 0.3887428641319275,
      "learning_rate": 4.3577430972388954e-05,
      "loss": 1.8134,
      "step": 470
    },
    {
      "epoch": 0.5762304921968787,
      "grad_norm": 0.26423540711402893,
      "learning_rate": 4.237695078031212e-05,
      "loss": 1.8495,
      "step": 480
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.30970582365989685,
      "learning_rate": 4.11764705882353e-05,
      "loss": 1.8386,
      "step": 490
    },
    {
      "epoch": 0.6002400960384153,
      "grad_norm": 0.38180047273635864,
      "learning_rate": 3.9975990396158466e-05,
      "loss": 1.8729,
      "step": 500
    },
    {
      "epoch": 0.6002400960384153,
      "eval_loss": 1.8590781688690186,
      "eval_runtime": 39.0438,
      "eval_samples_per_second": 25.612,
      "eval_steps_per_second": 4.277,
      "step": 500
    },
    {
      "epoch": 0.6122448979591837,
      "grad_norm": 0.4111509621143341,
      "learning_rate": 3.8775510204081634e-05,
      "loss": 1.833,
      "step": 510
    },
    {
      "epoch": 0.6242496998799519,
      "grad_norm": 0.515201210975647,
      "learning_rate": 3.75750300120048e-05,
      "loss": 1.8744,
      "step": 520
    },
    {
      "epoch": 0.6362545018007203,
      "grad_norm": 0.2900288701057434,
      "learning_rate": 3.637454981992797e-05,
      "loss": 1.8364,
      "step": 530
    },
    {
      "epoch": 0.6482593037214885,
      "grad_norm": 0.2963515520095825,
      "learning_rate": 3.517406962785114e-05,
      "loss": 1.8469,
      "step": 540
    },
    {
      "epoch": 0.6602641056422569,
      "grad_norm": 0.41035813093185425,
      "learning_rate": 3.3973589435774314e-05,
      "loss": 1.9474,
      "step": 550
    },
    {
      "epoch": 0.6722689075630253,
      "grad_norm": 0.246292382478714,
      "learning_rate": 3.277310924369748e-05,
      "loss": 1.8128,
      "step": 560
    },
    {
      "epoch": 0.6842737094837935,
      "grad_norm": 0.3911287188529968,
      "learning_rate": 3.157262905162065e-05,
      "loss": 1.8663,
      "step": 570
    },
    {
      "epoch": 0.6962785114045619,
      "grad_norm": 0.3035126328468323,
      "learning_rate": 3.037214885954382e-05,
      "loss": 1.8921,
      "step": 580
    },
    {
      "epoch": 0.7082833133253301,
      "grad_norm": 0.2977759540081024,
      "learning_rate": 2.917166866746699e-05,
      "loss": 1.8288,
      "step": 590
    },
    {
      "epoch": 0.7202881152460985,
      "grad_norm": 0.3059289753437042,
      "learning_rate": 2.797118847539016e-05,
      "loss": 1.8482,
      "step": 600
    },
    {
      "epoch": 0.7202881152460985,
      "eval_loss": 1.8568615913391113,
      "eval_runtime": 39.5808,
      "eval_samples_per_second": 25.265,
      "eval_steps_per_second": 4.219,
      "step": 600
    },
    {
      "epoch": 0.7322929171668667,
      "grad_norm": 0.36612552404403687,
      "learning_rate": 2.6770708283313327e-05,
      "loss": 1.8577,
      "step": 610
    },
    {
      "epoch": 0.7442977190876351,
      "grad_norm": 0.2968533933162689,
      "learning_rate": 2.5570228091236498e-05,
      "loss": 1.8295,
      "step": 620
    },
    {
      "epoch": 0.7563025210084033,
      "grad_norm": 0.40536123514175415,
      "learning_rate": 2.4369747899159663e-05,
      "loss": 1.9242,
      "step": 630
    },
    {
      "epoch": 0.7683073229291717,
      "grad_norm": 0.3592563271522522,
      "learning_rate": 2.3169267707082835e-05,
      "loss": 1.8677,
      "step": 640
    },
    {
      "epoch": 0.78031212484994,
      "grad_norm": 0.3828364908695221,
      "learning_rate": 2.1968787515006003e-05,
      "loss": 1.9055,
      "step": 650
    },
    {
      "epoch": 0.7923169267707083,
      "grad_norm": 0.26257479190826416,
      "learning_rate": 2.076830732292917e-05,
      "loss": 1.9383,
      "step": 660
    },
    {
      "epoch": 0.8043217286914766,
      "grad_norm": 0.3253800570964813,
      "learning_rate": 1.9567827130852343e-05,
      "loss": 1.8142,
      "step": 670
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 0.3129618167877197,
      "learning_rate": 1.836734693877551e-05,
      "loss": 1.8447,
      "step": 680
    },
    {
      "epoch": 0.8283313325330132,
      "grad_norm": 0.32753467559814453,
      "learning_rate": 1.7166866746698683e-05,
      "loss": 1.8617,
      "step": 690
    },
    {
      "epoch": 0.8403361344537815,
      "grad_norm": 0.28356197476387024,
      "learning_rate": 1.5966386554621848e-05,
      "loss": 1.768,
      "step": 700
    },
    {
      "epoch": 0.8403361344537815,
      "eval_loss": 1.8553369045257568,
      "eval_runtime": 39.3665,
      "eval_samples_per_second": 25.402,
      "eval_steps_per_second": 4.242,
      "step": 700
    },
    {
      "epoch": 0.8523409363745498,
      "grad_norm": 0.42871856689453125,
      "learning_rate": 1.4765906362545018e-05,
      "loss": 1.8756,
      "step": 710
    },
    {
      "epoch": 0.8643457382953181,
      "grad_norm": 0.32104918360710144,
      "learning_rate": 1.3565426170468188e-05,
      "loss": 1.8396,
      "step": 720
    },
    {
      "epoch": 0.8763505402160864,
      "grad_norm": 0.3462695777416229,
      "learning_rate": 1.2364945978391356e-05,
      "loss": 1.8499,
      "step": 730
    },
    {
      "epoch": 0.8883553421368547,
      "grad_norm": 0.38381311297416687,
      "learning_rate": 1.1164465786314526e-05,
      "loss": 1.8353,
      "step": 740
    },
    {
      "epoch": 0.9003601440576231,
      "grad_norm": 0.48733091354370117,
      "learning_rate": 9.963985594237696e-06,
      "loss": 1.8566,
      "step": 750
    },
    {
      "epoch": 0.9123649459783914,
      "grad_norm": 0.35725510120391846,
      "learning_rate": 8.763505402160866e-06,
      "loss": 1.9016,
      "step": 760
    },
    {
      "epoch": 0.9243697478991597,
      "grad_norm": 0.2945646047592163,
      "learning_rate": 7.563025210084033e-06,
      "loss": 1.7727,
      "step": 770
    },
    {
      "epoch": 0.936374549819928,
      "grad_norm": 0.2922873795032501,
      "learning_rate": 6.362545018007203e-06,
      "loss": 1.8482,
      "step": 780
    },
    {
      "epoch": 0.9483793517406963,
      "grad_norm": 0.29431605339050293,
      "learning_rate": 5.162064825930372e-06,
      "loss": 1.8867,
      "step": 790
    },
    {
      "epoch": 0.9603841536614646,
      "grad_norm": 0.32220232486724854,
      "learning_rate": 3.9615846338535415e-06,
      "loss": 1.8904,
      "step": 800
    },
    {
      "epoch": 0.9603841536614646,
      "eval_loss": 1.854928970336914,
      "eval_runtime": 39.3819,
      "eval_samples_per_second": 25.392,
      "eval_steps_per_second": 4.241,
      "step": 800
    },
    {
      "epoch": 0.9723889555822329,
      "grad_norm": 0.2991810142993927,
      "learning_rate": 2.7611044417767106e-06,
      "loss": 1.8523,
      "step": 810
    },
    {
      "epoch": 0.9843937575030012,
      "grad_norm": 0.3944421112537384,
      "learning_rate": 1.56062424969988e-06,
      "loss": 1.8502,
      "step": 820
    },
    {
      "epoch": 0.9963985594237695,
      "grad_norm": 0.3155655264854431,
      "learning_rate": 3.601440576230492e-07,
      "loss": 1.9278,
      "step": 830
    }
  ],
  "logging_steps": 10,
  "max_steps": 833,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3760243809940275e+17,
  "train_batch_size": 12,
  "trial_name": null,
  "trial_params": null
}
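
A minimal sketch for inspecting this log, assuming the checkpoint layout from the header (relative path checkpoint-833/trainer_state.json) and that matplotlib is available. It parses log_history, separates the per-10-step training losses from the per-100-step eval losses, and checks that the logged learning rates are consistent with a linear decay from an initial rate of 1e-4 over max_steps = 833; the initial rate is inferred from the logged values, not stated in the file.

import json

import matplotlib.pyplot as plt

# Assumed relative path; adjust to wherever this checkpoint directory lives.
with open("checkpoint-833/trainer_state.json") as f:
    state = json.load(f)

train_steps, train_loss, lrs = [], [], []
eval_steps, eval_loss = [], []
for entry in state["log_history"]:
    if "loss" in entry:            # training log, written every logging_steps = 10
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
        lrs.append(entry["learning_rate"])
    elif "eval_loss" in entry:     # evaluation log, written every eval_steps = 100
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])

# The recorded learning rates match a linear decay inferred as
# lr(step) = 1e-4 * (1 - step / max_steps) with max_steps = 833.
for step, lr in zip(train_steps, lrs):
    assert abs(lr - 1e-4 * (1 - step / state["max_steps"])) < 1e-9

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")

For this run the script shows the eval loss falling from 1.889587640762329 at step 100 to 1.854928970336914 at step 800, while the training loss settles around 1.8 to 1.9 after the first ~100 steps.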