{
  "best_metric": 0.0837364188034316,
  "best_model_checkpoint": "esm2_t6_8M_lora_ptm_sites_2023-10-09_03-08-23/checkpoint-23309",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 23309,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.01, "learning_rate": 0.00037014933433791045, "loss": 0.4563, "step": 200 },
    { "epoch": 0.02, "learning_rate": 0.0003701270704479516, "loss": 0.2291, "step": 400 },
    { "epoch": 0.03, "learning_rate": 0.00037008979195075377, "loss": 0.1662, "step": 600 },
    { "epoch": 0.03, "learning_rate": 0.0003700375763811688, "loss": 0.1448, "step": 800 },
    { "epoch": 0.04, "learning_rate": 0.00036997042795487093, "loss": 0.1194, "step": 1000 },
    { "epoch": 0.05, "learning_rate": 0.0003698887995939965, "loss": 0.1104, "step": 1200 },
    { "epoch": 0.06, "learning_rate": 0.00036979187750890046, "loss": 0.1126, "step": 1400 },
    { "epoch": 0.07, "learning_rate": 0.00036968004240382616, "loss": 0.0939, "step": 1600 },
    { "epoch": 0.08, "learning_rate": 0.0003695533033078884, "loss": 0.0985, "step": 1800 },
    { "epoch": 0.09, "learning_rate": 0.00036941167045348997, "loss": 0.088, "step": 2000 },
    { "epoch": 0.09, "learning_rate": 0.0003692551552754955, "loss": 0.0839, "step": 2200 },
    { "epoch": 0.1, "learning_rate": 0.0003690837704103084, "loss": 0.0898, "step": 2400 },
    { "epoch": 0.11, "learning_rate": 0.00036889752969485046, "loss": 0.0825, "step": 2600 },
    { "epoch": 0.12, "learning_rate": 0.00036869644816544503, "loss": 0.0825, "step": 2800 },
    { "epoch": 0.13, "learning_rate": 0.00036848054205660264, "loss": 0.0798, "step": 3000 },
    { "epoch": 0.14, "learning_rate": 0.00036824982879971063, "loss": 0.0692, "step": 3200 },
    { "epoch": 0.15, "learning_rate": 0.0003680055912846709, "loss": 0.0622, "step": 3400 },
    { "epoch": 0.15, "learning_rate": 0.00036774539459843937, "loss": 0.0686, "step": 3600 },
    { "epoch": 0.16, "learning_rate": 0.00036747045011699506, "loss": 0.0755, "step": 3800 },
    { "epoch": 0.17, "learning_rate": 0.00036718078003824503, "loss": 0.0675, "step": 4000 },
    { "epoch": 0.18, "learning_rate": 0.0003668764077489815, "loss": 0.0695, "step": 4200 },
    { "epoch": 0.19, "learning_rate": 0.00036655735782299405, "loss": 0.0738, "step": 4400 },
    { "epoch": 0.2, "learning_rate": 0.00036622365601908513, "loss": 0.0601, "step": 4600 },
    { "epoch": 0.21, "learning_rate": 0.0003658753292789907, "loss": 0.0707, "step": 4800 },
    { "epoch": 0.21, "learning_rate": 0.000365512405725205, "loss": 0.0642, "step": 5000 },
    { "epoch": 0.22, "learning_rate": 0.0003651349146587103, "loss": 0.0529, "step": 5200 },
    { "epoch": 0.23, "learning_rate": 0.00036474288655661055, "loss": 0.0527, "step": 5400 },
    { "epoch": 0.24, "learning_rate": 0.0003643384217654436, "loss": 0.0625, "step": 5600 },
    { "epoch": 0.25, "learning_rate": 0.00036391748799477383, "loss": 0.0546, "step": 5800 },
    { "epoch": 0.26, "learning_rate": 0.00036348211547861136, "loss": 0.069, "step": 6000 },
    { "epoch": 0.27, "learning_rate": 0.00036303233936717483, "loss": 0.0597, "step": 6200 },
    { "epoch": 0.27, "learning_rate": 0.00036256819597357105, "loss": 0.0598, "step": 6400 },
    { "epoch": 0.28, "learning_rate": 0.00036208972277086324, "loss": 0.0499, "step": 6600 },
    { "epoch": 0.29, "learning_rate": 0.00036159695838904537, "loss": 0.0526, "step": 6800 },
    { "epoch": 0.3, "learning_rate": 0.0003610899426119236, "loss": 0.0708, "step": 7000 },
    { "epoch": 0.31, "learning_rate": 0.00036056871637390423, "loss": 0.0506, "step": 7200 },
    { "epoch": 0.32, "learning_rate": 0.0003600333217566887, "loss": 0.0654, "step": 7400 },
    { "epoch": 0.33, "learning_rate": 0.00035948380198587616, "loss": 0.0529, "step": 7600 },
    { "epoch": 0.33, "learning_rate": 0.0003589230543814262, "loss": 0.0698, "step": 7800 },
    { "epoch": 0.34, "learning_rate": 0.00035834548859962624, "loss": 0.0499, "step": 8000 },
    { "epoch": 0.35, "learning_rate": 0.0003577539339330621, "loss": 0.0475, "step": 8200 },
    { "epoch": 0.36, "learning_rate": 0.0003571484381414685, "loss": 0.0573, "step": 8400 },
    { "epoch": 0.37, "learning_rate": 0.00035652905011013025, "loss": 0.0445, "step": 8600 },
    { "epoch": 0.38, "learning_rate": 0.00035589581984593567, "loss": 0.0572, "step": 8800 },
    { "epoch": 0.39, "learning_rate": 0.00035524879847333905, "loss": 0.0552, "step": 9000 },
    { "epoch": 0.39, "learning_rate": 0.00035458803823023286, "loss": 0.0368, "step": 9200 },
    { "epoch": 0.4, "learning_rate": 0.0003539135924637307, "loss": 0.0464, "step": 9400 },
    { "epoch": 0.41, "learning_rate": 0.00035322551562586, "loss": 0.0478, "step": 9600 },
    { "epoch": 0.42, "learning_rate": 0.0003525238632691658, "loss": 0.064, "step": 9800 },
    { "epoch": 0.43, "learning_rate": 0.0003518086920422257, "loss": 0.0501, "step": 10000 },
    { "epoch": 0.44, "learning_rate": 0.00035108373623456347, "loss": 0.0554, "step": 10200 },
    { "epoch": 0.45, "learning_rate": 0.000350341768437432, "loss": 0.0433, "step": 10400 },
    { "epoch": 0.45, "learning_rate": 0.00034958645794357904, "loss": 0.0546, "step": 10600 },
    { "epoch": 0.46, "learning_rate": 0.0003488178657337234, "loss": 0.0556, "step": 10800 },
    { "epoch": 0.47, "learning_rate": 0.0003480360538608961, "loss": 0.0597, "step": 11000 },
    { "epoch": 0.48, "learning_rate": 0.0003472410854454305, "loss": 0.0526, "step": 11200 },
    { "epoch": 0.49, "learning_rate": 0.00034643302466986575, "loss": 0.0559, "step": 11400 },
    { "epoch": 0.5, "learning_rate": 0.0003456119367737657, "loss": 0.0514, "step": 11600 },
    { "epoch": 0.51, "learning_rate": 0.00034477788804845107, "loss": 0.056, "step": 11800 },
    { "epoch": 0.51, "learning_rate": 0.00034393094583164754, "loss": 0.0491, "step": 12000 },
    { "epoch": 0.52, "learning_rate": 0.00034307117850204926, "loss": 0.0386, "step": 12200 },
    { "epoch": 0.53, "learning_rate": 0.0003421986554737983, "loss": 0.047, "step": 12400 },
    { "epoch": 0.54, "learning_rate": 0.0003413134471908803, "loss": 0.0587, "step": 12600 },
    { "epoch": 0.55, "learning_rate": 0.00034041562512143693, "loss": 0.0442, "step": 12800 },
    { "epoch": 0.56, "learning_rate": 0.00033950526175199636, "loss": 0.0517, "step": 13000 },
    { "epoch": 0.57, "learning_rate": 0.0003385824305816204, "loss": 0.0446, "step": 13200 },
    { "epoch": 0.57, "learning_rate": 0.0003376472061159709, "loss": 0.0525, "step": 13400 },
    { "epoch": 0.58, "learning_rate": 0.00033670443208693313, "loss": 0.0482, "step": 13600 },
    { "epoch": 0.59, "learning_rate": 0.00033574470955847287, "loss": 0.0489, "step": 13800 },
    { "epoch": 0.6, "learning_rate": 0.0003347728228408742, "loss": 0.0684, "step": 14000 },
    { "epoch": 0.61, "learning_rate": 0.0003337888504003458, "loss": 0.0506, "step": 14200 },
    { "epoch": 0.62, "learning_rate": 0.00033279287167884867, "loss": 0.0563, "step": 14400 },
    { "epoch": 0.63, "learning_rate": 0.00033178496708768257, "loss": 0.0356, "step": 14600 },
    { "epoch": 0.63, "learning_rate": 0.0003307652180009936, "loss": 0.0573, "step": 14800 },
    { "epoch": 0.64, "learning_rate": 0.0003297337067492049, "loss": 0.0498, "step": 15000 },
    { "epoch": 0.65, "learning_rate": 0.00032869051661236907, "loss": 0.0493, "step": 15200 },
    { "epoch": 0.66, "learning_rate": 0.00032763573181344475, "loss": 0.0479, "step": 15400 },
    { "epoch": 0.67, "learning_rate": 0.0003265694375114969, "loss": 0.0497, "step": 15600 },
    { "epoch": 0.68, "learning_rate": 0.0003254917197948212, "loss": 0.0543, "step": 15800 },
    { "epoch": 0.69, "learning_rate": 0.0003244081389988359, "loss": 0.0537, "step": 16000 },
    { "epoch": 0.7, "learning_rate": 0.00032330789242185896, "loss": 0.0743, "step": 16200 },
    { "epoch": 0.7, "learning_rate": 0.00032219648575413284, "loss": 0.0346, "step": 16400 },
    { "epoch": 0.71, "learning_rate": 0.0003210740087261434, "loss": 0.0454, "step": 16600 },
    { "epoch": 0.72, "learning_rate": 0.00031994055196215285, "loss": 0.0563, "step": 16800 },
    { "epoch": 0.73, "learning_rate": 0.0003187962069728827, "loss": 0.0456, "step": 17000 },
    { "epoch": 0.74, "learning_rate": 0.000317641066148126, "loss": 0.0445, "step": 17200 },
    { "epoch": 0.75, "learning_rate": 0.0003164752227492878, "loss": 0.0416, "step": 17400 },
    { "epoch": 0.76, "learning_rate": 0.00031529877090185585, "loss": 0.0547, "step": 17600 },
    { "epoch": 0.76, "learning_rate": 0.00031411180558780117, "loss": 0.0504, "step": 17800 },
    { "epoch": 0.77, "learning_rate": 0.0003129144226379097, "loss": 0.0437, "step": 18000 },
    { "epoch": 0.78, "learning_rate": 0.00031171278275606484, "loss": 0.0479, "step": 18200 },
    { "epoch": 0.79, "learning_rate": 0.0003104949062567452, "loss": 0.0512, "step": 18400 },
    { "epoch": 0.8, "learning_rate": 0.0003092669041354337, "loss": 0.0477, "step": 18600 },
    { "epoch": 0.81, "learning_rate": 0.0003080288755360624, "loss": 0.0431, "step": 18800 },
    { "epoch": 0.82, "learning_rate": 0.0003067809204120605, "loss": 0.05, "step": 19000 },
    { "epoch": 0.82, "learning_rate": 0.00030552313951828493, "loss": 0.0557, "step": 19200 },
    { "epoch": 0.83, "learning_rate": 0.0003042556344028853, "loss": 0.0388, "step": 19400 },
    { "epoch": 0.84, "learning_rate": 0.00030297850739910594, "loss": 0.0515, "step": 19600 },
    { "epoch": 0.85, "learning_rate": 0.00030169186161702326, "loss": 0.0438, "step": 19800 },
    { "epoch": 0.86, "learning_rate": 0.0003003958009352217, "loss": 0.0512, "step": 20000 },
    { "epoch": 0.87, "learning_rate": 0.0002990904299924064, "loss": 0.0499, "step": 20200 },
    { "epoch": 0.88, "learning_rate": 0.00029778244977988057, "loss": 0.0505, "step": 20400 },
    { "epoch": 0.88, "learning_rate": 0.000296458820457796, "loss": 0.0458, "step": 20600 },
    { "epoch": 0.89, "learning_rate": 0.0002951261987306019, "loss": 0.0368, "step": 20800 },
    { "epoch": 0.9, "learning_rate": 0.0002937846921887947, "loss": 0.0575, "step": 21000 },
    { "epoch": 0.91, "learning_rate": 0.00029243440914019487, "loss": 0.0444, "step": 21200 },
    { "epoch": 0.92, "learning_rate": 0.0002910754586012025, "loss": 0.0467, "step": 21400 },
    { "epoch": 0.93, "learning_rate": 0.00028970795028799613, "loss": 0.0383, "step": 21600 },
    { "epoch": 0.94, "learning_rate": 0.0002883319946076741, "loss": 0.0486, "step": 21800 },
    { "epoch": 0.94, "learning_rate": 0.0002869477026493413, "loss": 0.0576, "step": 22000 },
    { "epoch": 0.95, "learning_rate": 0.0002855551861751398, "loss": 0.0551, "step": 22200 },
    { "epoch": 0.96, "learning_rate": 0.00028415455761122624, "loss": 0.0451, "step": 22400 },
    { "epoch": 0.97, "learning_rate": 0.00028274593003869416, "loss": 0.0479, "step": 22600 },
    { "epoch": 0.98, "learning_rate": 0.0002813365191744383, "loss": 0.0519, "step": 22800 },
    { "epoch": 0.99, "learning_rate": 0.0002799122739710694, "loss": 0.049, "step": 23000 },
    { "epoch": 1.0, "learning_rate": 0.00027848037226393073, "loss": 0.0489, "step": 23200 },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9363692882415535,
      "eval_auc": 0.853910045782194,
      "eval_f1": 0.0837364188034316,
      "eval_loss": 0.6555151343345642,
      "eval_mcc": 0.17516389459528245,
      "eval_precision": 0.04427294640937209,
      "eval_recall": 0.7708239966353413,
      "eval_runtime": 1080.6969,
      "eval_samples_per_second": 102.587,
      "eval_steps_per_second": 5.399,
      "step": 23309
    }
  ],
  "logging_steps": 200,
  "max_steps": 69927,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.97635804552818e+16,
  "trial_name": null,
  "trial_params": null
}