{
"best_metric": 0.9558648467063904,
"best_model_checkpoint": "./lora-alpaca/checkpoint-1800",
"epoch": 2.2886204704386524,
"global_step": 1800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.6999999999999996e-05,
"loss": 1.6482,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 5.399999999999999e-05,
"loss": 1.5884,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 8.4e-05,
"loss": 1.5166,
"step": 30
},
{
"epoch": 0.05,
"learning_rate": 0.00011399999999999999,
"loss": 1.3197,
"step": 40
},
{
"epoch": 0.06,
"learning_rate": 0.00014399999999999998,
"loss": 1.1732,
"step": 50
},
{
"epoch": 0.08,
"learning_rate": 0.00017399999999999997,
"loss": 1.1358,
"step": 60
},
{
"epoch": 0.09,
"learning_rate": 0.000204,
"loss": 1.0792,
"step": 70
},
{
"epoch": 0.1,
"learning_rate": 0.000234,
"loss": 1.062,
"step": 80
},
{
"epoch": 0.11,
"learning_rate": 0.00026399999999999997,
"loss": 1.0665,
"step": 90
},
{
"epoch": 0.13,
"learning_rate": 0.000294,
"loss": 1.0641,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 0.00029893711248892826,
"loss": 1.0449,
"step": 110
},
{
"epoch": 0.15,
"learning_rate": 0.00029760850310008854,
"loss": 1.0356,
"step": 120
},
{
"epoch": 0.17,
"learning_rate": 0.0002962798937112489,
"loss": 1.0537,
"step": 130
},
{
"epoch": 0.18,
"learning_rate": 0.0002949512843224092,
"loss": 1.0285,
"step": 140
},
{
"epoch": 0.19,
"learning_rate": 0.0002936226749335695,
"loss": 1.0197,
"step": 150
},
{
"epoch": 0.2,
"learning_rate": 0.0002922940655447298,
"loss": 1.0171,
"step": 160
},
{
"epoch": 0.22,
"learning_rate": 0.00029096545615589015,
"loss": 1.0112,
"step": 170
},
{
"epoch": 0.23,
"learning_rate": 0.0002896368467670505,
"loss": 1.0299,
"step": 180
},
{
"epoch": 0.24,
"learning_rate": 0.00028830823737821076,
"loss": 1.0243,
"step": 190
},
{
"epoch": 0.25,
"learning_rate": 0.0002869796279893711,
"loss": 1.0118,
"step": 200
},
{
"epoch": 0.25,
"eval_loss": 1.0055800676345825,
"eval_runtime": 82.8279,
"eval_samples_per_second": 24.146,
"eval_steps_per_second": 0.761,
"step": 200
},
{
"epoch": 0.27,
"learning_rate": 0.00028565101860053143,
"loss": 0.9956,
"step": 210
},
{
"epoch": 0.28,
"learning_rate": 0.00028432240921169176,
"loss": 1.0039,
"step": 220
},
{
"epoch": 0.29,
"learning_rate": 0.00028299379982285204,
"loss": 1.0026,
"step": 230
},
{
"epoch": 0.31,
"learning_rate": 0.00028166519043401237,
"loss": 1.0148,
"step": 240
},
{
"epoch": 0.32,
"learning_rate": 0.0002803365810451727,
"loss": 1.0065,
"step": 250
},
{
"epoch": 0.33,
"learning_rate": 0.00027900797165633304,
"loss": 0.9965,
"step": 260
},
{
"epoch": 0.34,
"learning_rate": 0.0002776793622674933,
"loss": 1.0037,
"step": 270
},
{
"epoch": 0.36,
"learning_rate": 0.00027635075287865365,
"loss": 1.0149,
"step": 280
},
{
"epoch": 0.37,
"learning_rate": 0.000275022143489814,
"loss": 1.0136,
"step": 290
},
{
"epoch": 0.38,
"learning_rate": 0.00027369353410097426,
"loss": 0.9937,
"step": 300
},
{
"epoch": 0.39,
"learning_rate": 0.0002723649247121346,
"loss": 0.987,
"step": 310
},
{
"epoch": 0.41,
"learning_rate": 0.0002710363153232949,
"loss": 1.016,
"step": 320
},
{
"epoch": 0.42,
"learning_rate": 0.00026970770593445526,
"loss": 1.0136,
"step": 330
},
{
"epoch": 0.43,
"learning_rate": 0.00026837909654561554,
"loss": 0.9926,
"step": 340
},
{
"epoch": 0.45,
"learning_rate": 0.00026705048715677587,
"loss": 0.9922,
"step": 350
},
{
"epoch": 0.46,
"learning_rate": 0.0002657218777679362,
"loss": 0.9883,
"step": 360
},
{
"epoch": 0.47,
"learning_rate": 0.00026439326837909654,
"loss": 1.0076,
"step": 370
},
{
"epoch": 0.48,
"learning_rate": 0.0002630646589902568,
"loss": 0.9873,
"step": 380
},
{
"epoch": 0.5,
"learning_rate": 0.0002617360496014172,
"loss": 0.988,
"step": 390
},
{
"epoch": 0.51,
"learning_rate": 0.0002604074402125775,
"loss": 0.9902,
"step": 400
},
{
"epoch": 0.51,
"eval_loss": 0.9863091111183167,
"eval_runtime": 82.9481,
"eval_samples_per_second": 24.111,
"eval_steps_per_second": 0.76,
"step": 400
},
{
"epoch": 0.52,
"learning_rate": 0.00025907883082373776,
"loss": 0.9966,
"step": 410
},
{
"epoch": 0.53,
"learning_rate": 0.00025775022143489815,
"loss": 0.9914,
"step": 420
},
{
"epoch": 0.55,
"learning_rate": 0.0002564216120460584,
"loss": 0.9924,
"step": 430
},
{
"epoch": 0.56,
"learning_rate": 0.00025509300265721876,
"loss": 0.9948,
"step": 440
},
{
"epoch": 0.57,
"learning_rate": 0.0002537643932683791,
"loss": 0.9879,
"step": 450
},
{
"epoch": 0.58,
"learning_rate": 0.0002524357838795394,
"loss": 0.988,
"step": 460
},
{
"epoch": 0.6,
"learning_rate": 0.0002511071744906997,
"loss": 0.9928,
"step": 470
},
{
"epoch": 0.61,
"learning_rate": 0.00024977856510186003,
"loss": 0.9916,
"step": 480
},
{
"epoch": 0.62,
"learning_rate": 0.00024844995571302037,
"loss": 0.994,
"step": 490
},
{
"epoch": 0.64,
"learning_rate": 0.0002471213463241807,
"loss": 0.996,
"step": 500
},
{
"epoch": 0.65,
"learning_rate": 0.000245792736935341,
"loss": 0.9783,
"step": 510
},
{
"epoch": 0.66,
"learning_rate": 0.0002444641275465013,
"loss": 0.9798,
"step": 520
},
{
"epoch": 0.67,
"learning_rate": 0.00024313551815766162,
"loss": 0.9791,
"step": 530
},
{
"epoch": 0.69,
"learning_rate": 0.00024180690876882195,
"loss": 0.9894,
"step": 540
},
{
"epoch": 0.7,
"learning_rate": 0.00024047829937998225,
"loss": 0.9904,
"step": 550
},
{
"epoch": 0.71,
"learning_rate": 0.00023914968999114256,
"loss": 0.9942,
"step": 560
},
{
"epoch": 0.72,
"learning_rate": 0.0002378210806023029,
"loss": 0.9897,
"step": 570
},
{
"epoch": 0.74,
"learning_rate": 0.0002364924712134632,
"loss": 0.9741,
"step": 580
},
{
"epoch": 0.75,
"learning_rate": 0.00023516386182462353,
"loss": 0.9631,
"step": 590
},
{
"epoch": 0.76,
"learning_rate": 0.00023383525243578384,
"loss": 0.9784,
"step": 600
},
{
"epoch": 0.76,
"eval_loss": 0.9769166111946106,
"eval_runtime": 78.8243,
"eval_samples_per_second": 25.373,
"eval_steps_per_second": 0.799,
"step": 600
},
{
"epoch": 0.78,
"learning_rate": 0.0002325066430469442,
"loss": 0.9703,
"step": 610
},
{
"epoch": 0.79,
"learning_rate": 0.00023117803365810448,
"loss": 0.9914,
"step": 620
},
{
"epoch": 0.8,
"learning_rate": 0.00022984942426926484,
"loss": 0.9522,
"step": 630
},
{
"epoch": 0.81,
"learning_rate": 0.00022852081488042514,
"loss": 0.9773,
"step": 640
},
{
"epoch": 0.83,
"learning_rate": 0.00022719220549158547,
"loss": 0.9954,
"step": 650
},
{
"epoch": 0.84,
"learning_rate": 0.00022586359610274578,
"loss": 0.9768,
"step": 660
},
{
"epoch": 0.85,
"learning_rate": 0.0002245349867139061,
"loss": 0.9908,
"step": 670
},
{
"epoch": 0.86,
"learning_rate": 0.00022320637732506642,
"loss": 0.9944,
"step": 680
},
{
"epoch": 0.88,
"learning_rate": 0.00022187776793622672,
"loss": 0.9772,
"step": 690
},
{
"epoch": 0.89,
"learning_rate": 0.00022054915854738706,
"loss": 0.9697,
"step": 700
},
{
"epoch": 0.9,
"learning_rate": 0.00021922054915854736,
"loss": 0.9652,
"step": 710
},
{
"epoch": 0.92,
"learning_rate": 0.0002178919397697077,
"loss": 0.9738,
"step": 720
},
{
"epoch": 0.93,
"learning_rate": 0.000216563330380868,
"loss": 0.9892,
"step": 730
},
{
"epoch": 0.94,
"learning_rate": 0.00021523472099202833,
"loss": 0.9731,
"step": 740
},
{
"epoch": 0.95,
"learning_rate": 0.00021390611160318864,
"loss": 0.985,
"step": 750
},
{
"epoch": 0.97,
"learning_rate": 0.00021257750221434897,
"loss": 0.9918,
"step": 760
},
{
"epoch": 0.98,
"learning_rate": 0.00021124889282550928,
"loss": 0.9603,
"step": 770
},
{
"epoch": 0.99,
"learning_rate": 0.0002099202834366696,
"loss": 0.9686,
"step": 780
},
{
"epoch": 1.0,
"learning_rate": 0.00020859167404782992,
"loss": 0.9761,
"step": 790
},
{
"epoch": 1.02,
"learning_rate": 0.00020726306465899025,
"loss": 0.9812,
"step": 800
},
{
"epoch": 1.02,
"eval_loss": 0.970382571220398,
"eval_runtime": 78.9078,
"eval_samples_per_second": 25.346,
"eval_steps_per_second": 0.798,
"step": 800
},
{
"epoch": 1.03,
"learning_rate": 0.00020593445527015056,
"loss": 0.95,
"step": 810
},
{
"epoch": 1.04,
"learning_rate": 0.0002046058458813109,
"loss": 0.9575,
"step": 820
},
{
"epoch": 1.06,
"learning_rate": 0.0002032772364924712,
"loss": 0.9826,
"step": 830
},
{
"epoch": 1.07,
"learning_rate": 0.0002019486271036315,
"loss": 0.973,
"step": 840
},
{
"epoch": 1.08,
"learning_rate": 0.00020062001771479183,
"loss": 0.9623,
"step": 850
},
{
"epoch": 1.09,
"learning_rate": 0.00019929140832595214,
"loss": 0.9618,
"step": 860
},
{
"epoch": 1.11,
"learning_rate": 0.00019796279893711247,
"loss": 0.9594,
"step": 870
},
{
"epoch": 1.12,
"learning_rate": 0.00019663418954827278,
"loss": 0.9827,
"step": 880
},
{
"epoch": 1.13,
"learning_rate": 0.0001953055801594331,
"loss": 0.9608,
"step": 890
},
{
"epoch": 1.14,
"learning_rate": 0.00019397697077059342,
"loss": 0.967,
"step": 900
},
{
"epoch": 1.16,
"learning_rate": 0.00019264836138175375,
"loss": 0.9498,
"step": 910
},
{
"epoch": 1.17,
"learning_rate": 0.00019131975199291405,
"loss": 0.972,
"step": 920
},
{
"epoch": 1.18,
"learning_rate": 0.0001899911426040744,
"loss": 0.9677,
"step": 930
},
{
"epoch": 1.2,
"learning_rate": 0.0001886625332152347,
"loss": 0.9715,
"step": 940
},
{
"epoch": 1.21,
"learning_rate": 0.00018733392382639503,
"loss": 0.9758,
"step": 950
},
{
"epoch": 1.22,
"learning_rate": 0.00018600531443755533,
"loss": 0.9667,
"step": 960
},
{
"epoch": 1.23,
"learning_rate": 0.00018467670504871564,
"loss": 0.9719,
"step": 970
},
{
"epoch": 1.25,
"learning_rate": 0.00018334809565987597,
"loss": 0.9584,
"step": 980
},
{
"epoch": 1.26,
"learning_rate": 0.00018201948627103628,
"loss": 0.9591,
"step": 990
},
{
"epoch": 1.27,
"learning_rate": 0.00018069087688219664,
"loss": 0.959,
"step": 1000
},
{
"epoch": 1.27,
"eval_loss": 0.9663413166999817,
"eval_runtime": 83.6093,
"eval_samples_per_second": 23.921,
"eval_steps_per_second": 0.754,
"step": 1000
},
{
"epoch": 1.28,
"learning_rate": 0.00017936226749335691,
"loss": 0.9545,
"step": 1010
},
{
"epoch": 1.3,
"learning_rate": 0.00017803365810451727,
"loss": 0.9675,
"step": 1020
},
{
"epoch": 1.31,
"learning_rate": 0.00017670504871567758,
"loss": 0.961,
"step": 1030
},
{
"epoch": 1.32,
"learning_rate": 0.0001753764393268379,
"loss": 0.9575,
"step": 1040
},
{
"epoch": 1.34,
"learning_rate": 0.00017404782993799822,
"loss": 0.9636,
"step": 1050
},
{
"epoch": 1.35,
"learning_rate": 0.00017271922054915855,
"loss": 0.9687,
"step": 1060
},
{
"epoch": 1.36,
"learning_rate": 0.00017139061116031886,
"loss": 0.9656,
"step": 1070
},
{
"epoch": 1.37,
"learning_rate": 0.0001700620017714792,
"loss": 0.9493,
"step": 1080
},
{
"epoch": 1.39,
"learning_rate": 0.0001687333923826395,
"loss": 0.9681,
"step": 1090
},
{
"epoch": 1.4,
"learning_rate": 0.00016740478299379983,
"loss": 0.9537,
"step": 1100
},
{
"epoch": 1.41,
"learning_rate": 0.00016607617360496013,
"loss": 0.9468,
"step": 1110
},
{
"epoch": 1.42,
"learning_rate": 0.00016474756421612044,
"loss": 0.957,
"step": 1120
},
{
"epoch": 1.44,
"learning_rate": 0.00016341895482728077,
"loss": 0.9697,
"step": 1130
},
{
"epoch": 1.45,
"learning_rate": 0.00016209034543844108,
"loss": 0.9689,
"step": 1140
},
{
"epoch": 1.46,
"learning_rate": 0.0001607617360496014,
"loss": 0.9691,
"step": 1150
},
{
"epoch": 1.47,
"learning_rate": 0.00015943312666076172,
"loss": 0.9678,
"step": 1160
},
{
"epoch": 1.49,
"learning_rate": 0.00015810451727192205,
"loss": 0.967,
"step": 1170
},
{
"epoch": 1.5,
"learning_rate": 0.00015677590788308235,
"loss": 0.9742,
"step": 1180
},
{
"epoch": 1.51,
"learning_rate": 0.0001554472984942427,
"loss": 0.9438,
"step": 1190
},
{
"epoch": 1.53,
"learning_rate": 0.000154118689105403,
"loss": 0.9688,
"step": 1200
},
{
"epoch": 1.53,
"eval_loss": 0.9622219204902649,
"eval_runtime": 78.9396,
"eval_samples_per_second": 25.336,
"eval_steps_per_second": 0.798,
"step": 1200
},
{
"epoch": 1.54,
"learning_rate": 0.00015279007971656333,
"loss": 0.9655,
"step": 1210
},
{
"epoch": 1.55,
"learning_rate": 0.00015146147032772363,
"loss": 0.957,
"step": 1220
},
{
"epoch": 1.56,
"learning_rate": 0.00015013286093888396,
"loss": 0.9667,
"step": 1230
},
{
"epoch": 1.58,
"learning_rate": 0.00014880425155004427,
"loss": 0.9554,
"step": 1240
},
{
"epoch": 1.59,
"learning_rate": 0.0001474756421612046,
"loss": 0.9691,
"step": 1250
},
{
"epoch": 1.6,
"learning_rate": 0.0001461470327723649,
"loss": 0.9587,
"step": 1260
},
{
"epoch": 1.61,
"learning_rate": 0.00014481842338352524,
"loss": 0.9461,
"step": 1270
},
{
"epoch": 1.63,
"learning_rate": 0.00014348981399468555,
"loss": 0.9607,
"step": 1280
},
{
"epoch": 1.64,
"learning_rate": 0.00014216120460584588,
"loss": 0.9528,
"step": 1290
},
{
"epoch": 1.65,
"learning_rate": 0.00014083259521700619,
"loss": 0.9469,
"step": 1300
},
{
"epoch": 1.67,
"learning_rate": 0.00013950398582816652,
"loss": 0.9415,
"step": 1310
},
{
"epoch": 1.68,
"learning_rate": 0.00013817537643932682,
"loss": 0.9628,
"step": 1320
},
{
"epoch": 1.69,
"learning_rate": 0.00013684676705048713,
"loss": 0.9562,
"step": 1330
},
{
"epoch": 1.7,
"learning_rate": 0.00013551815766164746,
"loss": 0.9579,
"step": 1340
},
{
"epoch": 1.72,
"learning_rate": 0.00013418954827280777,
"loss": 0.9578,
"step": 1350
},
{
"epoch": 1.73,
"learning_rate": 0.0001328609388839681,
"loss": 0.9508,
"step": 1360
},
{
"epoch": 1.74,
"learning_rate": 0.0001315323294951284,
"loss": 0.9596,
"step": 1370
},
{
"epoch": 1.75,
"learning_rate": 0.00013020372010628874,
"loss": 0.9446,
"step": 1380
},
{
"epoch": 1.77,
"learning_rate": 0.00012887511071744907,
"loss": 0.9418,
"step": 1390
},
{
"epoch": 1.78,
"learning_rate": 0.00012754650132860938,
"loss": 0.9447,
"step": 1400
},
{
"epoch": 1.78,
"eval_loss": 0.9596577286720276,
"eval_runtime": 78.8874,
"eval_samples_per_second": 25.353,
"eval_steps_per_second": 0.799,
"step": 1400
},
{
"epoch": 1.79,
"learning_rate": 0.0001262178919397697,
"loss": 0.9597,
"step": 1410
},
{
"epoch": 1.81,
"learning_rate": 0.00012488928255093002,
"loss": 0.9601,
"step": 1420
},
{
"epoch": 1.82,
"learning_rate": 0.00012356067316209035,
"loss": 0.9794,
"step": 1430
},
{
"epoch": 1.83,
"learning_rate": 0.00012223206377325066,
"loss": 0.9295,
"step": 1440
},
{
"epoch": 1.84,
"learning_rate": 0.00012090345438441097,
"loss": 0.9418,
"step": 1450
},
{
"epoch": 1.86,
"learning_rate": 0.00011957484499557128,
"loss": 0.941,
"step": 1460
},
{
"epoch": 1.87,
"learning_rate": 0.0001182462356067316,
"loss": 0.9504,
"step": 1470
},
{
"epoch": 1.88,
"learning_rate": 0.00011691762621789192,
"loss": 0.9489,
"step": 1480
},
{
"epoch": 1.89,
"learning_rate": 0.00011558901682905224,
"loss": 0.9482,
"step": 1490
},
{
"epoch": 1.91,
"learning_rate": 0.00011426040744021257,
"loss": 0.9435,
"step": 1500
},
{
"epoch": 1.92,
"learning_rate": 0.00011293179805137289,
"loss": 0.953,
"step": 1510
},
{
"epoch": 1.93,
"learning_rate": 0.00011160318866253321,
"loss": 0.9468,
"step": 1520
},
{
"epoch": 1.95,
"learning_rate": 0.00011027457927369353,
"loss": 0.9289,
"step": 1530
},
{
"epoch": 1.96,
"learning_rate": 0.00010894596988485385,
"loss": 0.9533,
"step": 1540
},
{
"epoch": 1.97,
"learning_rate": 0.00010761736049601417,
"loss": 0.9591,
"step": 1550
},
{
"epoch": 1.98,
"learning_rate": 0.00010628875110717449,
"loss": 0.951,
"step": 1560
},
{
"epoch": 2.0,
"learning_rate": 0.0001049601417183348,
"loss": 0.9171,
"step": 1570
},
{
"epoch": 2.01,
"learning_rate": 0.00010363153232949512,
"loss": 0.9375,
"step": 1580
},
{
"epoch": 2.02,
"learning_rate": 0.00010230292294065544,
"loss": 0.9568,
"step": 1590
},
{
"epoch": 2.03,
"learning_rate": 0.00010097431355181575,
"loss": 0.9633,
"step": 1600
},
{
"epoch": 2.03,
"eval_loss": 0.9573116302490234,
"eval_runtime": 79.0309,
"eval_samples_per_second": 25.307,
"eval_steps_per_second": 0.797,
"step": 1600
},
{
"epoch": 2.05,
"learning_rate": 9.964570416297607e-05,
"loss": 0.9424,
"step": 1610
},
{
"epoch": 2.06,
"learning_rate": 9.831709477413639e-05,
"loss": 0.9243,
"step": 1620
},
{
"epoch": 2.07,
"learning_rate": 9.698848538529671e-05,
"loss": 0.9415,
"step": 1630
},
{
"epoch": 2.09,
"learning_rate": 9.565987599645703e-05,
"loss": 0.9434,
"step": 1640
},
{
"epoch": 2.1,
"learning_rate": 9.433126660761735e-05,
"loss": 0.9467,
"step": 1650
},
{
"epoch": 2.11,
"learning_rate": 9.300265721877767e-05,
"loss": 0.9516,
"step": 1660
},
{
"epoch": 2.12,
"learning_rate": 9.167404782993798e-05,
"loss": 0.9617,
"step": 1670
},
{
"epoch": 2.14,
"learning_rate": 9.034543844109832e-05,
"loss": 0.9379,
"step": 1680
},
{
"epoch": 2.15,
"learning_rate": 8.901682905225864e-05,
"loss": 0.9455,
"step": 1690
},
{
"epoch": 2.16,
"learning_rate": 8.768821966341896e-05,
"loss": 0.9477,
"step": 1700
},
{
"epoch": 2.17,
"learning_rate": 8.635961027457928e-05,
"loss": 0.9448,
"step": 1710
},
{
"epoch": 2.19,
"learning_rate": 8.50310008857396e-05,
"loss": 0.9361,
"step": 1720
},
{
"epoch": 2.2,
"learning_rate": 8.370239149689991e-05,
"loss": 0.922,
"step": 1730
},
{
"epoch": 2.21,
"learning_rate": 8.237378210806022e-05,
"loss": 0.9442,
"step": 1740
},
{
"epoch": 2.23,
"learning_rate": 8.104517271922054e-05,
"loss": 0.9575,
"step": 1750
},
{
"epoch": 2.24,
"learning_rate": 7.971656333038086e-05,
"loss": 0.929,
"step": 1760
},
{
"epoch": 2.25,
"learning_rate": 7.838795394154118e-05,
"loss": 0.9312,
"step": 1770
},
{
"epoch": 2.26,
"learning_rate": 7.70593445527015e-05,
"loss": 0.943,
"step": 1780
},
{
"epoch": 2.28,
"learning_rate": 7.573073516386182e-05,
"loss": 0.9416,
"step": 1790
},
{
"epoch": 2.29,
"learning_rate": 7.440212577502214e-05,
"loss": 0.9455,
"step": 1800
},
{
"epoch": 2.29,
"eval_loss": 0.9558648467063904,
"eval_runtime": 79.0765,
"eval_samples_per_second": 25.292,
"eval_steps_per_second": 0.797,
"step": 1800
}
],
"max_steps": 2358,
"num_train_epochs": 3,
"total_flos": 5.911772696235999e+18,
"trial_name": null,
"trial_params": null
}