pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3167525827884674,
"min": 0.3167525827884674,
"max": 1.3282514810562134,
"count": 45
},
"Pyramids.Policy.Entropy.sum": {
"value": 9497.509765625,
"min": 9497.509765625,
"max": 40293.8359375,
"count": 45
},
"Pyramids.Step.mean": {
"value": 1349934.0,
"min": 29999.0,
"max": 1349934.0,
"count": 45
},
"Pyramids.Step.sum": {
"value": 1349934.0,
"min": 29999.0,
"max": 1349934.0,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7149131298065186,
"min": -0.09940466284751892,
"max": 0.7285493612289429,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 205.8949737548828,
"min": -23.956523895263672,
"max": 210.944091796875,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013993170112371445,
"min": -0.03173583000898361,
"max": 0.5168747305870056,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.030033111572266,
"min": -8.981240272521973,
"max": 123.01618957519531,
"count": 45
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0679142276279717,
"min": 0.06605978302244615,
"max": 0.07343552512609666,
"count": 45
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9507991867916038,
"min": 0.5080930966838403,
"max": 1.0550689952603232,
"count": 45
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014197500764220465,
"min": 0.0005210101510204605,
"max": 0.01764622042052603,
"count": 45
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1987650106990865,
"min": 0.0072941421142864475,
"max": 0.24704708588736443,
"count": 45
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00016654085877210956,
"min": 0.00016654085877210956,
"max": 0.00029841212910071906,
"count": 45
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.002331572022809534,
"min": 0.0020888849037050336,
"max": 0.0040527834490722,
"count": 45
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1555136047619048,
"min": 0.1555136047619048,
"max": 0.19947070952380955,
"count": 45
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.177190466666667,
"min": 1.3962949666666669,
"max": 2.7523924666666666,
"count": 45
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0055558091157142855,
"min": 0.0055558091157142855,
"max": 0.009947123881428571,
"count": 45
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07778132762,
"min": 0.06962986717,
"max": 0.13509768722,
"count": 45
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008170568384230137,
"min": 0.006660111714154482,
"max": 0.4152999520301819,
"count": 45
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11438795924186707,
"min": 0.09324156492948532,
"max": 2.907099723815918,
"count": 45
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 274.5,
"min": 254.94017094017093,
"max": 998.75,
"count": 45
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30195.0,
"min": 16670.0,
"max": 33168.0,
"count": 45
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.690837823055886,
"min": -0.9370188026223332,
"max": 1.743905157364648,
"count": 45
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 187.68299835920334,
"min": -29.98460168391466,
"max": 202.29299825429916,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.690837823055886,
"min": -0.9370188026223332,
"max": 1.743905157364648,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 187.68299835920334,
"min": -29.98460168391466,
"max": 202.29299825429916,
"count": 45
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02314725231770206,
"min": 0.01985042568163148,
"max": 7.671826935866299,
"count": 45
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5693450072649284,
"min": 2.0758601336710854,
"max": 130.4210579097271,
"count": 45
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708452728",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708455722"
},
"total": 2994.21582929,
"count": 1,
"self": 0.5800379889997203,
"children": {
"run_training.setup": {
"total": 0.0690948589999607,
"count": 1,
"self": 0.0690948589999607
},
"TrainerController.start_learning": {
"total": 2993.566696442,
"count": 1,
"self": 1.9040499141351575,
"children": {
"TrainerController._reset_env": {
"total": 2.817332537999846,
"count": 1,
"self": 2.817332537999846
},
"TrainerController.advance": {
"total": 2988.8451917208654,
"count": 87175,
"self": 1.9733178476390094,
"children": {
"env_step": {
"total": 2145.1048334829934,
"count": 87175,
"self": 1962.0632417189154,
"children": {
"SubprocessEnvManager._take_step": {
"total": 181.86222587105794,
"count": 87175,
"self": 6.596621490120015,
"children": {
"TorchPolicy.evaluate": {
"total": 175.26560438093793,
"count": 84737,
"self": 175.26560438093793
}
}
},
"workers": {
"total": 1.1793658930200763,
"count": 87174,
"self": 0.0,
"children": {
"worker_root": {
"total": 2986.769386699989,
"count": 87174,
"is_parallel": true,
"self": 1187.7653881350388,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003026042999863421,
"count": 1,
"is_parallel": true,
"self": 0.0008700200005478109,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00215602299931561,
"count": 8,
"is_parallel": true,
"self": 0.00215602299931561
}
}
},
"UnityEnvironment.step": {
"total": 0.04870142700019642,
"count": 1,
"is_parallel": true,
"self": 0.000580281000566174,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005262729996502458,
"count": 1,
"is_parallel": true,
"self": 0.0005262729996502458
},
"communicator.exchange": {
"total": 0.045961736000208475,
"count": 1,
"is_parallel": true,
"self": 0.045961736000208475
},
"steps_from_proto": {
"total": 0.0016331369997715228,
"count": 1,
"is_parallel": true,
"self": 0.00037260799945215695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012605290003193659,
"count": 8,
"is_parallel": true,
"self": 0.0012605290003193659
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1799.00399856495,
"count": 87173,
"is_parallel": true,
"self": 47.62089760097251,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 35.165474063924194,
"count": 87173,
"is_parallel": true,
"self": 35.165474063924194
},
"communicator.exchange": {
"total": 1576.1375383689979,
"count": 87173,
"is_parallel": true,
"self": 1576.1375383689979
},
"steps_from_proto": {
"total": 140.08008853105548,
"count": 87173,
"is_parallel": true,
"self": 28.773405101110257,
"children": {
"_process_rank_one_or_two_observation": {
"total": 111.30668342994522,
"count": 697384,
"is_parallel": true,
"self": 111.30668342994522
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 841.767040390233,
"count": 87174,
"self": 3.7603928002386056,
"children": {
"process_trajectory": {
"total": 175.2177874980107,
"count": 87174,
"self": 174.9652226670105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2525648310001998,
"count": 2,
"self": 0.2525648310001998
}
}
},
"_update_policy": {
"total": 662.7888600919837,
"count": 623,
"self": 386.84931964294356,
"children": {
"TorchPPOOptimizer.update": {
"total": 275.9395404490401,
"count": 30858,
"self": 275.9395404490401
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3859998944099061e-06,
"count": 1,
"self": 1.3859998944099061e-06
},
"TrainerController._save_models": {
"total": 0.0001208829999086447,
"count": 1,
"self": 2.2500000341096893e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 9.838299956754781e-05,
"count": 1,
"self": 9.838299956754781e-05
}
}
}
}
}
}
}