{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.359508752822876,
"min": 0.34822776913642883,
"max": 1.476664423942566,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10750.75,
"min": 10413.4033203125,
"max": 44796.09375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36237576603889465,
"min": -0.10081224888563156,
"max": 0.45428138971328735,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 95.66720581054688,
"min": -24.094127655029297,
"max": 119.47600555419922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01337360218167305,
"min": 0.003808769164606929,
"max": 0.2585873305797577,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.5306310653686523,
"min": 0.9826624393463135,
"max": 61.36722183227539,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06900113667962356,
"min": 0.06601375369818135,
"max": 0.07342804942987025,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9660159135147297,
"min": 0.4973633598433219,
"max": 1.063147431249187,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009818019565563274,
"min": 0.0011736706208861556,
"max": 0.014311311577676816,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13745227391788584,
"min": 0.015257718071520023,
"max": 0.20035836208747543,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.261233293907138e-06,
"min": 7.261233293907138e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010165726611469993,
"min": 0.00010165726611469993,
"max": 0.0035075933308022994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242037857142858,
"min": 0.10242037857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338853,
"min": 1.3886848,
"max": 2.5691976999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025179581928571417,
"min": 0.00025179581928571417,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003525141469999998,
"min": 0.003525141469999998,
"max": 0.11694285023,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015484215691685677,
"min": 0.015484215691685677,
"max": 0.459351122379303,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21677902340888977,
"min": 0.21677902340888977,
"max": 3.2154579162597656,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 502.53333333333336,
"min": 426.5492957746479,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30152.0,
"min": 15984.0,
"max": 32614.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2894203141331673,
"min": -1.0000000521540642,
"max": 1.504479391829056,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 76.07579853385687,
"min": -28.336401626467705,
"max": 102.3045986443758,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2894203141331673,
"min": -1.0000000521540642,
"max": 1.504479391829056,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 76.07579853385687,
"min": -28.336401626467705,
"max": 102.3045986443758,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0810127073521055,
"min": 0.07529577830540751,
"max": 9.221514103934169,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.779749733774224,
"min": 4.7717982804751955,
"max": 147.5442256629467,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1727076167",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1727079133"
},
"total": 2965.7518021469996,
"count": 1,
"self": 0.6542106919996513,
"children": {
"run_training.setup": {
"total": 0.24344144500014409,
"count": 1,
"self": 0.24344144500014409
},
"TrainerController.start_learning": {
"total": 2964.85415001,
"count": 1,
"self": 2.3378362889798154,
"children": {
"TrainerController._reset_env": {
"total": 4.025659385999916,
"count": 1,
"self": 4.025659385999916
},
"TrainerController.advance": {
"total": 2958.4080452520216,
"count": 63549,
"self": 2.48969378302354,
"children": {
"env_step": {
"total": 1964.0672352540034,
"count": 63549,
"self": 1813.4807992150809,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.23313975793144,
"count": 63549,
"self": 6.389285686954736,
"children": {
"TorchPolicy.evaluate": {
"total": 142.8438540709767,
"count": 62569,
"self": 142.8438540709767
}
}
},
"workers": {
"total": 1.3532962809911169,
"count": 63549,
"self": 0.0,
"children": {
"worker_root": {
"total": 2958.5943868020545,
"count": 63549,
"is_parallel": true,
"self": 1318.1576680431235,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004540205000012065,
"count": 1,
"is_parallel": true,
"self": 0.001743535000287011,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027966699997250544,
"count": 8,
"is_parallel": true,
"self": 0.0027966699997250544
}
}
},
"UnityEnvironment.step": {
"total": 0.0651145980000365,
"count": 1,
"is_parallel": true,
"self": 0.0007845810002891085,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005476929998167179,
"count": 1,
"is_parallel": true,
"self": 0.0005476929998167179
},
"communicator.exchange": {
"total": 0.06150889099990309,
"count": 1,
"is_parallel": true,
"self": 0.06150889099990309
},
"steps_from_proto": {
"total": 0.0022734330000275804,
"count": 1,
"is_parallel": true,
"self": 0.0004202509996957815,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001853182000331799,
"count": 8,
"is_parallel": true,
"self": 0.001853182000331799
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1640.436718758931,
"count": 63548,
"is_parallel": true,
"self": 48.05058888600365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.758519646955847,
"count": 63548,
"is_parallel": true,
"self": 28.758519646955847
},
"communicator.exchange": {
"total": 1438.4572441540472,
"count": 63548,
"is_parallel": true,
"self": 1438.4572441540472
},
"steps_from_proto": {
"total": 125.17036607192426,
"count": 63548,
"is_parallel": true,
"self": 26.89256624312202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.27779982880224,
"count": 508384,
"is_parallel": true,
"self": 98.27779982880224
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 991.8511162149946,
"count": 63549,
"self": 4.383852298964257,
"children": {
"process_trajectory": {
"total": 157.55749755703482,
"count": 63549,
"self": 157.2563762720356,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3011212849992262,
"count": 2,
"self": 0.3011212849992262
}
}
},
"_update_policy": {
"total": 829.9097663589955,
"count": 451,
"self": 345.2641838850393,
"children": {
"TorchPPOOptimizer.update": {
"total": 484.6455824739562,
"count": 22743,
"self": 484.6455824739562
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1539996194187552e-06,
"count": 1,
"self": 1.1539996194187552e-06
},
"TrainerController._save_models": {
"total": 0.08260792899909575,
"count": 1,
"self": 0.0025676249997559353,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08004030399933981,
"count": 1,
"self": 0.08004030399933981
}
}
}
}
}
}
}