{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403355360031128,
"min": 1.403355360031128,
"max": 1.4260923862457275,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69366.453125,
"min": 67910.0703125,
"max": 75619.8828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.74280230326296,
"min": 84.29301533219761,
"max": 396.53968253968253,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49361.0,
"min": 48798.0,
"max": 50113.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999985.0,
"min": 49660.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999985.0,
"min": 49660.0,
"max": 1999985.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4250643253326416,
"min": 0.053867630660533905,
"max": 2.436007261276245,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1265.883544921875,
"min": 6.733453750610352,
"max": 1409.6845703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.776759564648186,
"min": 1.8754722151756287,
"max": 3.8834007571069455,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1971.4684927463531,
"min": 234.43402689695358,
"max": 2220.1046253442764,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.776759564648186,
"min": 1.8754722151756287,
"max": 3.8834007571069455,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1971.4684927463531,
"min": 234.43402689695358,
"max": 2220.1046253442764,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014172759192943987,
"min": 0.012580829847138375,
"max": 0.019604330212799444,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04251827757883196,
"min": 0.02516165969427675,
"max": 0.05414017073635478,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055465740834673254,
"min": 0.022891575129081805,
"max": 0.060889813800652816,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16639722250401975,
"min": 0.04578315025816361,
"max": 0.18266944140195845,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.191198936300002e-06,
"min": 3.191198936300002e-06,
"max": 0.000295317676560775,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.573596808900007e-06,
"min": 9.573596808900007e-06,
"max": 0.0008440872186375999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106369999999999,
"min": 0.10106369999999999,
"max": 0.198439225,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3031911,
"min": 0.20729610000000007,
"max": 0.5813624,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.307863000000004e-05,
"min": 6.307863000000004e-05,
"max": 0.004922117327499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001892358900000001,
"min": 0.0001892358900000001,
"max": 0.014069983760000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670291381",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670293627"
},
"total": 2245.5813345630004,
"count": 1,
"self": 0.39416882500063366,
"children": {
"run_training.setup": {
"total": 0.11564255999996931,
"count": 1,
"self": 0.11564255999996931
},
"TrainerController.start_learning": {
"total": 2245.071523178,
"count": 1,
"self": 3.9346199699161843,
"children": {
"TrainerController._reset_env": {
"total": 9.976140203000114,
"count": 1,
"self": 9.976140203000114
},
"TrainerController.advance": {
"total": 2231.0339772790835,
"count": 232034,
"self": 4.098370546101705,
"children": {
"env_step": {
"total": 1749.2441268710136,
"count": 232034,
"self": 1461.0650545438252,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.57268183793485,
"count": 232034,
"self": 15.040533736917268,
"children": {
"TorchPolicy.evaluate": {
"total": 270.5321481010176,
"count": 222990,
"self": 68.02447951110094,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.50766858991665,
"count": 222990,
"self": 202.50766858991665
}
}
}
}
},
"workers": {
"total": 2.6063904892534993,
"count": 232034,
"self": 0.0,
"children": {
"worker_root": {
"total": 2236.6970124829327,
"count": 232034,
"is_parallel": true,
"self": 1037.1912694109485,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019423779999669932,
"count": 1,
"is_parallel": true,
"self": 0.00033646799965936225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001605910000307631,
"count": 2,
"is_parallel": true,
"self": 0.001605910000307631
}
}
},
"UnityEnvironment.step": {
"total": 0.027788609000026554,
"count": 1,
"is_parallel": true,
"self": 0.0003039340001578239,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020561399992402585,
"count": 1,
"is_parallel": true,
"self": 0.00020561399992402585
},
"communicator.exchange": {
"total": 0.026542190000100163,
"count": 1,
"is_parallel": true,
"self": 0.026542190000100163
},
"steps_from_proto": {
"total": 0.0007368709998445411,
"count": 1,
"is_parallel": true,
"self": 0.00025190899987137527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004849619999731658,
"count": 2,
"is_parallel": true,
"self": 0.0004849619999731658
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1199.5057430719842,
"count": 232033,
"is_parallel": true,
"self": 34.593392697259105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.8314612518941,
"count": 232033,
"is_parallel": true,
"self": 75.8314612518941
},
"communicator.exchange": {
"total": 996.321943297944,
"count": 232033,
"is_parallel": true,
"self": 996.321943297944
},
"steps_from_proto": {
"total": 92.75894582488695,
"count": 232033,
"is_parallel": true,
"self": 38.11862169909591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.64032412579104,
"count": 464066,
"is_parallel": true,
"self": 54.64032412579104
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 477.69147986196845,
"count": 232034,
"self": 6.293100921968062,
"children": {
"process_trajectory": {
"total": 149.181536951,
"count": 232034,
"self": 148.5734042639997,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6081326870003068,
"count": 4,
"self": 0.6081326870003068
}
}
},
"_update_policy": {
"total": 322.2168419890004,
"count": 97,
"self": 267.49363706998884,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.72320491901155,
"count": 2910,
"self": 54.72320491901155
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.730000106676016e-07,
"count": 1,
"self": 7.730000106676016e-07
},
"TrainerController._save_models": {
"total": 0.1267849529999694,
"count": 1,
"self": 0.001973164999981236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12481178799998816,
"count": 1,
"self": 0.12481178799998816
}
}
}
}
}
}
}