{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.404165267944336, "min": 1.404165267944336, "max": 1.4292501211166382, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69118.6328125, "min": 68039.9453125, "max": 77934.890625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 89.28108108108108, "min": 86.70877192982456, "max": 387.4573643410853, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49551.0, "min": 48867.0, "max": 50158.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999397.0, "min": 49699.0, "max": 1999397.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999397.0, "min": 49699.0, "max": 1999397.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3954017162323, "min": 0.057206615805625916, "max": 2.4433817863464355, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1329.447998046875, "min": 7.322446823120117, "max": 1392.5416259765625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.675060738851358, "min": 1.8111433521844447, "max": 3.8081099381501025, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2039.6587100625038, "min": 231.82634907960892, "max": 2150.6619777083397, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.675060738851358, "min": 1.8111433521844447, "max": 3.8081099381501025, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2039.6587100625038, "min": 231.82634907960892, "max": 2150.6619777083397, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.015331682759278919, "min": 0.013081209104469357, "max": 0.021642544624288954, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.04599504827783676, "min": 0.026162418208938713, "max": 0.05902238560684055, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05913079790771008, "min": 0.020800542024274667, "max": 0.060055038664076056, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.17739239372313023, "min": 0.041601084048549335, "max": 0.18016511599222818, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.229148923649999e-06, "min": 3.229148923649999e-06, "max": 0.0002953014015662, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.687446770949997e-06, "min": 9.687446770949997e-06, "max": 0.0008439162186945996, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10107634999999998, "min": 0.10107634999999998, "max": 0.19843379999999994, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30322904999999994, "min": 0.20729135000000004, "max": 0.5813054, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.370986500000002e-05, "min": 6.370986500000002e-05, "max": 0.004921846619999999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019112959500000007, "min": 0.00019112959500000007, "max": 0.01406713946, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679577774", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1679580495" }, "total": 2721.405870816, "count": 1, "self": 0.7945675589999155, "children": { "run_training.setup": { "total": 0.1904387189999852, "count": 1, "self": 0.1904387189999852 }, "TrainerController.start_learning": { "total": 2720.4208645380004, "count": 1, "self": 5.526476119061044, "children": { "TrainerController._reset_env": { "total": 8.63486167799988, "count": 1, "self": 8.63486167799988 }, "TrainerController.advance": { "total": 2706.0586629089394, "count": 231790, "self": 6.20476866591207, "children": { "env_step": { "total": 2139.9866529060378, "count": 231790, "self": 1795.6445440889697, "children": { "SubprocessEnvManager._take_step": { "total": 340.86516089401994, "count": 231790, "self": 20.817550518105236, "children": { "TorchPolicy.evaluate": { "total": 320.0476103759147, "count": 222862, "self": 320.0476103759147 } } }, "workers": { "total": 3.476947923048101, "count": 231790, "self": 0.0, "children": { "worker_root": { "total": 2710.7829474949854, "count": 231790, "is_parallel": true, "self": 1257.7372628308185, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0044084559999646444, "count": 1, "is_parallel": true, "self": 0.002654191999909017, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017542640000556275, "count": 2, "is_parallel": true, "self": 0.0017542640000556275 } } }, "UnityEnvironment.step": { "total": 0.03053193599998849, "count": 1, "is_parallel": true, "self": 0.0003655560001334379, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002679429999261629, "count": 1, "is_parallel": true, "self": 0.0002679429999261629 }, "communicator.exchange": { "total": 0.02918125299993335, "count": 1, "is_parallel": true, "self": 0.02918125299993335 }, "steps_from_proto": { "total": 0.0007171839999955409, "count": 1, "is_parallel": true, "self": 0.000220244999809438, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004969390001861029, "count": 2, "is_parallel": true, "self": 0.0004969390001861029 } } } } } } }, "UnityEnvironment.step": { "total": 1453.0456846641669, "count": 231789, "is_parallel": true, "self": 42.55114270432205, "children": { "UnityEnvironment._generate_step_input": { "total": 88.03504920594196, "count": 231789, "is_parallel": true, "self": 88.03504920594196 }, "communicator.exchange": { "total": 1221.4810073479564, "count": 231789, "is_parallel": true, "self": 1221.4810073479564 }, "steps_from_proto": { "total": 100.97848540594646, "count": 231789, "is_parallel": true, "self": 39.38525327408888, "children": { "_process_rank_one_or_two_observation": { "total": 61.593232131857576, "count": 463578, "is_parallel": true, "self": 61.593232131857576 } } } } } } } } } } }, "trainer_advance": { "total": 559.8672413369895, "count": 231790, "self": 8.60280852002279, "children": { "process_trajectory": { "total": 161.29991670196455, "count": 231790, "self": 159.73008924896544, "children": { "RLTrainer._checkpoint": { "total": 1.5698274529991068, "count": 10, "self": 1.5698274529991068 } } }, "_update_policy": { "total": 389.9645161150022, "count": 97, "self": 327.51923837501363, "children": { "TorchPPOOptimizer.update": { "total": 62.44527773998857, "count": 2910, "self": 62.44527773998857 } } } } } } }, "trainer_threads": { "total": 1.2729997251881287e-06, "count": 1, "self": 1.2729997251881287e-06 }, "TrainerController._save_models": { "total": 0.20086255900059768, "count": 1, "self": 
0.0029128910000508768, "children": { "RLTrainer._checkpoint": { "total": 0.1979496680005468, "count": 1, "self": 0.1979496680005468 } } } } } } }