{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4095683097839355, "min": 1.4095683097839355, "max": 1.4320658445358276, "count": 35 }, "Huggy.Policy.Entropy.sum": { "value": 70189.453125, "min": 68865.5390625, "max": 74922.9453125, "count": 35 }, "Huggy.Environment.EpisodeLength.mean": { "value": 90.79227941176471, "min": 81.50909090909092, "max": 380.3484848484849, "count": 35 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49391.0, "min": 48852.0, "max": 50206.0, "count": 35 }, "Huggy.Step.mean": { "value": 1749969.0, "min": 49880.0, "max": 1749969.0, "count": 35 }, "Huggy.Step.sum": { "value": 1749969.0, "min": 49880.0, "max": 1749969.0, "count": 35 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4033875465393066, "min": 0.11368799954652786, "max": 2.4103121757507324, "count": 35 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1307.44287109375, "min": 14.893128395080566, "max": 1440.44580078125, "count": 35 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7260060874635683, "min": 1.8765528657054174, "max": 3.8963470876773942, "count": 35 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2026.9473115801811, "min": 245.82842540740967, "max": 2307.696560382843, "count": 35 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7260060874635683, "min": 1.8765528657054174, "max": 3.8963470876773942, "count": 35 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2026.9473115801811, "min": 245.82842540740967, "max": 2307.696560382843, "count": 35 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.017523454667146627, "min": 0.014150131892852691, "max": 0.02058650235552098, "count": 35 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.035046909334293254, "min": 0.029747261120670977, "max": 0.05135410752263851, "count": 35 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05260498753438393, "min": 0.023043013426164785, "max": 0.06755352995047967, "count": 35 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.10520997506876786, "min": 0.04608602685232957, "max": 0.18472308243314425, "count": 35 }, "Huggy.Policy.LearningRate.mean": { "value": 4.208161097282502e-05, "min": 4.208161097282502e-05, "max": 0.00029529292656902496, "count": 35 }, "Huggy.Policy.LearningRate.sum": { "value": 8.416322194565004e-05, "min": 8.416322194565004e-05, "max": 0.0008438518687160498, "count": 35 }, "Huggy.Policy.Epsilon.mean": { "value": 0.11402717499999998, "min": 0.11402717499999998, "max": 0.198430975, "count": 35 }, "Huggy.Policy.Epsilon.sum": { "value": 0.22805434999999996, "min": 0.22805434999999996, "max": 0.5812839500000001, "count": 35 }, "Huggy.Policy.Beta.mean": { "value": 0.0007099560325000002, "min": 0.0007099560325000002, "max": 0.0049217056525, "count": 35 }, "Huggy.Policy.Beta.sum": { "value": 0.0014199120650000003, "min": 0.0014199120650000003, "max": 0.014066069105, "count": 35 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 35 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 35 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1677085962", "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": 
"1677088172" }, "total": 2210.825396496, "count": 1, "self": 0.4642907469997226, "children": { "run_training.setup": { "total": 0.12123382700002594, "count": 1, "self": 0.12123382700002594 }, "TrainerController.start_learning": { "total": 2210.239871922, "count": 1, "self": 4.107062842138021, "children": { "TrainerController._reset_env": { "total": 11.655896841000015, "count": 1, "self": 11.655896841000015 }, "TrainerController.advance": { "total": 2194.197214393862, "count": 203811, "self": 4.190427981828634, "children": { "env_step": { "total": 1717.6880179940324, "count": 203811, "self": 1431.3915638250833, "children": { "SubprocessEnvManager._take_step": { "total": 283.64588375102386, "count": 203811, "self": 14.79081214899304, "children": { "TorchPolicy.evaluate": { "total": 268.8550716020308, "count": 196322, "self": 67.21015494003183, "children": { "TorchPolicy.sample_actions": { "total": 201.644916661999, "count": 196322, "self": 201.644916661999 } } } } }, "workers": { "total": 2.6505704179252803, "count": 203811, "self": 0.0, "children": { "worker_root": { "total": 2202.5628290289264, "count": 203811, "is_parallel": true, "self": 1040.2125924219504, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002358488000027137, "count": 1, "is_parallel": true, "self": 0.0004665990000489728, "children": { "_process_rank_one_or_two_observation": { "total": 0.001891888999978164, "count": 2, "is_parallel": true, "self": 0.001891888999978164 } } }, "UnityEnvironment.step": { "total": 0.032279004999963945, "count": 1, "is_parallel": true, "self": 0.00032704299991337393, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00022117100002105872, "count": 1, "is_parallel": true, "self": 0.00022117100002105872 }, "communicator.exchange": { "total": 0.030751764000001458, "count": 1, "is_parallel": true, "self": 0.030751764000001458 }, "steps_from_proto": { "total": 0.0009790270000280543, "count": 1, "is_parallel": true, "self": 0.00047323199999027565, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005057950000377787, "count": 2, "is_parallel": true, "self": 0.0005057950000377787 } } } } } } }, "UnityEnvironment.step": { "total": 1162.350236606976, "count": 203810, "is_parallel": true, "self": 34.25846905488561, "children": { "UnityEnvironment._generate_step_input": { "total": 75.61855437401533, "count": 203810, "is_parallel": true, "self": 75.61855437401533 }, "communicator.exchange": { "total": 966.5407022150566, "count": 203810, "is_parallel": true, "self": 966.5407022150566 }, "steps_from_proto": { "total": 85.93251096301867, "count": 203810, "is_parallel": true, "self": 37.15237664406476, "children": { "_process_rank_one_or_two_observation": { "total": 48.78013431895391, "count": 407620, "is_parallel": true, "self": 48.78013431895391 } } } } } } } } } } }, "trainer_advance": { "total": 472.31876841800084, "count": 203811, "self": 6.351399798023124, "children": { "process_trajectory": { "total": 149.53663238697584, "count": 203811, "self": 148.5119096409761, "children": { "RLTrainer._checkpoint": { "total": 1.0247227459997248, "count": 8, "self": 1.0247227459997248 } } }, "_update_policy": { "total": 316.4307362330019, "count": 85, "self": 265.3727298980109, "children": { "TorchPPOOptimizer.update": { "total": 51.05800633499098, "count": 2550, "self": 51.05800633499098 } } } } } } }, "trainer_threads": { "total": 2.868000137823401e-06, "count": 1, "self": 
2.868000137823401e-06 }, "TrainerController._save_models": { "total": 0.2796949769999628, "count": 1, "self": 0.0033060239998121688, "children": { "RLTrainer._checkpoint": { "total": 0.27638895300015065, "count": 1, "self": 0.27638895300015065 } } } } } } }