{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.3079421818256378, "min": 0.3079421818256378, "max": 1.4653654098510742, "count": 37 }, "Pyramids.Policy.Entropy.sum": { "value": 9208.703125, "min": 9208.703125, "max": 44453.32421875, "count": 37 }, "Pyramids.Step.mean": { "value": 1109886.0, "min": 29904.0, "max": 1109886.0, "count": 37 }, "Pyramids.Step.sum": { "value": 1109886.0, "min": 29904.0, "max": 1109886.0, "count": 37 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.7282056212425232, "min": -0.14689964056015015, "max": 0.7282056212425232, "count": 37 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 208.99501037597656, "min": -34.815216064453125, "max": 208.99501037597656, "count": 37 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.005693688057363033, "min": -0.010266827419400215, "max": 0.2772565186023712, "count": 37 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 1.6340885162353516, "min": -2.813110828399658, "max": 65.70979309082031, "count": 37 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06502705050626813, "min": 0.06502705050626813, "max": 0.07391043653402757, "count": 37 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9103787070877538, "min": 0.5674283807558784, "max": 1.1086565480104136, "count": 37 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.013246345796644509, "min": 0.0021518852914151, "max": 0.016139538479841405, "count": 37 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.18544884115302313, "min": 0.025822623496981202, "max": 0.22595353871777968, "count": 37 }, "Pyramids.Policy.LearningRate.mean": { "value": 0.0001904964579297643, "min": 0.0001904964579297643, "max": 0.0002984034005322, "count": 37 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.0026669504110167002, "min": 0.0023872272042576, "max": 0.0041172984275672335, "count": 37 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.16349880714285714, "min": 0.16349880714285714, "max": 0.1994678, "count": 37 }, 
"Pyramids.Policy.Epsilon.sum": { "value": 2.2889833, "min": 1.5957424, "max": 2.872432766666667, "count": 37 }, "Pyramids.Policy.Beta.mean": { "value": 0.0063535308335714285, "min": 0.0063535308335714285, "max": 0.00994683322, "count": 37 }, "Pyramids.Policy.Beta.sum": { "value": 0.08894943167, "min": 0.07957466576, "max": 0.13725603339, "count": 37 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.010376361198723316, "min": 0.008851609192788601, "max": 0.3764885365962982, "count": 37 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.14526905119419098, "min": 0.12392253428697586, "max": 3.0119082927703857, "count": 37 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 286.50961538461536, "min": 272.875, "max": 990.0588235294117, "count": 37 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29797.0, "min": 16831.0, "max": 32144.0, "count": 37 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.6365480582731275, "min": -0.8733412207049482, "max": 1.7104761789242426, "count": 37 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 170.20099806040525, "min": -27.456601679325104, "max": 189.43639809638262, "count": 37 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.6365480582731275, "min": -0.8733412207049482, "max": 1.7104761789242426, "count": 37 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 170.20099806040525, "min": -27.456601679325104, "max": 189.43639809638262, "count": 37 }, "Pyramids.Policy.RndReward.mean": { "value": 0.030860550049720378, "min": 0.028261452546238224, "max": 7.589311210548177, "count": 37 }, "Pyramids.Policy.RndReward.sum": { "value": 3.209497205170919, "min": 2.604007639703923, "max": 129.018290579319, "count": 37 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 37 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 37 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1660202942", "python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1660205219" }, "total": 2277.361686525, "count": 1, "self": 0.2899749149996751, "children": { "run_training.setup": { "total": 0.0420872849999796, "count": 1, "self": 0.0420872849999796 }, "TrainerController.start_learning": { "total": 2277.0296243250004, "count": 1, "self": 1.4522744300061277, "children": { "TrainerController._reset_env": { "total": 10.568040686000018, "count": 1, "self": 10.568040686000018 }, "TrainerController.advance": { "total": 2264.8508460519943, "count": 71867, "self": 1.5563831289855443, "children": { "env_step": { "total": 1481.2367061579791, "count": 71867, "self": 1365.9924279429506, "children": { "SubprocessEnvManager._take_step": { "total": 114.46112863100251, "count": 71867, "self": 5.038649549951799, "children": { "TorchPolicy.evaluate": { "total": 109.42247908105071, "count": 69874, "self": 38.21339551906328, "children": { "TorchPolicy.sample_actions": { "total": 71.20908356198743, "count": 69874, "self": 71.20908356198743 } } } } }, "workers": { "total": 0.7831495840259208, "count": 71867, "self": 0.0, "children": { "worker_root": { "total": 2272.6336747399914, "count": 71867, "is_parallel": true, "self": 1017.0100531099179, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005756088999987696, "count": 1, "is_parallel": true, "self": 0.0045572900000365735, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011987989999511228, "count": 8, "is_parallel": true, "self": 0.0011987989999511228 } } }, 
"UnityEnvironment.step": { "total": 0.0451832190000232, "count": 1, "is_parallel": true, "self": 0.0005898320000596868, "children": { "UnityEnvironment._generate_step_input": { "total": 0.000449232999983451, "count": 1, "is_parallel": true, "self": 0.000449232999983451 }, "communicator.exchange": { "total": 0.04247218799997654, "count": 1, "is_parallel": true, "self": 0.04247218799997654 }, "steps_from_proto": { "total": 0.001671966000003522, "count": 1, "is_parallel": true, "self": 0.000462084000048435, "children": { "_process_rank_one_or_two_observation": { "total": 0.001209881999955087, "count": 8, "is_parallel": true, "self": 0.001209881999955087 } } } } } } }, "UnityEnvironment.step": { "total": 1255.6236216300736, "count": 71866, "is_parallel": true, "self": 30.0061024970546, "children": { "UnityEnvironment._generate_step_input": { "total": 25.06209682701001, "count": 71866, "is_parallel": true, "self": 25.06209682701001 }, "communicator.exchange": { "total": 1099.315978173021, "count": 71866, "is_parallel": true, "self": 1099.315978173021 }, "steps_from_proto": { "total": 101.23944413298807, "count": 71866, "is_parallel": true, "self": 24.808650029035277, "children": { "_process_rank_one_or_two_observation": { "total": 76.43079410395279, "count": 574928, "is_parallel": true, "self": 76.43079410395279 } } } } } } } } } } }, "trainer_advance": { "total": 782.0577567650298, "count": 71867, "self": 2.7755640250568376, "children": { "process_trajectory": { "total": 179.90654594497306, "count": 71867, "self": 179.70380031997348, "children": { "RLTrainer._checkpoint": { "total": 0.20274562499957938, "count": 2, "self": 0.20274562499957938 } } }, "_update_policy": { "total": 599.375646795, "count": 517, "self": 232.01140013197562, "children": { "TorchPPOOptimizer.update": { "total": 367.36424666302435, "count": 25429, "self": 367.36424666302435 } } } } } } }, "trainer_threads": { "total": 1.7219999790540896e-06, "count": 1, "self": 1.7219999790540896e-06 }, 
"TrainerController._save_models": { "total": 0.1584614349999356, "count": 1, "self": 0.0022537059999194753, "children": { "RLTrainer._checkpoint": { "total": 0.15620772900001612, "count": 1, "self": 0.15620772900001612 } } } } } } }