{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.882203221321106, "min": 1.8612483739852905, "max": 3.295743227005005, "count": 500 }, "SoccerTwos.Policy.Entropy.sum": { "value": 36198.53125, "min": 16666.59765625, "max": 129452.9921875, "count": 500 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 55.81609195402299, "min": 42.504347826086956, "max": 999.0, "count": 500 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19424.0, "min": 9020.0, "max": 28056.0, "count": 500 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1532.9940693279034, "min": 1192.5880962714887, "max": 1550.8519443299538, "count": 488 }, "SoccerTwos.Self-play.ELO.sum": { "value": 266740.9680630552, "min": 2385.1761925429773, "max": 350170.0594485042, "count": 488 }, "SoccerTwos.Step.mean": { "value": 4999961.0, "min": 9292.0, "max": 4999961.0, "count": 500 }, "SoccerTwos.Step.sum": { "value": 4999961.0, "min": 9292.0, "max": 4999961.0, "count": 500 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": 0.02867129258811474, "min": -0.09449267387390137, "max": 0.17719267308712006, "count": 500 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": 5.0174760818481445, "min": -18.80404281616211, "max": 21.79469871520996, "count": 500 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": 0.029419634491205215, "min": -0.0921693742275238, "max": 0.18176759779453278, "count": 500 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": 5.148436069488525, "min": -18.341705322265625, "max": 22.35741424560547, "count": 500 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 500 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 500 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": 0.09034971645900182, "min": -0.5833333333333334, "max": 0.5522971403252865, "count": 500 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": 15.811200380325317, "min": -46.8439998626709, "max": 47.87239998579025, "count": 500 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": 0.09034971645900182, "min": -0.5833333333333334, "max": 0.5522971403252865, "count": 500 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": 15.811200380325317, "min": -46.8439998626709, "max": 47.87239998579025, "count": 500 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 500 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 500 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.02000823740963824, "min": 0.012191595459201683, "max": 0.02483156641246751, "count": 239 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.02000823740963824, "min": 0.012191595459201683, "max": 0.02483156641246751, "count": 239 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.105947607755661, "min": 0.00013259289535199058, "max": 0.12320006340742111, "count": 239 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.105947607755661, "min": 0.00013259289535199058, "max": 0.12320006340742111, "count": 239 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.1071174738307794, "min": 0.00013133458499699677, "max": 0.12502761756380398, "count": 239 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.1071174738307794, "min": 0.00013133458499699677, "max": 0.12502761756380398, "count": 239 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 239 }, "SoccerTwos.Policy.LearningRate.sum": 
{ "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 239 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.2, "max": 0.20000000000000007, "count": 239 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.2, "max": 0.20000000000000007, "count": 239 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005, "max": 0.005000000000000001, "count": 239 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005, "max": 0.005000000000000001, "count": 239 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1675807883", "python_version": "3.9.16 (main, Jan 11 2023, 16:05:54) \n[GCC 11.2.0]", "command_line_arguments": "/home/maciekov01/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1675833599" }, "total": 25716.332303826002, "count": 1, "self": 1.1485165980011516, "children": { "run_training.setup": { "total": 0.019997298999442137, "count": 1, "self": 0.019997298999442137 }, "TrainerController.start_learning": { "total": 25715.163789929, "count": 1, "self": 12.556267988911713, "children": { "TrainerController._reset_env": { "total": 4.2488228940128465, "count": 25, "self": 4.2488228940128465 }, "TrainerController.advance": { "total": 25697.935168447075, "count": 337697, "self": 14.221048326835444, "children": { "env_step": { "total": 9929.97184363007, "count": 337697, "self": 8141.895851522866, "children": { "SubprocessEnvManager._take_step": { "total": 1780.7801999301864, "count": 337697, "self": 86.28152512508859, "children": { "TorchPolicy.evaluate": { "total": 1694.4986748050978, "count": 633378, "self": 1694.4986748050978 } } }, "workers": { "total": 7.295792177017574, "count": 337697, "self": 0.0, "children": { "worker_root": { "total": 25679.449056064594, "count": 337697, "is_parallel": true, "self": 18919.530088860865, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.007782900001984672, "count": 2, "is_parallel": true, "self": 0.0023646000045118853, "children": { "_process_rank_one_or_two_observation": { "total": 0.005418299997472786, "count": 8, "is_parallel": true, "self": 0.005418299997472786 } } }, "UnityEnvironment.step": { "total": 0.04449399800068932, "count": 1, "is_parallel": true, "self": 0.0008400000042456668, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0012563999989652075, "count": 1, "is_parallel": true, "self": 0.0012563999989652075 }, "communicator.exchange": { "total": 0.04004709799846751, "count": 1, "is_parallel": true, "self": 0.04004709799846751 }, "steps_from_proto": { "total": 0.0023504999990109354, "count": 2, "is_parallel": true, "self": 0.0006520999995700549, "children": { "_process_rank_one_or_two_observation": { "total": 0.0016983999994408805, "count": 8, "is_parallel": true, "self": 0.0016983999994408805 } } } } } } }, "UnityEnvironment.step": { "total": 6759.849652103736, "count": 337696, "is_parallel": true, "self": 356.2722894806993, "children": { "UnityEnvironment._generate_step_input": { "total": 221.33935447104705, "count": 337696, "is_parallel": true, "self": 221.33935447104705 }, 
"communicator.exchange": { "total": 5135.113995884023, "count": 337696, "is_parallel": true, "self": 5135.113995884023 }, "steps_from_proto": { "total": 1047.1240122679665, "count": 675392, "is_parallel": true, "self": 211.52202593877882, "children": { "_process_rank_one_or_two_observation": { "total": 835.6019863291876, "count": 2701568, "is_parallel": true, "self": 835.6019863291876 } } } } }, "steps_from_proto": { "total": 0.06931509999230911, "count": 48, "is_parallel": true, "self": 0.014554199953636271, "children": { "_process_rank_one_or_two_observation": { "total": 0.054760900038672844, "count": 192, "is_parallel": true, "self": 0.054760900038672844 } } } } } } } } }, "trainer_advance": { "total": 15753.74227649017, "count": 337697, "self": 84.70858203272655, "children": { "process_trajectory": { "total": 2004.1265926254564, "count": 337697, "self": 1998.22110506046, "children": { "RLTrainer._checkpoint": { "total": 5.905487564996292, "count": 10, "self": 5.905487564996292 } } }, "_update_policy": { "total": 13664.907101831986, "count": 239, "self": 1074.0584044907828, "children": { "TorchPOCAOptimizer.update": { "total": 12590.848697341204, "count": 7182, "self": 12590.848697341204 } } } } } } }, "trainer_threads": { "total": 1.500004145782441e-06, "count": 1, "self": 1.500004145782441e-06 }, "TrainerController._save_models": { "total": 0.4235290989963687, "count": 1, "self": 0.004211899999063462, "children": { "RLTrainer._checkpoint": { "total": 0.41931719899730524, "count": 1, "self": 0.41931719899730524 } } } } } } }