{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.708724856376648,
"min": 1.7064248323440552,
"max": 3.2957451343536377,
"count": 732
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32971.5546875,
"min": 24871.140625,
"max": 117462.03125,
"count": 732
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.86046511627907,
"min": 42.623931623931625,
"max": 999.0,
"count": 732
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19560.0,
"min": 15684.0,
"max": 24116.0,
"count": 732
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1685.0793199363181,
"min": 1193.4910462215628,
"max": 1695.0795841448596,
"count": 724
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 289833.6430290467,
"min": 2388.434782969469,
"max": 359412.9854283597,
"count": 724
},
"SoccerTwos.Step.mean": {
"value": 7319971.0,
"min": 9388.0,
"max": 7319971.0,
"count": 732
},
"SoccerTwos.Step.sum": {
"value": 7319971.0,
"min": 9388.0,
"max": 7319971.0,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0049272398464381695,
"min": -0.11839887499809265,
"max": 0.28475695848464966,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.8474852442741394,
"min": -18.907867431640625,
"max": 33.38478088378906,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0020954858046025038,
"min": -0.12036605179309845,
"max": 0.2840828597545624,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.36042356491088867,
"min": -18.86254119873047,
"max": 35.508243560791016,
"count": 732
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 732
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0034000003753706467,
"min": -0.5798500031232834,
"max": 0.7870892286300659,
"count": 732
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.5848000645637512,
"min": -61.92040002346039,
"max": 82.3407998085022,
"count": 732
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0034000003753706467,
"min": -0.5798500031232834,
"max": 0.7870892286300659,
"count": 732
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.5848000645637512,
"min": -61.92040002346039,
"max": 82.3407998085022,
"count": 732
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 732
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 732
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018750305784245334,
"min": 0.009751158613168324,
"max": 0.02430557073870053,
"count": 352
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018750305784245334,
"min": 0.009751158613168324,
"max": 0.02430557073870053,
"count": 352
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0903317871193091,
"min": 0.00022427343671248915,
"max": 0.12271761645873387,
"count": 352
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0903317871193091,
"min": 0.00022427343671248915,
"max": 0.12271761645873387,
"count": 352
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09221188401182492,
"min": 0.00023013590713768888,
"max": 0.125553181519111,
"count": 352
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09221188401182492,
"min": 0.00023013590713768888,
"max": 0.125553181519111,
"count": 352
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 352
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 352
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 352
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 352
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 352
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 352
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697628129",
"python_version": "3.9.18 (main, Oct 12 2023, 10:28:59) \n[GCC 12.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1697670855"
},
"total": 42726.52308258999,
"count": 1,
"self": 5.807829872996081,
"children": {
"run_training.setup": {
"total": 0.049611725999056944,
"count": 1,
"self": 0.049611725999056944
},
"TrainerController.start_learning": {
"total": 42720.665640991,
"count": 1,
"self": 13.592370800433855,
"children": {
"TrainerController._reset_env": {
"total": 2.942965879015901,
"count": 37,
"self": 2.942965879015901
},
"TrainerController.advance": {
"total": 42703.64870191455,
"count": 497538,
"self": 15.770513132614724,
"children": {
"env_step": {
"total": 11328.624999491014,
"count": 497538,
"self": 9004.069677347408,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2315.999565461385,
"count": 497538,
"self": 96.48767579956439,
"children": {
"TorchPolicy.evaluate": {
"total": 2219.5118896618205,
"count": 923806,
"self": 2219.5118896618205
}
}
},
"workers": {
"total": 8.555756682220817,
"count": 497538,
"self": 0.0,
"children": {
"worker_root": {
"total": 42657.193368692744,
"count": 497538,
"is_parallel": true,
"self": 37187.09496959218,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022913849988981383,
"count": 2,
"is_parallel": true,
"self": 0.0005575740015046904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001733810997393448,
"count": 8,
"is_parallel": true,
"self": 0.001733810997393448
}
}
},
"UnityEnvironment.step": {
"total": 0.5914581790002558,
"count": 1,
"is_parallel": true,
"self": 0.0005390770002122736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003947080003854353,
"count": 1,
"is_parallel": true,
"self": 0.0003947080003854353
},
"communicator.exchange": {
"total": 0.5887967029993888,
"count": 1,
"is_parallel": true,
"self": 0.5887967029993888
},
"steps_from_proto": {
"total": 0.0017276910002692603,
"count": 2,
"is_parallel": true,
"self": 0.0003886720005539246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013390189997153357,
"count": 8,
"is_parallel": true,
"self": 0.0013390189997153357
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5470.038002831581,
"count": 497537,
"is_parallel": true,
"self": 270.53645578644864,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 194.69159653869792,
"count": 497537,
"is_parallel": true,
"self": 194.69159653869792
},
"communicator.exchange": {
"total": 4198.232304760157,
"count": 497537,
"is_parallel": true,
"self": 4198.232304760157
},
"steps_from_proto": {
"total": 806.577645746278,
"count": 995074,
"is_parallel": true,
"self": 177.63399145457151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 628.9436542917065,
"count": 3980296,
"is_parallel": true,
"self": 628.9436542917065
}
}
}
}
},
"steps_from_proto": {
"total": 0.060396268982003676,
"count": 72,
"is_parallel": true,
"self": 0.01267577093494765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.047720498047056026,
"count": 288,
"is_parallel": true,
"self": 0.047720498047056026
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 31359.253189290925,
"count": 497538,
"self": 94.78139855978588,
"children": {
"process_trajectory": {
"total": 4191.7285584602,
"count": 497538,
"self": 4185.71359065821,
"children": {
"RLTrainer._checkpoint": {
"total": 6.014967801989769,
"count": 14,
"self": 6.014967801989769
}
}
},
"_update_policy": {
"total": 27072.74323227094,
"count": 353,
"self": 1527.76111380976,
"children": {
"TorchPOCAOptimizer.update": {
"total": 25544.98211846118,
"count": 10584,
"self": 25544.98211846118
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1309966794215143e-06,
"count": 1,
"self": 1.1309966794215143e-06
},
"TrainerController._save_models": {
"total": 0.48160126600123476,
"count": 1,
"self": 0.03382706000411417,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4477742059971206,
"count": 1,
"self": 0.4477742059971206
}
}
}
}
}
}
}