{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6685035228729248,
"min": 1.576264500617981,
"max": 3.2957048416137695,
"count": 1125
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32782.7578125,
"min": 27321.90625,
"max": 110772.0625,
"count": 1125
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.674157303370784,
"min": 39.235772357723576,
"max": 999.0,
"count": 1125
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19464.0,
"min": 13020.0,
"max": 27040.0,
"count": 1125
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1626.8769388625344,
"min": 1193.6688393011011,
"max": 1666.2055540957253,
"count": 1122
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 289584.09511753113,
"min": 2396.1785651367168,
"max": 393239.04171339865,
"count": 1122
},
"SoccerTwos.Step.mean": {
"value": 11249973.0,
"min": 9538.0,
"max": 11249973.0,
"count": 1125
},
"SoccerTwos.Step.sum": {
"value": 11249973.0,
"min": 9538.0,
"max": 11249973.0,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.02289056032896042,
"min": -0.12461207062005997,
"max": 0.16715063154697418,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.097410202026367,
"min": -24.050128936767578,
"max": 27.579853057861328,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.024793121963739395,
"min": -0.12570492923259735,
"max": 0.1686450093984604,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.437968730926514,
"min": -22.261478424072266,
"max": 27.826427459716797,
"count": 1125
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1125
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.19148156496399607,
"min": -0.6915294116034227,
"max": 0.4790105243262492,
"count": 1125
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -34.2752001285553,
"min": -56.238800168037415,
"max": 54.51539969444275,
"count": 1125
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.19148156496399607,
"min": -0.6915294116034227,
"max": 0.4790105243262492,
"count": 1125
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -34.2752001285553,
"min": -56.238800168037415,
"max": 54.51539969444275,
"count": 1125
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1125
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1125
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016712228224302333,
"min": 0.010718649214444062,
"max": 0.026978419835601623,
"count": 544
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016712228224302333,
"min": 0.010718649214444062,
"max": 0.026978419835601623,
"count": 544
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09912082180380821,
"min": 0.0009114766047180941,
"max": 0.12794776608546574,
"count": 544
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09912082180380821,
"min": 0.0009114766047180941,
"max": 0.12794776608546574,
"count": 544
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10044970785578092,
"min": 0.0009156788029940799,
"max": 0.12996982062856358,
"count": 544
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10044970785578092,
"min": 0.0009156788029940799,
"max": 0.12996982062856358,
"count": 544
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 544
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 544
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 544
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 544
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 544
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 544
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677619977",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "F:\\Users\\Niels\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1677654237"
},
"total": 34259.90498570001,
"count": 1,
"self": 0.005812200004584156,
"children": {
"run_training.setup": {
"total": 0.06886420000000015,
"count": 1,
"self": 0.06886420000000015
},
"TrainerController.start_learning": {
"total": 34259.8303093,
"count": 1,
"self": 12.870827198777988,
"children": {
"TrainerController._reset_env": {
"total": 3.21658669999282,
"count": 57,
"self": 3.21658669999282
},
"TrainerController.advance": {
"total": 34243.62446980123,
"count": 779453,
"self": 11.162809000554262,
"children": {
"env_step": {
"total": 9686.743012900537,
"count": 779453,
"self": 7489.50477690216,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2188.6810138002306,
"count": 779453,
"self": 70.00135830212639,
"children": {
"TorchPolicy.evaluate": {
"total": 2118.6796554981042,
"count": 1414136,
"self": 2118.6796554981042
}
}
},
"workers": {
"total": 8.55722219814554,
"count": 779453,
"self": 0.0,
"children": {
"worker_root": {
"total": 34229.93478920081,
"count": 779453,
"is_parallel": true,
"self": 28230.72708430112,
"children": {
"steps_from_proto": {
"total": 0.08877500000701932,
"count": 114,
"is_parallel": true,
"self": 0.015110099952523104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07366490005449622,
"count": 456,
"is_parallel": true,
"self": 0.07366490005449622
}
}
},
"UnityEnvironment.step": {
"total": 5999.118929899678,
"count": 779453,
"is_parallel": true,
"self": 342.9582357968493,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 303.02656130022547,
"count": 779453,
"is_parallel": true,
"self": 303.02656130022547
},
"communicator.exchange": {
"total": 4242.5582872005,
"count": 779453,
"is_parallel": true,
"self": 4242.5582872005
},
"steps_from_proto": {
"total": 1110.5758456021028,
"count": 1558906,
"is_parallel": true,
"self": 190.7607114994496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 919.8151341026532,
"count": 6235624,
"is_parallel": true,
"self": 919.8151341026532
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 24545.718647900136,
"count": 779453,
"self": 107.36829729984674,
"children": {
"process_trajectory": {
"total": 6366.099705400297,
"count": 779453,
"self": 6363.710259900289,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3894455000081507,
"count": 22,
"self": 2.3894455000081507
}
}
},
"_update_policy": {
"total": 18072.25064519999,
"count": 545,
"self": 1594.3519775001696,
"children": {
"TorchPOCAOptimizer.update": {
"total": 16477.89866769982,
"count": 16337,
"self": 16477.89866769982
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999988156370819e-07,
"count": 1,
"self": 7.999988156370819e-07
},
"TrainerController._save_models": {
"total": 0.11842479999904754,
"count": 1,
"self": 0.001890599996841047,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11653420000220649,
"count": 1,
"self": 0.11653420000220649
}
}
}
}
}
}
}