{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8472766876220703,
"min": 1.8125964403152466,
"max": 3.2957024574279785,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36709.08203125,
"min": 19024.939453125,
"max": 105462.4765625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 67.08108108108108,
"min": 41.99145299145299,
"max": 955.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19856.0,
"min": 3728.0,
"max": 29388.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1578.1082421814567,
"min": 1199.0014391117093,
"max": 1621.51053721304,
"count": 500
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 233560.0198428556,
"min": 2398.0028782234185,
"max": 371584.588301979,
"count": 500
},
"SoccerTwos.Step.mean": {
"value": 4999964.0,
"min": 9710.0,
"max": 4999964.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999964.0,
"min": 9710.0,
"max": 4999964.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03346812352538109,
"min": -0.10147440433502197,
"max": 0.18483717739582062,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.953282356262207,
"min": -20.19340705871582,
"max": 25.692367553710938,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.037251055240631104,
"min": -0.10180693119764328,
"max": 0.18707722425460815,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.513155937194824,
"min": -20.259578704833984,
"max": 26.003734588623047,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.21308378190607638,
"min": -0.5,
"max": 0.4011777780122227,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -31.536399722099304,
"min": -45.79879975318909,
"max": 61.599599957466125,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.21308378190607638,
"min": -0.5,
"max": 0.4011777780122227,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -31.536399722099304,
"min": -45.79879975318909,
"max": 61.599599957466125,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017255713748939645,
"min": 0.011526322570474198,
"max": 0.02349549486146619,
"count": 241
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017255713748939645,
"min": 0.011526322570474198,
"max": 0.02349549486146619,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09383611952265104,
"min": 0.0006294909268035554,
"max": 0.12253104994694392,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09383611952265104,
"min": 0.0006294909268035554,
"max": 0.12253104994694392,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09437257324655851,
"min": 0.000640032676650056,
"max": 0.12375060990452766,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09437257324655851,
"min": 0.000640032676650056,
"max": 0.12375060990452766,
"count": 241
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675455279",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675465546"
},
"total": 10266.897011236,
"count": 1,
"self": 0.38967903400043724,
"children": {
"run_training.setup": {
"total": 0.09854491800001597,
"count": 1,
"self": 0.09854491800001597
},
"TrainerController.start_learning": {
"total": 10266.408787284,
"count": 1,
"self": 6.86918512520424,
"children": {
"TrainerController._reset_env": {
"total": 6.544008632001805,
"count": 25,
"self": 6.544008632001805
},
"TrainerController.advance": {
"total": 10252.735253358795,
"count": 342937,
"self": 7.650966929882998,
"children": {
"env_step": {
"total": 7698.2844725632685,
"count": 342937,
"self": 6009.374341474902,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1684.922113751939,
"count": 342937,
"self": 48.45838916502498,
"children": {
"TorchPolicy.evaluate": {
"total": 1636.463724586914,
"count": 629196,
"self": 321.2006332662877,
"children": {
"TorchPolicy.sample_actions": {
"total": 1315.2630913206262,
"count": 629196,
"self": 1315.2630913206262
}
}
}
}
},
"workers": {
"total": 3.9880173364276743,
"count": 342937,
"self": 0.0,
"children": {
"worker_root": {
"total": 10246.81622835373,
"count": 342937,
"is_parallel": true,
"self": 5138.324095843226,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028940809997948236,
"count": 2,
"is_parallel": true,
"self": 0.0008200769993891299,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020740040004056937,
"count": 8,
"is_parallel": true,
"self": 0.0020740040004056937
}
}
},
"UnityEnvironment.step": {
"total": 0.08233596399986709,
"count": 1,
"is_parallel": true,
"self": 0.003028599999652215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006843710000339343,
"count": 1,
"is_parallel": true,
"self": 0.0006843710000339343
},
"communicator.exchange": {
"total": 0.07060301000001346,
"count": 1,
"is_parallel": true,
"self": 0.07060301000001346
},
"steps_from_proto": {
"total": 0.008019983000167485,
"count": 2,
"is_parallel": true,
"self": 0.005617966000045271,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002402017000122214,
"count": 8,
"is_parallel": true,
"self": 0.002402017000122214
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5108.443098561501,
"count": 342936,
"is_parallel": true,
"self": 285.5037480975352,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 202.70484117832802,
"count": 342936,
"is_parallel": true,
"self": 202.70484117832802
},
"communicator.exchange": {
"total": 3658.152320977799,
"count": 342936,
"is_parallel": true,
"self": 3658.152320977799
},
"steps_from_proto": {
"total": 962.0821883078388,
"count": 685872,
"is_parallel": true,
"self": 195.61923662575896,
"children": {
"_process_rank_one_or_two_observation": {
"total": 766.4629516820798,
"count": 2743488,
"is_parallel": true,
"self": 766.4629516820798
}
}
}
}
},
"steps_from_proto": {
"total": 0.04903394900247804,
"count": 48,
"is_parallel": true,
"self": 0.010349801998245312,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03868414700423273,
"count": 192,
"is_parallel": true,
"self": 0.03868414700423273
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2546.799813865643,
"count": 342937,
"self": 47.23491536578058,
"children": {
"process_trajectory": {
"total": 1121.255497017871,
"count": 342937,
"self": 1118.9879654338727,
"children": {
"RLTrainer._checkpoint": {
"total": 2.267531583998334,
"count": 10,
"self": 2.267531583998334
}
}
},
"_update_policy": {
"total": 1378.3094014819917,
"count": 241,
"self": 846.4225084789098,
"children": {
"TorchPOCAOptimizer.update": {
"total": 531.8868930030819,
"count": 7239,
"self": 531.8868930030819
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2389991752570495e-06,
"count": 1,
"self": 1.2389991752570495e-06
},
"TrainerController._save_models": {
"total": 0.2603389289997722,
"count": 1,
"self": 0.0017856539998319931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2585532749999402,
"count": 1,
"self": 0.2585532749999402
}
}
}
}
}
}
}