{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.821158528327942,
"min": 1.7865797281265259,
"max": 3.2957515716552734,
"count": 784
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36073.5078125,
"min": 11908.9765625,
"max": 110369.0,
"count": 784
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.05555555555556,
"min": 42.6140350877193,
"max": 999.0,
"count": 784
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20176.0,
"min": 13372.0,
"max": 28728.0,
"count": 784
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1651.730870948647,
"min": 1194.04761563103,
"max": 1661.041047044523,
"count": 725
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 237849.24541660515,
"min": 2389.598123961062,
"max": 369318.25076768815,
"count": 725
},
"SoccerTwos.Step.mean": {
"value": 7839940.0,
"min": 9384.0,
"max": 7839940.0,
"count": 784
},
"SoccerTwos.Step.sum": {
"value": 7839940.0,
"min": 9384.0,
"max": 7839940.0,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00606311671435833,
"min": -0.11393260955810547,
"max": 0.14806979894638062,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.8730888366699219,
"min": -20.507869720458984,
"max": 23.903762817382812,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0034142599906772375,
"min": -0.11152000725269318,
"max": 0.1510571390390396,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4916534423828125,
"min": -20.07360076904297,
"max": 24.164939880371094,
"count": 784
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 784
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05976111276282205,
"min": -0.5,
"max": 0.4247749907275041,
"count": 784
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.605600237846375,
"min": -59.54660004377365,
"max": 59.76800012588501,
"count": 784
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05976111276282205,
"min": -0.5,
"max": 0.4247749907275041,
"count": 784
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.605600237846375,
"min": -59.54660004377365,
"max": 59.76800012588501,
"count": 784
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 784
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 784
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017344167526850165,
"min": 0.01151408407992373,
"max": 0.024683230219913335,
"count": 376
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017344167526850165,
"min": 0.01151408407992373,
"max": 0.024683230219913335,
"count": 376
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1019140084584554,
"min": 1.3814547846398759e-07,
"max": 0.12279464155435563,
"count": 376
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1019140084584554,
"min": 1.3814547846398759e-07,
"max": 0.12279464155435563,
"count": 376
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10396987472971281,
"min": 1.535417389675331e-07,
"max": 0.12530338714520137,
"count": 376
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10396987472971281,
"min": 1.535417389675331e-07,
"max": 0.12530338714520137,
"count": 376
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 376
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 376
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 376
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 376
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 376
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 376
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708651488",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\kenny\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708669310"
},
"total": 17821.3719575,
"count": 1,
"self": 2.6133588000084274,
"children": {
"run_training.setup": {
"total": 0.15957779996097088,
"count": 1,
"self": 0.15957779996097088
},
"TrainerController.start_learning": {
"total": 17818.59902090003,
"count": 1,
"self": 13.789283990859985,
"children": {
"TrainerController._reset_env": {
"total": 12.517377799842507,
"count": 40,
"self": 12.517377799842507
},
"TrainerController.advance": {
"total": 17792.064280409308,
"count": 532414,
"self": 14.279852897278033,
"children": {
"env_step": {
"total": 13226.733587019728,
"count": 532414,
"self": 7462.81779612886,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5755.805586011498,
"count": 532414,
"self": 99.81591828144155,
"children": {
"TorchPolicy.evaluate": {
"total": 5655.989667730057,
"count": 992420,
"self": 5655.989667730057
}
}
},
"workers": {
"total": 8.110204879369121,
"count": 532413,
"self": 0.0,
"children": {
"worker_root": {
"total": 17792.416428375815,
"count": 532413,
"is_parallel": true,
"self": 11909.83916948532,
"children": {
"steps_from_proto": {
"total": 0.07088090002071112,
"count": 80,
"is_parallel": true,
"self": 0.014594499720260501,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05628640030045062,
"count": 320,
"is_parallel": true,
"self": 0.05628640030045062
}
}
},
"UnityEnvironment.step": {
"total": 5882.506377990474,
"count": 532413,
"is_parallel": true,
"self": 288.9347997960285,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 218.1922739962465,
"count": 532413,
"is_parallel": true,
"self": 218.1922739962465
},
"communicator.exchange": {
"total": 4469.835122104443,
"count": 532413,
"is_parallel": true,
"self": 4469.835122104443
},
"steps_from_proto": {
"total": 905.5441820937558,
"count": 1064826,
"is_parallel": true,
"self": 188.35595930245472,
"children": {
"_process_rank_one_or_two_observation": {
"total": 717.1882227913011,
"count": 4259304,
"is_parallel": true,
"self": 717.1882227913011
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4551.050840492302,
"count": 532413,
"self": 99.70117697620299,
"children": {
"process_trajectory": {
"total": 2195.025824516779,
"count": 532413,
"self": 2191.6383199165575,
"children": {
"RLTrainer._checkpoint": {
"total": 3.387504600221291,
"count": 15,
"self": 3.387504600221291
}
}
},
"_update_policy": {
"total": 2256.3238389993203,
"count": 376,
"self": 1265.4843034012592,
"children": {
"TorchPOCAOptimizer.update": {
"total": 990.839535598061,
"count": 11283,
"self": 990.839535598061
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100008375942707e-06,
"count": 1,
"self": 1.100008375942707e-06
},
"TrainerController._save_models": {
"total": 0.22807760001160204,
"count": 1,
"self": 0.007860900019295514,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22021669999230653,
"count": 1,
"self": 0.22021669999230653
}
}
}
}
}
}
}