{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5294710397720337,
"min": 1.4700586795806885,
"max": 1.5294710397720337,
"count": 9
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28827.470703125,
"min": 11687.9130859375,
"max": 33734.49609375,
"count": 9
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 76.1076923076923,
"min": 46.774193548387096,
"max": 78.65573770491804,
"count": 9
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19788.0,
"min": 5800.0,
"max": 20260.0,
"count": 9
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1205.9667852559141,
"min": 1197.7324490298895,
"max": 1210.9597691499735,
"count": 9
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 156775.68208326882,
"min": 74778.19615188657,
"max": 201019.3216788956,
"count": 9
},
"SoccerTwos.Step.mean": {
"value": 35099966.0,
"min": 35019956.0,
"max": 35099966.0,
"count": 9
},
"SoccerTwos.Step.sum": {
"value": 35099966.0,
"min": 35019956.0,
"max": 35099966.0,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.040143996477127075,
"min": -0.018500762060284615,
"max": 0.06616511940956116,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 5.17857551574707,
"min": -2.9231202602386475,
"max": 9.4616117477417,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03842344135046005,
"min": -0.016103558242321014,
"max": 0.07143042981624603,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 4.9566240310668945,
"min": -2.5443620681762695,
"max": 9.094518661499023,
"count": 9
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 9
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.19893333523772483,
"min": -0.21252658321887632,
"max": 0.24854753740498278,
"count": 9
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 25.662400245666504,
"min": -33.57920014858246,
"max": 25.662400245666504,
"count": 9
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.19893333523772483,
"min": -0.21252658321887632,
"max": 0.24854753740498278,
"count": 9
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 25.662400245666504,
"min": -33.57920014858246,
"max": 25.662400245666504,
"count": 9
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 9
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 9
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01885766123208062,
"min": 0.016385537358776976,
"max": 0.01885766123208062,
"count": 4
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01885766123208062,
"min": 0.016385537358776976,
"max": 0.01885766123208062,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08712201341986656,
"min": 0.08712201341986656,
"max": 0.10357063959042231,
"count": 4
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08712201341986656,
"min": 0.08712201341986656,
"max": 0.10357063959042231,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08830761685967445,
"min": 0.08830761685967445,
"max": 0.1051780936618646,
"count": 4
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08830761685967445,
"min": 0.08830761685967445,
"max": 0.1051780936618646,
"count": 4
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 4
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 4
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 4
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683020605",
"python_version": "3.9.5 (default, Jun 6 2021, 11:13:51) \n[Clang 12.0.5 (clang-1205.0.22.9)]",
"command_line_arguments": "/Users/main/.pyenv/versions/3.9.5/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos-v1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1683020942"
},
"total": 337.611078125,
"count": 1,
"self": 0.6461438340000427,
"children": {
"run_training.setup": {
"total": 0.029269790999999934,
"count": 1,
"self": 0.029269790999999934
},
"TrainerController.start_learning": {
"total": 336.9356645,
"count": 1,
"self": 0.07367294999943397,
"children": {
"TrainerController._reset_env": {
"total": 2.335710374,
"count": 2,
"self": 2.335710374
},
"TrainerController.advance": {
"total": 334.39428400900056,
"count": 5711,
"self": 0.0652968649989134,
"children": {
"env_step": {
"total": 67.37632123500114,
"count": 5711,
"self": 54.765413246001586,
"children": {
"SubprocessEnvManager._take_step": {
"total": 12.561572899000003,
"count": 5711,
"self": 0.313488940998111,
"children": {
"TorchPolicy.evaluate": {
"total": 12.248083958001892,
"count": 10454,
"self": 12.248083958001892
}
}
},
"workers": {
"total": 0.04933508999955105,
"count": 5711,
"self": 0.0,
"children": {
"worker_root": {
"total": 334.38130617399923,
"count": 5711,
"is_parallel": true,
"self": 287.7031886710009,
"children": {
"steps_from_proto": {
"total": 0.002902042999999299,
"count": 4,
"is_parallel": true,
"self": 0.0005716699999984698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023303730000008294,
"count": 16,
"is_parallel": true,
"self": 0.0023303730000008294
}
}
},
"UnityEnvironment.step": {
"total": 46.675215459998356,
"count": 5711,
"is_parallel": true,
"self": 2.413774209998664,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.5073794849998658,
"count": 5711,
"is_parallel": true,
"self": 1.5073794849998658
},
"communicator.exchange": {
"total": 35.81329271099928,
"count": 5711,
"is_parallel": true,
"self": 35.81329271099928
},
"steps_from_proto": {
"total": 6.9407690540005476,
"count": 11422,
"is_parallel": true,
"self": 1.1925039910013648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.748265062999183,
"count": 45688,
"is_parallel": true,
"self": 5.748265062999183
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 266.95266590900053,
"count": 5711,
"self": 0.5841022219997853,
"children": {
"process_trajectory": {
"total": 34.149873186000775,
"count": 5711,
"self": 34.149873186000775
},
"_update_policy": {
"total": 232.21869050099997,
"count": 4,
"self": 10.378228367999952,
"children": {
"TorchPOCAOptimizer.update": {
"total": 221.84046213300002,
"count": 120,
"self": 221.84046213300002
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.999999987376214e-07,
"count": 1,
"self": 4.999999987376214e-07
},
"TrainerController._save_models": {
"total": 0.1319966670000099,
"count": 1,
"self": 0.0005956249999599095,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13140104200004998,
"count": 1,
"self": 0.13140104200004998
}
}
}
}
}
}
}