{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.277820587158203,
"min": 2.233041763305664,
"max": 2.971186637878418,
"count": 160
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43588.375,
"min": 18071.310546875,
"max": 65700.578125,
"count": 160
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 75.64615384615385,
"min": 52.75555555555555,
"max": 156.96875,
"count": 160
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19668.0,
"min": 2788.0,
"max": 21592.0,
"count": 160
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1548.0452403303507,
"min": 1303.0682328106807,
"max": 1560.847477448936,
"count": 160
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 201245.8812429456,
"min": 21029.240688677917,
"max": 281108.6245172949,
"count": 160
},
"SoccerTwos.Step.mean": {
"value": 4999966.0,
"min": 3409917.0,
"max": 4999966.0,
"count": 160
},
"SoccerTwos.Step.sum": {
"value": 4999966.0,
"min": 3409917.0,
"max": 4999966.0,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.07828344404697418,
"min": -0.025616463273763657,
"max": 0.206451416015625,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 10.176847457885742,
"min": -3.7143871784210205,
"max": 28.49029541015625,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.07244706898927689,
"min": -0.02666577138006687,
"max": 0.20727582275867462,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.418119430541992,
"min": -3.866536855697632,
"max": 28.604063034057617,
"count": 160
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 160
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1599061553294842,
"min": -0.2795611106687122,
"max": 0.47410973405416035,
"count": 160
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 20.787800192832947,
"min": -38.73800003528595,
"max": 53.57439994812012,
"count": 160
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1599061553294842,
"min": -0.2795611106687122,
"max": 0.47410973405416035,
"count": 160
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 20.787800192832947,
"min": -38.73800003528595,
"max": 53.57439994812012,
"count": 160
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 160
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 160
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016362083276423314,
"min": 0.012747406342532486,
"max": 0.021592475559252002,
"count": 77
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016362083276423314,
"min": 0.012747406342532486,
"max": 0.021592475559252002,
"count": 77
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08703848247726759,
"min": 0.043512505541245146,
"max": 0.09584195911884308,
"count": 77
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08703848247726759,
"min": 0.043512505541245146,
"max": 0.09584195911884308,
"count": 77
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08898836001753807,
"min": 0.04558493693669637,
"max": 0.09694288621346156,
"count": 77
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08898836001753807,
"min": 0.04558493693669637,
"max": 0.09694288621346156,
"count": 77
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 77
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 77
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 77
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 77
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 77
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 77
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688136081",
"python_version": "3.9.16 (main, May 17 2023, 17:49:16) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\gava_\\miniconda3\\envs\\rl_7\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --torch-device=cuda --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1688139308"
},
"total": 3226.9689866,
"count": 1,
"self": 2.2630827999996654,
"children": {
"run_training.setup": {
"total": 0.1483901999999997,
"count": 1,
"self": 0.1483901999999997
},
"TrainerController.start_learning": {
"total": 3224.5575136,
"count": 1,
"self": 2.6613024000321275,
"children": {
"TrainerController._reset_env": {
"total": 10.684930699999972,
"count": 9,
"self": 10.684930699999972
},
"TrainerController.advance": {
"total": 3210.9817783999683,
"count": 108296,
"self": 2.7155326000065543,
"children": {
"env_step": {
"total": 2242.4152754999836,
"count": 108296,
"self": 1249.9930535000467,
"children": {
"SubprocessEnvManager._take_step": {
"total": 990.8399423999942,
"count": 108296,
"self": 20.11214710002855,
"children": {
"TorchPolicy.evaluate": {
"total": 970.7277952999657,
"count": 199626,
"self": 970.7277952999657
}
}
},
"workers": {
"total": 1.5822795999428223,
"count": 108296,
"self": 0.0,
"children": {
"worker_root": {
"total": 3216.7188293000618,
"count": 108296,
"is_parallel": true,
"self": 2240.770959000104,
"children": {
"steps_from_proto": {
"total": 0.015113399999514332,
"count": 18,
"is_parallel": true,
"self": 0.0031775999996366266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.011935799999877705,
"count": 72,
"is_parallel": true,
"self": 0.011935799999877705
}
}
},
"UnityEnvironment.step": {
"total": 975.9327568999579,
"count": 108296,
"is_parallel": true,
"self": 56.801512999965894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 42.889611400014545,
"count": 108296,
"is_parallel": true,
"self": 42.889611400014545
},
"communicator.exchange": {
"total": 705.4799123999802,
"count": 108296,
"is_parallel": true,
"self": 705.4799123999802
},
"steps_from_proto": {
"total": 170.76172009999718,
"count": 216592,
"is_parallel": true,
"self": 36.24901950005798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 134.5127005999392,
"count": 866368,
"is_parallel": true,
"self": 134.5127005999392
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 965.8509702999783,
"count": 108296,
"self": 19.738505599945142,
"children": {
"process_trajectory": {
"total": 446.5538022000351,
"count": 108296,
"self": 445.04256160003484,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5112406000002352,
"count": 4,
"self": 1.5112406000002352
}
}
},
"_update_policy": {
"total": 499.558662499998,
"count": 77,
"self": 271.1019353000052,
"children": {
"TorchPOCAOptimizer.update": {
"total": 228.45672719999277,
"count": 2310,
"self": 228.45672719999277
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.000000222324161e-06,
"count": 1,
"self": 2.000000222324161e-06
},
"TrainerController._save_models": {
"total": 0.22950009999976828,
"count": 1,
"self": 0.017689499999505642,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21181060000026264,
"count": 1,
"self": 0.21181060000026264
}
}
}
}
}
}
}