{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6352548599243164,
"min": 1.5858198404312134,
"max": 1.8710861206054688,
"count": 376
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32600.44140625,
"min": 26645.12109375,
"max": 40594.4296875,
"count": 376
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.144329896907216,
"min": 39.395161290322584,
"max": 72.3529411764706,
"count": 376
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19456.0,
"min": 13776.0,
"max": 20848.0,
"count": 376
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1602.888870935971,
"min": 1589.7784550849465,
"max": 1643.6332130258231,
"count": 376
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 310960.4409615784,
"min": 217664.007909227,
"max": 410588.5659338291,
"count": 376
},
"SoccerTwos.Step.mean": {
"value": 9999742.0,
"min": 6249998.0,
"max": 9999742.0,
"count": 376
},
"SoccerTwos.Step.sum": {
"value": 9999742.0,
"min": 6249998.0,
"max": 9999742.0,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06604859232902527,
"min": -0.10717771202325821,
"max": 0.08588841557502747,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -12.879475593566895,
"min": -20.899654388427734,
"max": 21.27772331237793,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06388282030820847,
"min": -0.10746608674526215,
"max": 0.08911468833684921,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -12.457149505615234,
"min": -20.955886840820312,
"max": 22.10044288635254,
"count": 376
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 376
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.026838973852304313,
"min": -0.2997705258821186,
"max": 0.20987659661059685,
"count": 376
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 5.233599901199341,
"min": -56.95639991760254,
"max": 40.03939998149872,
"count": 376
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.026838973852304313,
"min": -0.2997705258821186,
"max": 0.20987659661059685,
"count": 376
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 5.233599901199341,
"min": -56.95639991760254,
"max": 40.03939998149872,
"count": 376
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 376
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 376
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016824819935330502,
"min": 0.010986334524932317,
"max": 0.022639802926278207,
"count": 182
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016824819935330502,
"min": 0.010986334524932317,
"max": 0.022639802926278207,
"count": 182
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1037929154932499,
"min": 0.0973580169181029,
"max": 0.13210416510701178,
"count": 182
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1037929154932499,
"min": 0.0973580169181029,
"max": 0.13210416510701178,
"count": 182
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10495370825131735,
"min": 0.09941927989323934,
"max": 0.13449397459626197,
"count": 182
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10495370825131735,
"min": 0.09941927989323934,
"max": 0.13449397459626197,
"count": 182
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 182
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 182
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 182
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 182
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 182
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 182
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675368422",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/swayam/miniconda3/envs/aivsai/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1675389355"
},
"total": 20933.09908675,
"count": 1,
"self": 0.2547062500016182,
"children": {
"run_training.setup": {
"total": 0.018279332999999953,
"count": 1,
"self": 0.018279332999999953
},
"TrainerController.start_learning": {
"total": 20932.826101167,
"count": 1,
"self": 3.9952369969541905,
"children": {
"TrainerController._reset_env": {
"total": 3.0492747479994047,
"count": 20,
"self": 3.0492747479994047
},
"TrainerController.advance": {
"total": 20925.66328925605,
"count": 264705,
"self": 3.487712746646139,
"children": {
"env_step": {
"total": 16740.51882920255,
"count": 264705,
"self": 16159.525152573467,
"children": {
"SubprocessEnvManager._take_step": {
"total": 578.3572410770118,
"count": 264705,
"self": 15.895055308947349,
"children": {
"TorchPolicy.evaluate": {
"total": 562.4621857680645,
"count": 470956,
"self": 562.4621857680645
}
}
},
"workers": {
"total": 2.6364355520686753,
"count": 264705,
"self": 0.0,
"children": {
"worker_root": {
"total": 20923.958696107245,
"count": 264705,
"is_parallel": true,
"self": 5269.333188276369,
"children": {
"steps_from_proto": {
"total": 0.048249625000226226,
"count": 40,
"is_parallel": true,
"self": 0.005078376988491007,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04317124801173522,
"count": 160,
"is_parallel": true,
"self": 0.04317124801173522
}
}
},
"UnityEnvironment.step": {
"total": 15654.577258205876,
"count": 264705,
"is_parallel": true,
"self": 42.89317328836478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 280.45364911746367,
"count": 264705,
"is_parallel": true,
"self": 280.45364911746367
},
"communicator.exchange": {
"total": 14708.78579793911,
"count": 264705,
"is_parallel": true,
"self": 14708.78579793911
},
"steps_from_proto": {
"total": 622.4446378609377,
"count": 529410,
"is_parallel": true,
"self": 65.03145552253682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 557.4131823384009,
"count": 2117640,
"is_parallel": true,
"self": 557.4131823384009
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4181.656747306854,
"count": 264705,
"self": 25.074067883005227,
"children": {
"process_trajectory": {
"total": 1009.6312764168599,
"count": 264705,
"self": 1008.656022584857,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9752538320028634,
"count": 8,
"self": 0.9752538320028634
}
}
},
"_update_policy": {
"total": 3146.9514030069895,
"count": 182,
"self": 470.07831389695184,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2676.8730891100377,
"count": 5460,
"self": 2676.8730891100377
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.250011210795492e-07,
"count": 1,
"self": 6.250011210795492e-07
},
"TrainerController._save_models": {
"total": 0.11829954099812312,
"count": 1,
"self": 0.0010015820007538423,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11729795899736928,
"count": 1,
"self": 0.11729795899736928
}
}
}
}
}
}
}