poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4567415714263916,
"min": 1.3126190900802612,
"max": 3.2957043647766113,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28761.90625,
"min": 20743.95703125,
"max": 132507.15625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.351063829787236,
"min": 39.52,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19684.0,
"min": 7992.0,
"max": 27920.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1649.5823315696862,
"min": 1196.6379404980053,
"max": 1746.240399168466,
"count": 4969
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 310121.478335101,
"min": 2393.2758809960105,
"max": 410382.6908686255,
"count": 4969
},
"SoccerTwos.Step.mean": {
"value": 49999914.0,
"min": 9550.0,
"max": 49999914.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999914.0,
"min": 9550.0,
"max": 49999914.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.07750029116868973,
"min": -0.13002663850784302,
"max": 0.20040389895439148,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -14.57005500793457,
"min": -23.36751937866211,
"max": 31.813045501708984,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07770086079835892,
"min": -0.13141168653964996,
"max": 0.19435946643352509,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.607762336730957,
"min": -22.205169677734375,
"max": 31.409889221191406,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.15238297809945775,
"min": -0.7142857142857143,
"max": 0.48140241367271147,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -28.64799988269806,
"min": -67.65360009670258,
"max": 63.22719943523407,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.15238297809945775,
"min": -0.7142857142857143,
"max": 0.48140241367271147,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -28.64799988269806,
"min": -67.65360009670258,
"max": 63.22719943523407,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017055213035200724,
"min": 0.009752229471147681,
"max": 0.025547918040926258,
"count": 2423
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017055213035200724,
"min": 0.009752229471147681,
"max": 0.025547918040926258,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10096047694484393,
"min": 7.215567302409909e-06,
"max": 0.1292221556107203,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10096047694484393,
"min": 7.215567302409909e-06,
"max": 0.1292221556107203,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10134102652470271,
"min": 7.898130525063607e-06,
"max": 0.1324529307583968,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10134102652470271,
"min": 7.898130525063607e-06,
"max": 0.1324529307583968,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675329634",
"python_version": "3.8.16 | packaged by conda-forge | (default, Feb 1 2023, 16:01:55) \n[GCC 11.3.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn config/poca/SoccerTwos.yaml --env ../env/SoccerTows.x86_64 --run-id nachshonc_v1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1675437122"
},
"total": 107488.28744331401,
"count": 1,
"self": 0.3362978690129239,
"children": {
"run_training.setup": {
"total": 0.01918340900000004,
"count": 1,
"self": 0.01918340900000004
},
"TrainerController.start_learning": {
"total": 107487.931962036,
"count": 1,
"self": 83.91960325575201,
"children": {
"TrainerController._reset_env": {
"total": 13.701780004989097,
"count": 250,
"self": 13.701780004989097
},
"TrainerController.advance": {
"total": 107389.73796373328,
"count": 3441322,
"self": 89.4884539435734,
"children": {
"env_step": {
"total": 80250.48119759267,
"count": 3441322,
"self": 58430.20581952722,
"children": {
"SubprocessEnvManager._take_step": {
"total": 21768.999314276625,
"count": 3441322,
"self": 515.6430356909405,
"children": {
"TorchPolicy.evaluate": {
"total": 21253.356278585685,
"count": 6282110,
"self": 21253.356278585685
}
}
},
"workers": {
"total": 51.27606378882807,
"count": 3441322,
"self": 0.0,
"children": {
"worker_root": {
"total": 107318.01145466334,
"count": 3441322,
"is_parallel": true,
"self": 58591.8244719033,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004790505000002554,
"count": 2,
"is_parallel": true,
"self": 0.002787836000010202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002002668999992352,
"count": 8,
"is_parallel": true,
"self": 0.002002668999992352
}
}
},
"UnityEnvironment.step": {
"total": 0.07278313799999836,
"count": 1,
"is_parallel": true,
"self": 0.0006551979999969149,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004804220000025339,
"count": 1,
"is_parallel": true,
"self": 0.0004804220000025339
},
"communicator.exchange": {
"total": 0.023657260999996765,
"count": 1,
"is_parallel": true,
"self": 0.023657260999996765
},
"steps_from_proto": {
"total": 0.047990257000002146,
"count": 2,
"is_parallel": true,
"self": 0.0004740980000050854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04751615899999706,
"count": 8,
"is_parallel": true,
"self": 0.04751615899999706
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 48725.62705070806,
"count": 3441321,
"is_parallel": true,
"self": 2561.9700887540166,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1686.7395188800815,
"count": 3441321,
"is_parallel": true,
"self": 1686.7395188800815
},
"communicator.exchange": {
"total": 36563.25568634441,
"count": 3441321,
"is_parallel": true,
"self": 36563.25568634441
},
"steps_from_proto": {
"total": 7913.661756729556,
"count": 6882642,
"is_parallel": true,
"self": 1541.4449322352084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6372.216824494348,
"count": 27530568,
"is_parallel": true,
"self": 6372.216824494348
}
}
}
}
},
"steps_from_proto": {
"total": 0.5599320519748403,
"count": 498,
"is_parallel": true,
"self": 0.11079636092500778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.44913569104983253,
"count": 1992,
"is_parallel": true,
"self": 0.44913569104983253
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 27049.768312197037,
"count": 3441322,
"self": 609.4411538380627,
"children": {
"process_trajectory": {
"total": 12460.254691461007,
"count": 3441322,
"self": 12410.249031787978,
"children": {
"RLTrainer._checkpoint": {
"total": 50.00565967302839,
"count": 100,
"self": 50.00565967302839
}
}
},
"_update_policy": {
"total": 13980.072466897966,
"count": 2423,
"self": 8165.400367447144,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5814.672099450821,
"count": 72699,
"self": 5814.672099450821
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.279894245788455e-07,
"count": 1,
"self": 8.279894245788455e-07
},
"TrainerController._save_models": {
"total": 0.572614213990164,
"count": 1,
"self": 0.04405526698974427,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5285589470004197,
"count": 1,
"self": 0.5285589470004197
}
}
}
}
}
}
}
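
The JSON above is the raw gauge and timer dump written by ML-Agents for this run. As a quick way to inspect it, the sketch below (not part of the uploaded file) loads the file and prints the recorded gauges plus the nested timer tree. It only assumes the file is saved at run_logs/timers.json and relies on the keys visible above ("gauges", "value", "min", "max", "count", "children", "total"); the reading of "total" as seconds spent in a block and "self" as the portion not attributed to any child is an interpretation, not something stated in the file.

```python
import json

# Assumed local path; matches the file name shown in the header above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores its latest value plus min/max and how many times it was updated.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, count={gauge['count']})")

# The timer tree nests "children" blocks; "total" appears to be seconds spent in
# the block and "self" the share not covered by its children.
def walk(node, name="root", depth=0):
    print("  " * depth +
          f"{name}: total={node.get('total', 0):.1f}s, count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)
```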