run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.624455451965332,
"min": 1.3943887948989868,
"max": 1.6854439973831177,
"count": 96
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 3317320.0,
"min": 2016890.0,
"max": 3430053.75,
"count": 96
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.7043078209954,
"min": 62.01512287334594,
"max": 72.57880794701987,
"count": 96
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 1971264.0,
"min": 1280580.0,
"max": 1972816.0,
"count": 96
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1924.6456481120542,
"min": 1716.2503646149212,
"max": 1932.1841669790583,
"count": 96
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 27610966.46781553,
"min": 16789401.77766643,
"max": 30483801.029434506,
"count": 96
},
"SoccerTwos.Step.mean": {
"value": 999999936.0,
"min": 904999968.0,
"max": 999999936.0,
"count": 96
},
"SoccerTwos.Step.sum": {
"value": 999999936.0,
"min": 904999968.0,
"max": 999999936.0,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03068486414849758,
"min": -0.04107572138309479,
"max": -0.006001170724630356,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -440.2357482910156,
"min": -612.1077880859375,
"max": -85.8707504272461,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.030335357412695885,
"min": -0.040899623185396194,
"max": -0.0058718216605484486,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -435.22137451171875,
"min": -611.8626708984375,
"max": -84.0198974609375,
"count": 96
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 96
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03908942628506709,
"min": -0.061987708790219365,
"max": -0.008603256710487572,
"count": 96
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -560.8159989118576,
"min": -934.0307960510254,
"max": -123.10400027036667,
"count": 96
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03908942628506709,
"min": -0.061987708790219365,
"max": -0.008603256710487572,
"count": 96
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -560.8159989118576,
"min": -934.0307960510254,
"max": -123.10400027036667,
"count": 96
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016996252771980146,
"min": 0.01612159730351697,
"max": 0.018172603595048204,
"count": 96
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.8328163858270272,
"min": 0.5260980272011694,
"max": 0.8744922907883543,
"count": 96
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09311231680873297,
"min": 0.08947957741717498,
"max": 0.1026641885439555,
"count": 96
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 4.562503523627916,
"min": 2.9565853697558246,
"max": 5.030545238653819,
"count": 96
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09347450240534179,
"min": 0.0897525948834502,
"max": 0.10301987835440507,
"count": 96
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 4.580250617861748,
"min": 2.9664761359492933,
"max": 5.047974039365848,
"count": 96
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 96
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.014699999999999998,
"min": 0.0093,
"max": 0.014699999999999998,
"count": 96
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2000000000000001,
"min": 0.2000000000000001,
"max": 0.2000000000000001,
"count": 96
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 9.800000000000004,
"min": 6.200000000000003,
"max": 9.800000000000004,
"count": 96
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000002,
"count": 96
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.24500000000000005,
"min": 0.15500000000000005,
"max": 0.24500000000000005,
"count": 96
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 96
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 96
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680881236",
"python_version": "3.9.16 (main, Dec 7 2022, 01:12:08) \n[GCC 11.3.0]",
"command_line_arguments": "/home/jonatan/PycharmProjects/HuggingFaceDLUnit7/venv3.9/bin/mlagents-learn ./config/poca/0048.yaml --env=./ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=0048 --num-envs=4 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.0+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1680968284"
},
"total": 87048.175701533,
"count": 1,
"self": 0.3316875769960461,
"children": {
"run_training.setup": {
"total": 0.024446632000035606,
"count": 1,
"self": 0.024446632000035606
},
"TrainerController.start_learning": {
"total": 87047.819567324,
"count": 1,
"self": 125.16966521918948,
"children": {
"TrainerController._reset_env": {
"total": 32.70872525499726,
"count": 479,
"self": 32.70872525499726
},
"TrainerController.advance": {
"total": 86889.77726001281,
"count": 4948111,
"self": 112.03211297637608,
"children": {
"env_step": {
"total": 45975.9774582799,
"count": 4948111,
"self": 18752.706505601025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 27144.921234357702,
"count": 6650861,
"self": 838.6496852510754,
"children": {
"TorchPolicy.evaluate": {
"total": 26306.271549106626,
"count": 12184978,
"self": 26306.271549106626
}
}
},
"workers": {
"total": 78.3497183211739,
"count": 4948111,
"self": 0.0,
"children": {
"worker_root": {
"total": 347810.94125152973,
"count": 6649761,
"is_parallel": true,
"self": 236190.8112412791,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004971511998519418,
"count": 4,
"is_parallel": true,
"self": 0.0010361049990024185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003935406999516999,
"count": 16,
"is_parallel": true,
"self": 0.003935406999516999
}
}
},
"UnityEnvironment.step": {
"total": 0.06909569400067994,
"count": 2,
"is_parallel": true,
"self": 0.0016857320015333244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0011996329994872212,
"count": 2,
"is_parallel": true,
"self": 0.0011996329994872212
},
"communicator.exchange": {
"total": 0.06198637399938889,
"count": 2,
"is_parallel": true,
"self": 0.06198637399938889
},
"steps_from_proto": {
"total": 0.004223955000270507,
"count": 4,
"is_parallel": true,
"self": 0.0007649990011486807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0034589559991218266,
"count": 16,
"is_parallel": true,
"self": 0.0034589559991218266
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 3.794406687566152,
"count": 3824,
"is_parallel": true,
"self": 0.622983008130177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.171423679435975,
"count": 15296,
"is_parallel": true,
"self": 3.171423679435975
}
}
},
"UnityEnvironment.step": {
"total": 111616.33560356307,
"count": 6649759,
"is_parallel": true,
"self": 5289.976000233466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3149.0230169496426,
"count": 6649759,
"is_parallel": true,
"self": 3149.0230169496426
},
"communicator.exchange": {
"total": 90208.68516113196,
"count": 6649759,
"is_parallel": true,
"self": 90208.68516113196
},
"steps_from_proto": {
"total": 12968.651425248001,
"count": 13299518,
"is_parallel": true,
"self": 2270.3721931114305,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10698.27923213657,
"count": 53198072,
"is_parallel": true,
"self": 10698.27923213657
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 40801.76768875653,
"count": 4948111,
"self": 1148.0550729062816,
"children": {
"process_trajectory": {
"total": 16460.543154391984,
"count": 4948111,
"self": 16429.780812657078,
"children": {
"RLTrainer._checkpoint": {
"total": 30.76234173490593,
"count": 192,
"self": 30.76234173490593
}
}
},
"_update_policy": {
"total": 23193.169461458263,
"count": 4641,
"self": 17611.234308369956,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5581.935153088307,
"count": 139230,
"self": 5581.935153088307
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5429977793246508e-06,
"count": 1,
"self": 1.5429977793246508e-06
},
"TrainerController._save_models": {
"total": 0.16391529400425497,
"count": 1,
"self": 0.0028431520040612668,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1610721420001937,
"count": 1,
"self": 0.1610721420001937
}
}
}
}
}
}
}