{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4987492561340332,
"min": 1.4687831401824951,
"max": 3.295746088027954,
"count": 1415
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30790.3046875,
"min": 18622.8828125,
"max": 108499.0546875,
"count": 1415
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 45.632075471698116,
"min": 40.98275862068966,
"max": 999.0,
"count": 1415
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19348.0,
"min": 16092.0,
"max": 26748.0,
"count": 1415
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1666.6848612477863,
"min": 1198.862184640307,
"max": 1732.9687814775837,
"count": 1408
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 353337.1905845307,
"min": 2400.0217860570974,
"max": 388621.78174823633,
"count": 1408
},
"SoccerTwos.Step.mean": {
"value": 14149970.0,
"min": 9394.0,
"max": 14149970.0,
"count": 1415
},
"SoccerTwos.Step.sum": {
"value": 14149970.0,
"min": 9394.0,
"max": 14149970.0,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.042452745139598846,
"min": -0.15603262186050415,
"max": 0.16966989636421204,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -9.042434692382812,
"min": -24.17540168762207,
"max": 25.75534439086914,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04572370648384094,
"min": -0.15828919410705566,
"max": 0.16486148536205292,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -9.73914909362793,
"min": -24.06298065185547,
"max": 25.919845581054688,
"count": 1415
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1415
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.12937652616993361,
"min": -0.5138800024986268,
"max": 0.5900315720784036,
"count": 1415
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -27.557200074195862,
"min": -74.71399998664856,
"max": 51.5206001996994,
"count": 1415
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.12937652616993361,
"min": -0.5138800024986268,
"max": 0.5900315720784036,
"count": 1415
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -27.557200074195862,
"min": -74.71399998664856,
"max": 51.5206001996994,
"count": 1415
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1415
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1415
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01879771878690614,
"min": 0.009975290037497567,
"max": 0.026518385223365235,
"count": 684
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01879771878690614,
"min": 0.009975290037497567,
"max": 0.026518385223365235,
"count": 684
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11922327304879825,
"min": 1.9979877803658987e-05,
"max": 0.1255020409822464,
"count": 684
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11922327304879825,
"min": 1.9979877803658987e-05,
"max": 0.1255020409822464,
"count": 684
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12138473739226659,
"min": 1.988412389740309e-05,
"max": 0.1276343566675981,
"count": 684
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12138473739226659,
"min": 1.988412389740309e-05,
"max": 0.1276343566675981,
"count": 684
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 684
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 684
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 684
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 684
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 684
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 684
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696475516",
"python_version": "3.9.18 (main, Sep 11 2023, 08:25:10) \n[Clang 14.0.6 ]",
"command_line_arguments": "/opt/homebrew/Caskroom/miniconda/base/envs/hf-rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1696520939"
},
"total": 45423.014969375,
"count": 1,
"self": 0.18460329099616501,
"children": {
"run_training.setup": {
"total": 0.01175666700000022,
"count": 1,
"self": 0.01175666700000022
},
"TrainerController.start_learning": {
"total": 45422.818609417,
"count": 1,
"self": 10.917374800090329,
"children": {
"TrainerController._reset_env": {
"total": 4.425894872005513,
"count": 71,
"self": 4.425894872005513
},
"TrainerController.advance": {
"total": 45407.38982716191,
"count": 976178,
"self": 9.960945893602911,
"children": {
"env_step": {
"total": 35121.811429669804,
"count": 976178,
"self": 33645.357237809425,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1469.4130961211345,
"count": 976178,
"self": 41.28506463991266,
"children": {
"TorchPolicy.evaluate": {
"total": 1428.1280314812218,
"count": 1779816,
"self": 1428.1280314812218
}
}
},
"workers": {
"total": 7.041095739245463,
"count": 976178,
"self": 0.0,
"children": {
"worker_root": {
"total": 45395.48765676783,
"count": 976178,
"is_parallel": true,
"self": 13065.285799928712,
"children": {
"steps_from_proto": {
"total": 0.08380895602536742,
"count": 142,
"is_parallel": true,
"self": 0.011396127006569223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0724128290187982,
"count": 568,
"is_parallel": true,
"self": 0.0724128290187982
}
}
},
"UnityEnvironment.step": {
"total": 32330.118047883094,
"count": 976178,
"is_parallel": true,
"self": 94.98763262297507,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 508.84762762833793,
"count": 976178,
"is_parallel": true,
"self": 508.84762762833793
},
"communicator.exchange": {
"total": 30659.478355265474,
"count": 976178,
"is_parallel": true,
"self": 30659.478355265474
},
"steps_from_proto": {
"total": 1066.8044323663082,
"count": 1952356,
"is_parallel": true,
"self": 140.81307109628915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 925.9913612700191,
"count": 7809424,
"is_parallel": true,
"self": 925.9913612700191
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10275.617451598508,
"count": 976178,
"self": 80.53959470139307,
"children": {
"process_trajectory": {
"total": 2434.5386137400783,
"count": 976178,
"self": 2432.297959696082,
"children": {
"RLTrainer._checkpoint": {
"total": 2.2406540439960736,
"count": 28,
"self": 2.2406540439960736
}
}
},
"_update_policy": {
"total": 7760.539243157037,
"count": 685,
"self": 1073.6516661982896,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6686.887576958748,
"count": 20547,
"self": 6686.887576958748
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.999965312890708e-07,
"count": 1,
"self": 4.999965312890708e-07
},
"TrainerController._save_models": {
"total": 0.08551208299468271,
"count": 1,
"self": 0.0014863329924992286,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08402575000218349,
"count": 1,
"self": 0.08402575000218349
}
}
}
}
}
}
}