{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4046621322631836,
"min": 1.4046621322631836,
"max": 1.4287943840026855,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70196.5859375,
"min": 68342.4375,
"max": 78381.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.54942965779468,
"min": 75.71165644171779,
"max": 374.9924812030075,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49733.0,
"min": 48935.0,
"max": 49939.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999510.0,
"min": 49982.0,
"max": 1999510.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999510.0,
"min": 49982.0,
"max": 1999510.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4553120136260986,
"min": 0.06751848012208939,
"max": 2.4952285289764404,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1291.494140625,
"min": 8.912439346313477,
"max": 1538.981201171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.741369301035377,
"min": 1.8202778536713484,
"max": 4.024256020470669,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1967.9602523446083,
"min": 240.276676684618,
"max": 2413.202315092087,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.741369301035377,
"min": 1.8202778536713484,
"max": 4.024256020470669,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1967.9602523446083,
"min": 240.276676684618,
"max": 2413.202315092087,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0171636830578791,
"min": 0.012765157987936012,
"max": 0.020909278890273223,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051491049173637296,
"min": 0.025530315975872023,
"max": 0.055669393203182455,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05250822889308135,
"min": 0.022904966833690803,
"max": 0.06120150937802262,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15752468667924405,
"min": 0.045809933667381605,
"max": 0.18360452813406786,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.618198793966668e-06,
"min": 3.618198793966668e-06,
"max": 0.00029533042655652497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0854596381900004e-05,
"min": 1.0854596381900004e-05,
"max": 0.0008440078686640499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120603333333338,
"min": 0.10120603333333338,
"max": 0.19844347499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036181000000001,
"min": 0.20757005000000006,
"max": 0.5813359500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.018106333333338e-05,
"min": 7.018106333333338e-05,
"max": 0.004922329402499998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021054319000000016,
"min": 0.00021054319000000016,
"max": 0.014068663905,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690005761",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690008333"
},
"total": 2571.8374378179997,
"count": 1,
"self": 0.4371220279995214,
"children": {
"run_training.setup": {
"total": 0.05147145399996589,
"count": 1,
"self": 0.05147145399996589
},
"TrainerController.start_learning": {
"total": 2571.348844336,
"count": 1,
"self": 4.65227508494354,
"children": {
"TrainerController._reset_env": {
"total": 5.651858772999958,
"count": 1,
"self": 5.651858772999958
},
"TrainerController.advance": {
"total": 2560.919621293057,
"count": 232649,
"self": 4.813824574218415,
"children": {
"env_step": {
"total": 1986.8640328239485,
"count": 232649,
"self": 1677.5751170549643,
"children": {
"SubprocessEnvManager._take_step": {
"total": 306.11858398700633,
"count": 232649,
"self": 17.436255140854428,
"children": {
"TorchPolicy.evaluate": {
"total": 288.6823288461519,
"count": 222965,
"self": 288.6823288461519
}
}
},
"workers": {
"total": 3.170331781978007,
"count": 232649,
"self": 0.0,
"children": {
"worker_root": {
"total": 2563.3703484139787,
"count": 232649,
"is_parallel": true,
"self": 1195.3595283230543,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011080819999733649,
"count": 1,
"is_parallel": true,
"self": 0.0003699609999330278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007381210000403371,
"count": 2,
"is_parallel": true,
"self": 0.0007381210000403371
}
}
},
"UnityEnvironment.step": {
"total": 0.030982210000047417,
"count": 1,
"is_parallel": true,
"self": 0.00036796500012314937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002280620000192357,
"count": 1,
"is_parallel": true,
"self": 0.0002280620000192357
},
"communicator.exchange": {
"total": 0.029658719999929417,
"count": 1,
"is_parallel": true,
"self": 0.029658719999929417
},
"steps_from_proto": {
"total": 0.0007274629999756144,
"count": 1,
"is_parallel": true,
"self": 0.0002273220000006404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000500140999974974,
"count": 2,
"is_parallel": true,
"self": 0.000500140999974974
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1368.0108200909244,
"count": 232648,
"is_parallel": true,
"self": 40.91234105894546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 86.79152083705094,
"count": 232648,
"is_parallel": true,
"self": 86.79152083705094
},
"communicator.exchange": {
"total": 1139.0382471249595,
"count": 232648,
"is_parallel": true,
"self": 1139.0382471249595
},
"steps_from_proto": {
"total": 101.26871106996839,
"count": 232648,
"is_parallel": true,
"self": 39.39402028480288,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.87469078516551,
"count": 465296,
"is_parallel": true,
"self": 61.87469078516551
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 569.2417638948901,
"count": 232649,
"self": 7.101360429794568,
"children": {
"process_trajectory": {
"total": 148.53990333709714,
"count": 232649,
"self": 147.08147774009717,
"children": {
"RLTrainer._checkpoint": {
"total": 1.458425596999973,
"count": 10,
"self": 1.458425596999973
}
}
},
"_update_policy": {
"total": 413.60050012799843,
"count": 97,
"self": 352.65182710100123,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.9486730269972,
"count": 2910,
"self": 60.9486730269972
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.059999704011716e-06,
"count": 1,
"self": 1.059999704011716e-06
},
"TrainerController._save_models": {
"total": 0.12508812499982014,
"count": 1,
"self": 0.0019049809998250566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12318314399999508,
"count": 1,
"self": 0.12318314399999508
}
}
}
}
}
}
}