{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403228998184204,
"min": 1.403228998184204,
"max": 1.4289727210998535,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70002.8828125,
"min": 69019.6796875,
"max": 77445.3203125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 69.04341736694678,
"min": 69.04341736694678,
"max": 391.3828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49297.0,
"min": 48641.0,
"max": 50097.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49617.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49617.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.529850721359253,
"min": 0.09101362526416779,
"max": 2.562394857406616,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1806.3133544921875,
"min": 11.558730125427246,
"max": 1806.3133544921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9115888817804536,
"min": 1.9256142039937296,
"max": 4.080791214987032,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2792.8744615912437,
"min": 244.55300390720367,
"max": 2823.9075207710266,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9115888817804536,
"min": 1.9256142039937296,
"max": 4.080791214987032,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2792.8744615912437,
"min": 244.55300390720367,
"max": 2823.9075207710266,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017578730093858515,
"min": 0.013388852636368635,
"max": 0.020930114932222445,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05273619028157554,
"min": 0.02677770527273727,
"max": 0.059735836560139435,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0627057037419743,
"min": 0.023507859713087477,
"max": 0.0627057037419743,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1881171112259229,
"min": 0.047015719426174954,
"max": 0.1881171112259229,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.958048680683332e-06,
"min": 3.958048680683332e-06,
"max": 0.000295338826553725,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1874146042049995e-05,
"min": 1.1874146042049995e-05,
"max": 0.0008438790187069998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10131931666666667,
"min": 0.10131931666666667,
"max": 0.19844627499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30395795000000003,
"min": 0.20777125000000002,
"max": 0.581293,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.583390166666665e-05,
"min": 7.583390166666665e-05,
"max": 0.0049224691224999985,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022750170499999996,
"min": 0.00022750170499999996,
"max": 0.014066520700000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696951107",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696953531"
},
"total": 2423.802595407,
"count": 1,
"self": 0.43636329399987517,
"children": {
"run_training.setup": {
"total": 0.04781813099998544,
"count": 1,
"self": 0.04781813099998544
},
"TrainerController.start_learning": {
"total": 2423.318413982,
"count": 1,
"self": 4.413656039996113,
"children": {
"TrainerController._reset_env": {
"total": 8.021537040000112,
"count": 1,
"self": 8.021537040000112
},
"TrainerController.advance": {
"total": 2410.784739200004,
"count": 233890,
"self": 4.560497000141822,
"children": {
"env_step": {
"total": 1846.5977701539052,
"count": 233890,
"self": 1531.5861030200733,
"children": {
"SubprocessEnvManager._take_step": {
"total": 312.20134225886727,
"count": 233890,
"self": 16.499976662820472,
"children": {
"TorchPolicy.evaluate": {
"total": 295.7013655960468,
"count": 222945,
"self": 295.7013655960468
}
}
},
"workers": {
"total": 2.8103248749646355,
"count": 233890,
"self": 0.0,
"children": {
"worker_root": {
"total": 2415.992196399164,
"count": 233890,
"is_parallel": true,
"self": 1171.2062316571828,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000846455000100832,
"count": 1,
"is_parallel": true,
"self": 0.00024190499993892445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006045500001619075,
"count": 2,
"is_parallel": true,
"self": 0.0006045500001619075
}
}
},
"UnityEnvironment.step": {
"total": 0.02694922000000588,
"count": 1,
"is_parallel": true,
"self": 0.0002893549999498646,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023497400002270297,
"count": 1,
"is_parallel": true,
"self": 0.00023497400002270297
},
"communicator.exchange": {
"total": 0.025645232999977452,
"count": 1,
"is_parallel": true,
"self": 0.025645232999977452
},
"steps_from_proto": {
"total": 0.0007796580000558606,
"count": 1,
"is_parallel": true,
"self": 0.00023580899983244308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005438490002234175,
"count": 2,
"is_parallel": true,
"self": 0.0005438490002234175
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1244.7859647419814,
"count": 233889,
"is_parallel": true,
"self": 40.70974441420867,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.11496812792711,
"count": 233889,
"is_parallel": true,
"self": 82.11496812792711
},
"communicator.exchange": {
"total": 1022.8206100708637,
"count": 233889,
"is_parallel": true,
"self": 1022.8206100708637
},
"steps_from_proto": {
"total": 99.14064212898188,
"count": 233889,
"is_parallel": true,
"self": 35.78881004893856,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.35183208004332,
"count": 467778,
"is_parallel": true,
"self": 63.35183208004332
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 559.6264720459569,
"count": 233890,
"self": 6.173125561966344,
"children": {
"process_trajectory": {
"total": 152.44454011298876,
"count": 233890,
"self": 151.1038633239889,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3406767889998719,
"count": 10,
"self": 1.3406767889998719
}
}
},
"_update_policy": {
"total": 401.0088063710018,
"count": 97,
"self": 339.93564413300624,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.073162237995575,
"count": 2910,
"self": 61.073162237995575
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.7599968285067e-07,
"count": 1,
"self": 9.7599968285067e-07
},
"TrainerController._save_models": {
"total": 0.09848072600016167,
"count": 1,
"self": 0.001958457000000635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09652226900016103,
"count": 1,
"self": 0.09652226900016103
}
}
}
}
}
}
}