{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4042669534683228,
"min": 1.4042669534683228,
"max": 1.4298268556594849,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69858.0703125,
"min": 68859.7578125,
"max": 76366.4140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 87.19753086419753,
"min": 81.90547263681592,
"max": 410.24590163934425,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49441.0,
"min": 48872.0,
"max": 50050.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999977.0,
"min": 49807.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999977.0,
"min": 49807.0,
"max": 1999977.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.497117280960083,
"min": 0.09787911921739578,
"max": 2.497117280960083,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1415.865478515625,
"min": 11.84337329864502,
"max": 1441.69970703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.963196579426054,
"min": 1.80726876128311,
"max": 3.963196579426054,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2247.1324605345726,
"min": 218.6795201152563,
"max": 2268.305274605751,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.963196579426054,
"min": 1.80726876128311,
"max": 3.963196579426054,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2247.1324605345726,
"min": 218.6795201152563,
"max": 2268.305274605751,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017829436317525607,
"min": 0.013463766763096727,
"max": 0.019521910698676946,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05348830895257682,
"min": 0.028171292125868302,
"max": 0.057325057592242955,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05964023657143116,
"min": 0.0234710360566775,
"max": 0.061926840556164583,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17892070971429347,
"min": 0.046942072113355,
"max": 0.17892070971429347,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.351348882916671e-06,
"min": 3.351348882916671e-06,
"max": 0.0002953361265546249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0054046648750013e-05,
"min": 1.0054046648750013e-05,
"max": 0.0008440041186652998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111708333333336,
"min": 0.10111708333333336,
"max": 0.19844537500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30335125000000007,
"min": 0.20736780000000002,
"max": 0.5813347000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.574245833333344e-05,
"min": 6.574245833333344e-05,
"max": 0.0049224242125,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001972273750000003,
"min": 0.0001972273750000003,
"max": 0.014068601530000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703932047",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/hypothe/miniconda3/envs/hfdrl-huggy/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=../trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703935198"
},
"total": 3150.946085375006,
"count": 1,
"self": 1.1816208670061314,
"children": {
"run_training.setup": {
"total": 0.020355569999082945,
"count": 1,
"self": 0.020355569999082945
},
"TrainerController.start_learning": {
"total": 3149.744108938001,
"count": 1,
"self": 4.484166687092511,
"children": {
"TrainerController._reset_env": {
"total": 12.77082071499899,
"count": 1,
"self": 12.77082071499899
},
"TrainerController.advance": {
"total": 3131.9666172709112,
"count": 231986,
"self": 4.027760334036429,
"children": {
"env_step": {
"total": 2656.9101721523766,
"count": 231986,
"self": 1898.6415112300456,
"children": {
"SubprocessEnvManager._take_step": {
"total": 755.1842428825039,
"count": 231986,
"self": 22.514688649462187,
"children": {
"TorchPolicy.evaluate": {
"total": 732.6695542330417,
"count": 222855,
"self": 732.6695542330417
}
}
},
"workers": {
"total": 3.084418039827142,
"count": 231986,
"self": 0.0,
"children": {
"worker_root": {
"total": 3136.3761239261657,
"count": 231986,
"is_parallel": true,
"self": 1519.9350784037815,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006213980086613446,
"count": 1,
"is_parallel": true,
"self": 0.00019088601402472705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004305119946366176,
"count": 2,
"is_parallel": true,
"self": 0.0004305119946366176
}
}
},
"UnityEnvironment.step": {
"total": 0.06067524400714319,
"count": 1,
"is_parallel": true,
"self": 0.0001972279860638082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029502900724764913,
"count": 1,
"is_parallel": true,
"self": 0.00029502900724764913
},
"communicator.exchange": {
"total": 0.05750926800828893,
"count": 1,
"is_parallel": true,
"self": 0.05750926800828893
},
"steps_from_proto": {
"total": 0.0026737190055428073,
"count": 1,
"is_parallel": true,
"self": 0.0003416170075070113,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002332101998035796,
"count": 2,
"is_parallel": true,
"self": 0.002332101998035796
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1616.4410455223842,
"count": 231985,
"is_parallel": true,
"self": 29.127033460594248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 60.150693706818856,
"count": 231985,
"is_parallel": true,
"self": 60.150693706818856
},
"communicator.exchange": {
"total": 1470.73132999886,
"count": 231985,
"is_parallel": true,
"self": 1470.73132999886
},
"steps_from_proto": {
"total": 56.431988356111106,
"count": 231985,
"is_parallel": true,
"self": 16.359741454260075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.07224690185103,
"count": 463970,
"is_parallel": true,
"self": 40.07224690185103
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 471.0286847844982,
"count": 231986,
"self": 7.1175089162861696,
"children": {
"process_trajectory": {
"total": 183.37574707728345,
"count": 231986,
"self": 180.45603832126653,
"children": {
"RLTrainer._checkpoint": {
"total": 2.9197087560169166,
"count": 10,
"self": 2.9197087560169166
}
}
},
"_update_policy": {
"total": 280.53542879092856,
"count": 97,
"self": 183.58064100179763,
"children": {
"TorchPPOOptimizer.update": {
"total": 96.95478778913093,
"count": 2910,
"self": 96.95478778913093
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.210037438198924e-07,
"count": 1,
"self": 7.210037438198924e-07
},
"TrainerController._save_models": {
"total": 0.5225035439943895,
"count": 1,
"self": 0.045967391997692175,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4765361519966973,
"count": 1,
"self": 0.4765361519966973
}
}
}
}
}
}
}