ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4028276205062866,
"min": 1.4028276205062866,
"max": 1.426146388053894,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69735.9609375,
"min": 68696.953125,
"max": 76566.703125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 99.068,
"min": 81.73553719008264,
"max": 396.63492063492066,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49534.0,
"min": 49236.0,
"max": 49976.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999997.0,
"min": 49989.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999997.0,
"min": 49989.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3969130516052246,
"min": 0.06284072995185852,
"max": 2.4694459438323975,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1198.45654296875,
"min": 7.855091571807861,
"max": 1464.737060546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5379013612270356,
"min": 1.857462304353714,
"max": 3.98543688480617,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1768.9506806135178,
"min": 232.18278804421425,
"max": 2315.0689674019814,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5379013612270356,
"min": 1.857462304353714,
"max": 3.98543688480617,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1768.9506806135178,
"min": 232.18278804421425,
"max": 2315.0689674019814,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015502174489342402,
"min": 0.013723477651202149,
"max": 0.02136158649227582,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04650652346802721,
"min": 0.027446955302404298,
"max": 0.05563260692870244,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049569168976611566,
"min": 0.022972435110972984,
"max": 0.06296835113316775,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1487075069298347,
"min": 0.046046937070786956,
"max": 0.18516006159285703,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.486548837850008e-06,
"min": 3.486548837850008e-06,
"max": 0.0002953356015548,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0459646513550024e-05,
"min": 1.0459646513550024e-05,
"max": 0.0008440308186563999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116215000000002,
"min": 0.10116215000000002,
"max": 0.19844520000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034864500000001,
"min": 0.20745455000000002,
"max": 0.5813436000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.799128500000013e-05,
"min": 6.799128500000013e-05,
"max": 0.00492241548,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020397385500000039,
"min": 0.00020397385500000039,
"max": 0.014069045640000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726417792",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726420404"
},
"total": 2612.003500327,
"count": 1,
"self": 0.4345275419996142,
"children": {
"run_training.setup": {
"total": 0.059454344000073434,
"count": 1,
"self": 0.059454344000073434
},
"TrainerController.start_learning": {
"total": 2611.509518441,
"count": 1,
"self": 4.656345241022336,
"children": {
"TrainerController._reset_env": {
"total": 2.439727566999977,
"count": 1,
"self": 2.439727566999977
},
"TrainerController.advance": {
"total": 2604.290350808978,
"count": 232573,
"self": 4.810721937107246,
"children": {
"env_step": {
"total": 2076.650089941891,
"count": 232573,
"self": 1640.5037841209396,
"children": {
"SubprocessEnvManager._take_step": {
"total": 433.08918961596,
"count": 232573,
"self": 16.326614944993707,
"children": {
"TorchPolicy.evaluate": {
"total": 416.7625746709663,
"count": 223001,
"self": 416.7625746709663
}
}
},
"workers": {
"total": 3.057116204991644,
"count": 232573,
"self": 0.0,
"children": {
"worker_root": {
"total": 2604.00725320507,
"count": 232573,
"is_parallel": true,
"self": 1275.3757122771367,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011102760000767375,
"count": 1,
"is_parallel": true,
"self": 0.00037041700011286594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007398589999638716,
"count": 2,
"is_parallel": true,
"self": 0.0007398589999638716
}
}
},
"UnityEnvironment.step": {
"total": 0.03284953900003984,
"count": 1,
"is_parallel": true,
"self": 0.0005063499999096166,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021888900005251344,
"count": 1,
"is_parallel": true,
"self": 0.00021888900005251344
},
"communicator.exchange": {
"total": 0.031228449000082037,
"count": 1,
"is_parallel": true,
"self": 0.031228449000082037
},
"steps_from_proto": {
"total": 0.0008958509999956732,
"count": 1,
"is_parallel": true,
"self": 0.00028227300003891287,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006135779999567603,
"count": 2,
"is_parallel": true,
"self": 0.0006135779999567603
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1328.6315409279332,
"count": 232572,
"is_parallel": true,
"self": 39.17254286008142,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.87266476887271,
"count": 232572,
"is_parallel": true,
"self": 87.87266476887271
},
"communicator.exchange": {
"total": 1107.759753772033,
"count": 232572,
"is_parallel": true,
"self": 1107.759753772033
},
"steps_from_proto": {
"total": 93.82657952694603,
"count": 232572,
"is_parallel": true,
"self": 35.731528897921635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.0950506290244,
"count": 465144,
"is_parallel": true,
"self": 58.0950506290244
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 522.8295389299794,
"count": 232573,
"self": 6.626725503921307,
"children": {
"process_trajectory": {
"total": 170.0985410910571,
"count": 232573,
"self": 168.841086514057,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2574545770000896,
"count": 10,
"self": 1.2574545770000896
}
}
},
"_update_policy": {
"total": 346.10427233500104,
"count": 97,
"self": 278.90165930600244,
"children": {
"TorchPPOOptimizer.update": {
"total": 67.2026130289986,
"count": 2910,
"self": 67.2026130289986
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.979999049392063e-07,
"count": 1,
"self": 9.979999049392063e-07
},
"TrainerController._save_models": {
"total": 0.12309382600005847,
"count": 1,
"self": 0.003123944999970263,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11996988100008821,
"count": 1,
"self": 0.11996988100008821
}
}
}
}
}
}
}
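
A minimal sketch (not part of the run log itself) for inspecting a timers.json written by mlagents-learn; it assumes the file sits at run_logs/timers.json relative to the working directory, so adjust the path as needed.

# Load the timer log and summarize it.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Print the final/min/max of each recorded gauge
# (e.g. Huggy.Environment.CumulativeReward.mean).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f}")

# Walk the timer tree and show where wall-clock time was spent.
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)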