ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37931501865386963,
"min": 0.37931501865386963,
"max": 1.4356695413589478,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11519.0380859375,
"min": 11488.189453125,
"max": 43552.47265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29903.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29903.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6160839796066284,
"min": -0.10576646029949188,
"max": 0.6504782438278198,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.20001220703125,
"min": -25.489717483520508,
"max": 188.63868713378906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0023723181802779436,
"min": -0.023880835622549057,
"max": 0.40277382731437683,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.6784830093383789,
"min": -6.2806596755981445,
"max": 95.4573974609375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06576063543547034,
"min": 0.06444122273976627,
"max": 0.07404839421177725,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9206488960965847,
"min": 0.5183387594824408,
"max": 1.0636327762222921,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01671217508070792,
"min": 0.00041937753764921185,
"max": 0.018361406066554082,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23397045112991086,
"min": 0.0037743978388429067,
"max": 0.27240155329733773,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.693768864014286e-06,
"min": 7.693768864014286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010771276409620001,
"min": 0.00010771276409620001,
"max": 0.0035079317306895,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256455714285714,
"min": 0.10256455714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359038,
"min": 1.3886848,
"max": 2.6625277,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002661992585714287,
"min": 0.0002661992585714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037267896200000017,
"min": 0.0037267896200000017,
"max": 0.11695411894999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010105631314218044,
"min": 0.010105631314218044,
"max": 0.35911989212036133,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14147883653640747,
"min": 0.14147883653640747,
"max": 2.5138392448425293,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 302.53921568627453,
"min": 293.4757281553398,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30859.0,
"min": 16830.0,
"max": 32982.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.69635642032222,
"min": -1.0000000521540642,
"max": 1.69635642032222,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 171.3319984525442,
"min": -32.000001668930054,
"max": 171.3319984525442,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.69635642032222,
"min": -1.0000000521540642,
"max": 1.69635642032222,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 171.3319984525442,
"min": -32.000001668930054,
"max": 171.3319984525442,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03191587889159564,
"min": 0.031087937423418553,
"max": 6.286493404823191,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2235037680511596,
"min": 3.151987976489181,
"max": 106.87038788199425,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698262506",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698264892"
},
"total": 2385.5449966209994,
"count": 1,
"self": 0.5262584829993102,
"children": {
"run_training.setup": {
"total": 0.04622457899995425,
"count": 1,
"self": 0.04622457899995425
},
"TrainerController.start_learning": {
"total": 2384.972513559,
"count": 1,
"self": 1.5940617879159618,
"children": {
"TrainerController._reset_env": {
"total": 3.5643994729998667,
"count": 1,
"self": 3.5643994729998667
},
"TrainerController.advance": {
"total": 2379.731835599084,
"count": 64115,
"self": 1.594001031112839,
"children": {
"env_step": {
"total": 1735.5803722039386,
"count": 64115,
"self": 1593.434857382861,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.19756557018945,
"count": 64115,
"self": 4.9891576851237005,
"children": {
"TorchPolicy.evaluate": {
"total": 136.20840788506575,
"count": 62551,
"self": 136.20840788506575
}
}
},
"workers": {
"total": 0.9479492508880867,
"count": 64115,
"self": 0.0,
"children": {
"worker_root": {
"total": 2379.8468308858405,
"count": 64115,
"is_parallel": true,
"self": 908.7819385949042,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018776850001813727,
"count": 1,
"is_parallel": true,
"self": 0.0005979749998914485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012797100002899242,
"count": 8,
"is_parallel": true,
"self": 0.0012797100002899242
}
}
},
"UnityEnvironment.step": {
"total": 0.05231575800007704,
"count": 1,
"is_parallel": true,
"self": 0.0005716440000469447,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004881510003542644,
"count": 1,
"is_parallel": true,
"self": 0.0004881510003542644
},
"communicator.exchange": {
"total": 0.049529531999723986,
"count": 1,
"is_parallel": true,
"self": 0.049529531999723986
},
"steps_from_proto": {
"total": 0.0017264309999518446,
"count": 1,
"is_parallel": true,
"self": 0.00037407599984362605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013523550001082185,
"count": 8,
"is_parallel": true,
"self": 0.0013523550001082185
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1471.0648922909363,
"count": 64114,
"is_parallel": true,
"self": 36.24249428485746,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.42519154521915,
"count": 64114,
"is_parallel": true,
"self": 24.42519154521915
},
"communicator.exchange": {
"total": 1309.3580496230047,
"count": 64114,
"is_parallel": true,
"self": 1309.3580496230047
},
"steps_from_proto": {
"total": 101.03915683785499,
"count": 64114,
"is_parallel": true,
"self": 20.932430413078237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.10672642477675,
"count": 512912,
"is_parallel": true,
"self": 80.10672642477675
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 642.5574623640327,
"count": 64115,
"self": 2.928210118009247,
"children": {
"process_trajectory": {
"total": 124.28750345502658,
"count": 64115,
"self": 124.10867306602677,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1788303889998133,
"count": 2,
"self": 0.1788303889998133
}
}
},
"_update_policy": {
"total": 515.3417487909969,
"count": 449,
"self": 307.34143334690134,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.00031544409558,
"count": 22818,
"self": 208.00031544409558
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.050999344442971e-06,
"count": 1,
"self": 1.050999344442971e-06
},
"TrainerController._save_models": {
"total": 0.0822156480007834,
"count": 1,
"self": 0.001768110000739398,
"children": {
"RLTrainer._checkpoint": {
"total": 0.080447538000044,
"count": 1,
"self": 0.080447538000044
}
}
}
}
}
}
}
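For reference, the `timers.json` file ML-Agents writes is plain JSON: a `gauges` map of summary statistics (one `value`/`min`/`max`/`count` entry per metric, as above) plus a nested timer tree whose nodes carry `total`, `count`, `self`, and `children`. Below is a minimal sketch of how one might inspect such a file; the local path `run_logs/timers.json` and the helper function names are assumptions for illustration, not part of the log or the ML-Agents API.

```python
# Minimal sketch: load an ML-Agents timers.json and summarize its gauges
# and hierarchical timer tree. The path "run_logs/timers.json" is an
# assumption; point it at wherever the file above is saved locally.
import json


def print_gauges(gauges: dict) -> None:
    """Print value/min/max/count for each gauge, e.g. Pyramids.Policy.Entropy.mean."""
    for name, stats in sorted(gauges.items()):
        print(f"{name}: value={stats['value']:.4g} "
              f"(min={stats['min']:.4g}, max={stats['max']:.4g}, count={stats['count']})")


def print_timer_tree(node: dict, name: str = "root", depth: int = 0) -> None:
    """Recursively print each timer node's total seconds and call count."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)


if __name__ == "__main__":
    with open("run_logs/timers.json") as f:
        timers = json.load(f)

    print_gauges(timers["gauges"])
    print()
    print_timer_tree(timers, timers.get("name", "root"))
```

Run against the log above, a walk like this makes the time breakdown easy to read: of the roughly 2385 s total, most wall-clock time sits under `UnityEnvironment.step` -> `communicator.exchange` (about 1309 s), with `_update_policy` accounting for roughly 515 s.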