{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6582197546958923,
"min": 0.6582197546958923,
"max": 1.4857103824615479,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19778.1875,
"min": 19778.1875,
"max": 45070.51171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29952.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29952.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.34232568740844727,
"min": -0.21555005013942719,
"max": 0.4189187288284302,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 89.6893310546875,
"min": -51.08536148071289,
"max": 112.27021789550781,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00566603010520339,
"min": -0.002149995882064104,
"max": 0.2953774631023407,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.4844999313354492,
"min": -0.5697489380836487,
"max": 70.00445556640625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06853258555998938,
"min": 0.06531036150582398,
"max": 0.07275970583852011,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9594561978398513,
"min": 0.49417190839858977,
"max": 1.0340315212048226,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011694101730556997,
"min": 0.0001747867651937809,
"max": 0.013626769085882895,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16371742422779795,
"min": 0.0022722279475191516,
"max": 0.19077476720236053,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.350590406978569e-06,
"min": 7.350590406978569e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010290826569769996,
"min": 0.00010290826569769996,
"max": 0.0035073536308821993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024501642857143,
"min": 0.1024501642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343023000000001,
"min": 1.3886848,
"max": 2.5691178000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002547714121428571,
"min": 0.0002547714121428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035667997699999994,
"min": 0.0035667997699999994,
"max": 0.11693486821999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009588450193405151,
"min": 0.009588450193405151,
"max": 0.3562386631965637,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13423830270767212,
"min": 0.13423830270767212,
"max": 2.493670701980591,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 482.42622950819674,
"min": 454.85074626865674,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29428.0,
"min": 15984.0,
"max": 32992.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2551835816903192,
"min": -1.0000000521540642,
"max": 1.4555581845899126,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 76.56619848310947,
"min": -31.99680168926716,
"max": 97.52239836752415,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2551835816903192,
"min": -1.0000000521540642,
"max": 1.4555581845899126,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 76.56619848310947,
"min": -31.99680168926716,
"max": 97.52239836752415,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04768418025303647,
"min": 0.0456898459311533,
"max": 7.947462385054678,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.908734995435225,
"min": 2.908734995435225,
"max": 127.15939816087484,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699466360",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699468533"
},
"total": 2173.6519661160005,
"count": 1,
"self": 0.5765998490001039,
"children": {
"run_training.setup": {
"total": 0.040772619000108534,
"count": 1,
"self": 0.040772619000108534
},
"TrainerController.start_learning": {
"total": 2173.0345936480003,
"count": 1,
"self": 1.3792977400594282,
"children": {
"TrainerController._reset_env": {
"total": 3.4425388829999974,
"count": 1,
"self": 3.4425388829999974
},
"TrainerController.advance": {
"total": 2168.1375249099415,
"count": 63383,
"self": 1.4181992019557583,
"children": {
"env_step": {
"total": 1535.2488306299983,
"count": 63383,
"self": 1400.759572539032,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.6288033199903,
"count": 63383,
"self": 4.651064258031511,
"children": {
"TorchPolicy.evaluate": {
"total": 128.9777390619588,
"count": 62554,
"self": 128.9777390619588
}
}
},
"workers": {
"total": 0.8604547709760482,
"count": 63383,
"self": 0.0,
"children": {
"worker_root": {
"total": 2168.2852865001164,
"count": 63383,
"is_parallel": true,
"self": 888.7236655020965,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023036030002003827,
"count": 1,
"is_parallel": true,
"self": 0.0010890640014622477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001214538998738135,
"count": 8,
"is_parallel": true,
"self": 0.001214538998738135
}
}
},
"UnityEnvironment.step": {
"total": 0.05155775299999732,
"count": 1,
"is_parallel": true,
"self": 0.0007745489997432742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005119410002407676,
"count": 1,
"is_parallel": true,
"self": 0.0005119410002407676
},
"communicator.exchange": {
"total": 0.04836888199997702,
"count": 1,
"is_parallel": true,
"self": 0.04836888199997702
},
"steps_from_proto": {
"total": 0.0019023810000362573,
"count": 1,
"is_parallel": true,
"self": 0.0003906789993379789,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015117020006982784,
"count": 8,
"is_parallel": true,
"self": 0.0015117020006982784
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1279.5616209980199,
"count": 63382,
"is_parallel": true,
"self": 35.48726316592138,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.061340590052623,
"count": 63382,
"is_parallel": true,
"self": 26.061340590052623
},
"communicator.exchange": {
"total": 1112.8768973930382,
"count": 63382,
"is_parallel": true,
"self": 1112.8768973930382
},
"steps_from_proto": {
"total": 105.13611984900763,
"count": 63382,
"is_parallel": true,
"self": 21.255029476914387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.88109037209324,
"count": 507056,
"is_parallel": true,
"self": 83.88109037209324
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.4704950779874,
"count": 63383,
"self": 2.5765435710713973,
"children": {
"process_trajectory": {
"total": 125.80892255992376,
"count": 63383,
"self": 125.58306548792325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2258570720005082,
"count": 2,
"self": 0.2258570720005082
}
}
},
"_update_policy": {
"total": 503.0850289469922,
"count": 445,
"self": 297.5838996999855,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.5011292470067,
"count": 22857,
"self": 205.5011292470067
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.065999640559312e-06,
"count": 1,
"self": 1.065999640559312e-06
},
"TrainerController._save_models": {
"total": 0.07523104899973987,
"count": 1,
"self": 0.0014802519999648212,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07375079699977505,
"count": 1,
"self": 0.07375079699977505
}
}
}
}
}
}
}
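
This looks like the timer/gauge dump that `mlagents-learn` writes at the end of a run (typically `run_logs/timers.json`). As a minimal sketch, assuming that filename and the structure shown above (a top-level `gauges` map plus a nested timer tree with a root `total` in seconds), the summary statistics can be read back with the standard `json` module:

```python
import json

# Assumption: the file above is saved as run_logs/timers.json in the run directory.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max observed over `count` summary windows.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The nested "children" entries break down wall-clock time; the root "total"
# is the overall duration of the run in seconds.
print(f"total training time: {timers['total']:.1f}s")
```

This is only an illustration of how the dump can be inspected, not part of the training run itself; the per-step timing breakdown (e.g. `communicator.exchange` vs `TorchPPOOptimizer.update`) can be walked the same way by recursing into each node's `children`.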