ppo-Pyramids/run_logs/timers.json
charmquark's picture
First Push
df543d1
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15313640236854553,
"min": 0.1491367369890213,
"max": 1.461522102355957,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4608.79296875,
"min": 4481.2607421875,
"max": 44336.734375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999939.0,
"min": 29952.0,
"max": 2999939.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999939.0,
"min": 29952.0,
"max": 2999939.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8002849221229553,
"min": -0.1223563402891159,
"max": 0.8612830638885498,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 243.28662109375,
"min": -28.99845314025879,
"max": 258.3849182128906,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.001654613995924592,
"min": -0.00840162206441164,
"max": 0.3478734791278839,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5030026435852051,
"min": -2.15921688079834,
"max": 82.44601440429688,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06586157622846908,
"min": 0.06444821091506456,
"max": 0.07432933354458258,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9220620671985671,
"min": 0.4809304167537939,
"max": 1.0895733896856352,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017249026318452144,
"min": 0.0002988890158269202,
"max": 0.01863462961946858,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24148636845833,
"min": 0.0032877791740961223,
"max": 0.26088481467256014,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.45688522869048e-06,
"min": 1.45688522869048e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.039639320166672e-05,
"min": 2.039639320166672e-05,
"max": 0.0038008565330478667,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048559523809522,
"min": 0.10048559523809522,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067983333333332,
"min": 1.3962282666666668,
"max": 2.737539033333334,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.8510964285714416e-05,
"min": 5.8510964285714416e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008191535000000018,
"min": 0.0008191535000000018,
"max": 0.12670851812,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00719241751357913,
"min": 0.0069189821369946,
"max": 0.2772340476512909,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10069384425878525,
"min": 0.09686575084924698,
"max": 1.9406384229660034,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 219.84172661870502,
"min": 206.19424460431654,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30558.0,
"min": 15984.0,
"max": 33941.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7793550641424414,
"min": -1.0000000521540642,
"max": 1.793805743507344,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 245.5509988516569,
"min": -31.998001664876938,
"max": 254.04199777543545,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7793550641424414,
"min": -1.0000000521540642,
"max": 1.793805743507344,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 245.5509988516569,
"min": -31.998001664876938,
"max": 254.04199777543545,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.016406612893278827,
"min": 0.01543570772984246,
"max": 5.6335464948788285,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.264112579272478,
"min": 2.0673963893787004,
"max": 90.13674391806126,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679375216",
"python_version": "3.9.9 | packaged by conda-forge | (main, Dec 20 2021, 02:40:17) \n[GCC 9.4.0]",
"command_line_arguments": "/home/qiang/opt/miniconda3/envs/x-hf-deeprl/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.19.5",
"end_time_seconds": "1679380503"
},
"total": 5286.875102930935,
"count": 1,
"self": 0.3200054179178551,
"children": {
"run_training.setup": {
"total": 0.016610361053608358,
"count": 1,
"self": 0.016610361053608358
},
"TrainerController.start_learning": {
"total": 5286.538487151964,
"count": 1,
"self": 4.187149831326678,
"children": {
"TrainerController._reset_env": {
"total": 3.228897077962756,
"count": 1,
"self": 3.228897077962756
},
"TrainerController.advance": {
"total": 5279.0278340847,
"count": 195082,
"self": 3.8233351692324504,
"children": {
"env_step": {
"total": 3081.3609271334717,
"count": 195082,
"self": 2766.362139328383,
"children": {
"SubprocessEnvManager._take_step": {
"total": 312.45665560045745,
"count": 195082,
"self": 12.229900089208968,
"children": {
"TorchPolicy.evaluate": {
"total": 300.2267555112485,
"count": 187556,
"self": 107.18996791588143,
"children": {
"TorchPolicy.sample_actions": {
"total": 193.03678759536706,
"count": 187556,
"self": 193.03678759536706
}
}
}
}
},
"workers": {
"total": 2.5421322046313435,
"count": 195082,
"self": 0.0,
"children": {
"worker_root": {
"total": 5279.747725866968,
"count": 195082,
"is_parallel": true,
"self": 2781.507334805443,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016106260009109974,
"count": 1,
"is_parallel": true,
"self": 0.0004166037542745471,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011940222466364503,
"count": 8,
"is_parallel": true,
"self": 0.0011940222466364503
}
}
},
"UnityEnvironment.step": {
"total": 0.03265785798430443,
"count": 1,
"is_parallel": true,
"self": 0.0002808289136737585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006118100136518478,
"count": 1,
"is_parallel": true,
"self": 0.0006118100136518478
},
"communicator.exchange": {
"total": 0.03059137798845768,
"count": 1,
"is_parallel": true,
"self": 0.03059137798845768
},
"steps_from_proto": {
"total": 0.001173841068521142,
"count": 1,
"is_parallel": true,
"self": 0.00022726005408912897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000946581014432013,
"count": 8,
"is_parallel": true,
"self": 0.000946581014432013
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2498.240391061525,
"count": 195081,
"is_parallel": true,
"self": 57.95700426143594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 46.52879376744386,
"count": 195081,
"is_parallel": true,
"self": 46.52879376744386
},
"communicator.exchange": {
"total": 2118.264028996695,
"count": 195081,
"is_parallel": true,
"self": 2118.264028996695
},
"steps_from_proto": {
"total": 275.49056403595023,
"count": 195081,
"is_parallel": true,
"self": 48.38291724619921,
"children": {
"_process_rank_one_or_two_observation": {
"total": 227.10764678975102,
"count": 1560648,
"is_parallel": true,
"self": 227.10764678975102
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2193.8435717819957,
"count": 195082,
"self": 7.84908934996929,
"children": {
"process_trajectory": {
"total": 433.3110535793239,
"count": 195082,
"self": 432.74776552128606,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5632880580378696,
"count": 6,
"self": 0.5632880580378696
}
}
},
"_update_policy": {
"total": 1752.6834288527025,
"count": 1393,
"self": 652.2041771702934,
"children": {
"TorchPPOOptimizer.update": {
"total": 1100.479251682409,
"count": 68358,
"self": 1100.479251682409
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.050119504332542e-07,
"count": 1,
"self": 8.050119504332542e-07
},
"TrainerController._save_models": {
"total": 0.09460535296238959,
"count": 1,
"self": 0.001206306042149663,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09339904692023993,
"count": 1,
"self": 0.09339904692023993
}
}
}
}
}
}
}