testpyramidsrnd/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.13484309613704681,
"min": 0.13255511224269867,
"max": 1.4715614318847656,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4079.8125,
"min": 3946.9609375,
"max": 44641.2890625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999934.0,
"min": 29952.0,
"max": 2999934.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999934.0,
"min": 29952.0,
"max": 2999934.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.801253080368042,
"min": -0.11023728549480438,
"max": 0.8468865752220154,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 244.38218688964844,
"min": -26.126235961914062,
"max": 261.1581726074219,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01356786210089922,
"min": -0.03086991049349308,
"max": 0.3554634749889374,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.138197898864746,
"min": -8.365745544433594,
"max": 84.24484252929688,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06996008481387524,
"min": 0.06292635622163335,
"max": 0.07445767317658566,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0494012722081285,
"min": 0.46839485366945155,
"max": 1.0970733698923139,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015125260608300441,
"min": 0.0013127453142280992,
"max": 0.017222367379526118,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22687890912450662,
"min": 0.014108472776027866,
"max": 0.25053178836242296,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5121194959933316e-06,
"min": 1.5121194959933316e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2681792439899973e-05,
"min": 2.2681792439899973e-05,
"max": 0.004027228257590632,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050400666666666,
"min": 0.10050400666666666,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5075600999999998,
"min": 1.3962282666666668,
"max": 2.8424093666666668,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.035026599999994e-05,
"min": 6.035026599999994e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009052539899999991,
"min": 0.0009052539899999991,
"max": 0.13425669573,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008564097806811333,
"min": 0.00841474998742342,
"max": 0.38969168066978455,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12846146523952484,
"min": 0.11780649423599243,
"max": 2.727841854095459,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 216.86805555555554,
"min": 209.08333333333334,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31229.0,
"min": 15984.0,
"max": 33039.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7822237635825897,
"min": -1.0000000521540642,
"max": 1.7881167812721572,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 254.85799819231033,
"min": -30.559001743793488,
"max": 262.56799817085266,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7822237635825897,
"min": -1.0000000521540642,
"max": 1.7881167812721572,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 254.85799819231033,
"min": -30.559001743793488,
"max": 262.56799817085266,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.019242743338998898,
"min": 0.018860102466922917,
"max": 7.597729445435107,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7517122974768427,
"min": 2.508393628100748,
"max": 121.56367112696171,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1660980358",
"python_version": "3.9.7 | packaged by conda-forge | (default, Sep 29 2021, 19:23:11) \n[GCC 9.4.0]",
"command_line_arguments": "/home/studio-lab-user/.conda/envs/d2l/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.4",
"end_time_seconds": "1660985715"
},
"total": 5357.028527222,
"count": 1,
"self": 0.3207162340004288,
"children": {
"run_training.setup": {
"total": 0.07508762400016167,
"count": 1,
"self": 0.07508762400016167
},
"TrainerController.start_learning": {
"total": 5356.6327233639995,
"count": 1,
"self": 3.5570683310452296,
"children": {
"TrainerController._reset_env": {
"total": 9.231680407000113,
"count": 1,
"self": 9.231680407000113
},
"TrainerController.advance": {
"total": 5343.752470810954,
"count": 195431,
"self": 3.5716219709247525,
"children": {
"env_step": {
"total": 3451.897419213865,
"count": 195431,
"self": 3164.6863409240223,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.05936036884873,
"count": 195431,
"self": 13.534244142049147,
"children": {
"TorchPolicy.evaluate": {
"total": 271.5251162267996,
"count": 187573,
"self": 88.17436237970378,
"children": {
"TorchPolicy.sample_actions": {
"total": 183.3507538470958,
"count": 187573,
"self": 183.3507538470958
}
}
}
}
},
"workers": {
"total": 2.1517179209938604,
"count": 195431,
"self": 0.0,
"children": {
"worker_root": {
"total": 5348.999863559679,
"count": 195431,
"is_parallel": true,
"self": 2434.9059987708597,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0035244210000655585,
"count": 1,
"is_parallel": true,
"self": 0.0019902950004961895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001534125999569369,
"count": 8,
"is_parallel": true,
"self": 0.001534125999569369
}
}
},
"UnityEnvironment.step": {
"total": 0.03743287900010728,
"count": 1,
"is_parallel": true,
"self": 0.00034319700012019894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002939920000244456,
"count": 1,
"is_parallel": true,
"self": 0.0002939920000244456
},
"communicator.exchange": {
"total": 0.03574911199984854,
"count": 1,
"is_parallel": true,
"self": 0.03574911199984854
},
"steps_from_proto": {
"total": 0.0010465780001140956,
"count": 1,
"is_parallel": true,
"self": 0.0002887999999074964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007577780002065992,
"count": 8,
"is_parallel": true,
"self": 0.0007577780002065992
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2914.0938647888197,
"count": 195430,
"is_parallel": true,
"self": 67.43264153396467,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 46.074633980849285,
"count": 195430,
"is_parallel": true,
"self": 46.074633980849285
},
"communicator.exchange": {
"total": 2596.2725378499445,
"count": 195430,
"is_parallel": true,
"self": 2596.2725378499445
},
"steps_from_proto": {
"total": 204.31405142406106,
"count": 195430,
"is_parallel": true,
"self": 49.94330426554234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 154.37074715851872,
"count": 1563440,
"is_parallel": true,
"self": 154.37074715851872
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1888.2834296261642,
"count": 195431,
"self": 7.22656501701681,
"children": {
"process_trajectory": {
"total": 418.3046924581438,
"count": 195431,
"self": 417.7316452461439,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5730472119998922,
"count": 6,
"self": 0.5730472119998922
}
}
},
"_update_policy": {
"total": 1462.7521721510036,
"count": 1397,
"self": 585.1648097449454,
"children": {
"TorchPPOOptimizer.update": {
"total": 877.5873624060582,
"count": 68418,
"self": 877.5873624060582
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0390003808424808e-06,
"count": 1,
"self": 1.0390003808424808e-06
},
"TrainerController._save_models": {
"total": 0.09150277600019763,
"count": 1,
"self": 0.0008970120006779325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0906057639995197,
"count": 1,
"self": 0.0906057639995197
}
}
}
}
}
}
}
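
A short Python sketch of how the data above can be inspected offline, assuming the JSON is saved locally as run_logs/timers.json (that path and the helper name print_timers are illustrative, not part of ML-Agents). The "gauges" block stores the latest value plus min/max/count of each training statistic, and the remaining blocks form a nested timer tree of total seconds, call counts, self time, and children.

import json

# Load the timer file written by ML-Agents at the end of training.
# The relative path is an assumption; point it at wherever this JSON is saved.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the most recent value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer tree: each block has total seconds, call count, self time, children.
def print_timers(block, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={block.get('total', 0.0):.1f}s, "
          f"count={block.get('count', 0)}, self={block.get('self', 0.0):.1f}s")
    for child_name, child in block.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

print_timers(timers)

Run against this file, the tree view makes the time budget easy to read: communicator.exchange under UnityEnvironment.step accounts for roughly 2596 of the 5357 total seconds, so most of the wall-clock time went to stepping the Unity environment rather than to the PPO/RND updates.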