testpushblock / run_logs / timers.json
{
"name": "root",
"gauges": {
"PushBlock.Policy.Entropy.mean": {
"value": 0.36918264627456665,
"min": 0.36918264627456665,
"max": 1.84763765335083,
"count": 33
},
"PushBlock.Policy.Entropy.sum": {
"value": 22221.841796875,
"min": 22221.841796875,
"max": 112099.875,
"count": 33
},
"PushBlock.Step.mean": {
"value": 1979989.0,
"min": 59968.0,
"max": 1979989.0,
"count": 33
},
"PushBlock.Step.sum": {
"value": 1979989.0,
"min": 59968.0,
"max": 1979989.0,
"count": 33
},
"PushBlock.Policy.ExtrinsicValueEstimate.mean": {
"value": 3.524479866027832,
"min": -0.09835633635520935,
"max": 3.524479866027832,
"count": 33
},
"PushBlock.Policy.ExtrinsicValueEstimate.sum": {
"value": 6065.6298828125,
"min": -94.52043914794922,
"max": 6065.6298828125,
"count": 33
},
"PushBlock.Losses.PolicyLoss.mean": {
"value": 0.06531524254004246,
"min": 0.06531524254004246,
"max": 0.07230422536205226,
"count": 33
},
"PushBlock.Losses.PolicyLoss.sum": {
"value": 1.8941420336612316,
"min": 0.9877540243655976,
"max": 2.0708480319105242,
"count": 33
},
"PushBlock.Losses.ValueLoss.mean": {
"value": 0.3559052664226147,
"min": 1.0239713772145584e-05,
"max": 0.41948080000778043,
"count": 33
},
"PushBlock.Losses.ValueLoss.sum": {
"value": 10.321252726255826,
"min": 0.00015359570658218376,
"max": 12.164943200225633,
"count": 33
},
"PushBlock.Policy.LearningRate.mean": {
"value": 7.590966435227584e-06,
"min": 7.590966435227584e-06,
"max": 0.00029544960151679995,
"count": 33
},
"PushBlock.Policy.LearningRate.sum": {
"value": 0.00022013802662159996,
"min": 0.00022013802662159996,
"max": 0.0070045391651538,
"count": 33
},
"PushBlock.Policy.Epsilon.mean": {
"value": 0.10253028965517243,
"min": 0.10253028965517243,
"max": 0.1984832,
"count": 33
},
"PushBlock.Policy.Epsilon.sum": {
"value": 2.9733784000000005,
"min": 2.7787648,
"max": 5.2348462,
"count": 33
},
"PushBlock.Policy.Beta.mean": {
"value": 0.0002627759365517241,
"min": 0.0002627759365517241,
"max": 0.00984847168,
"count": 33
},
"PushBlock.Policy.Beta.sum": {
"value": 0.00762050216,
"min": 0.00762050216,
"max": 0.23354113538,
"count": 33
},
"PushBlock.Environment.EpisodeLength.mean": {
"value": 45.92818110850898,
"min": 45.92818110850898,
"max": 999.0,
"count": 33
},
"PushBlock.Environment.EpisodeLength.sum": {
"value": 58834.0,
"min": 31968.0,
"max": 64324.0,
"count": 33
},
"PushBlock.Environment.CumulativeReward.mean": {
"value": 4.891086941858094,
"min": -1.0000000447034836,
"max": 4.902474769966761,
"count": 33
},
"PushBlock.Environment.CumulativeReward.sum": {
"value": 6270.373459462076,
"min": -64.00000286102295,
"max": 6270.373459462076,
"count": 33
},
"PushBlock.Policy.ExtrinsicReward.mean": {
"value": 4.891086941858094,
"min": -1.0000000447034836,
"max": 4.902474769966761,
"count": 33
},
"PushBlock.Policy.ExtrinsicReward.sum": {
"value": 6270.373459462076,
"min": -64.00000286102295,
"max": 6270.373459462076,
"count": 33
},
"PushBlock.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"PushBlock.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1661028747",
"python_version": "3.9.7 | packaged by conda-forge | (default, Sep 29 2021, 19:23:11) \n[GCC 9.4.0]",
"command_line_arguments": "/home/studio-lab-user/.conda/envs/d2l/bin/mlagents-learn ./config/ppo/PushBlock.yaml --env=./trained-envs-executables/linux/PushBlock/PushBlock --run-id=PushBlock Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.4",
"end_time_seconds": "1661030447"
},
"total": 1700.625083998,
"count": 1,
"self": 0.27019369400022697,
"children": {
"run_training.setup": {
"total": 0.07572018399991975,
"count": 1,
"self": 0.07572018399991975
},
"TrainerController.start_learning": {
"total": 1700.27917012,
"count": 1,
"self": 1.593551067011731,
"children": {
"TrainerController._reset_env": {
"total": 12.003173340999865,
"count": 1,
"self": 12.003173340999865
},
"TrainerController.advance": {
"total": 1686.6228198569884,
"count": 75568,
"self": 1.4462087121214608,
"children": {
"env_step": {
"total": 971.9844109099299,
"count": 75568,
"self": 877.8616919780079,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.2743697950034,
"count": 75568,
"self": 4.636906597020243,
"children": {
"TorchPolicy.evaluate": {
"total": 88.63746319798315,
"count": 62528,
"self": 28.58412011988935,
"children": {
"TorchPolicy.sample_actions": {
"total": 60.0533430780938,
"count": 62528,
"self": 60.0533430780938
}
}
}
}
},
"workers": {
"total": 0.848349136918614,
"count": 75568,
"self": 0.0,
"children": {
"worker_root": {
"total": 1697.377955602092,
"count": 75568,
"is_parallel": true,
"self": 939.1471152440615,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003576241000018854,
"count": 1,
"is_parallel": true,
"self": 0.0016262459998870327,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019499950001318211,
"count": 4,
"is_parallel": true,
"self": 0.0019499950001318211
}
}
},
"UnityEnvironment.step": {
"total": 0.02470750600014071,
"count": 1,
"is_parallel": true,
"self": 0.0008270579999134497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043646000017361075,
"count": 1,
"is_parallel": true,
"self": 0.00043646000017361075
},
"communicator.exchange": {
"total": 0.020933761000151208,
"count": 1,
"is_parallel": true,
"self": 0.020933761000151208
},
"steps_from_proto": {
"total": 0.002510226999902443,
"count": 1,
"is_parallel": true,
"self": 0.0004533799999535404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020568469999489025,
"count": 4,
"is_parallel": true,
"self": 0.0020568469999489025
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 758.2308403580305,
"count": 75567,
"is_parallel": true,
"self": 33.301698001971545,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.422092879958655,
"count": 75567,
"is_parallel": true,
"self": 26.422092879958655
},
"communicator.exchange": {
"total": 604.322116844068,
"count": 75567,
"is_parallel": true,
"self": 604.322116844068
},
"steps_from_proto": {
"total": 94.18493263203231,
"count": 75567,
"is_parallel": true,
"self": 18.92606160921855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.25887102281376,
"count": 302268,
"is_parallel": true,
"self": 75.25887102281376
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 713.192200234937,
"count": 75568,
"self": 3.11769389285314,
"children": {
"process_trajectory": {
"total": 179.35090118809308,
"count": 75568,
"self": 179.05766282609375,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29323836199932884,
"count": 4,
"self": 0.29323836199932884
}
}
},
"_update_policy": {
"total": 530.7236051539908,
"count": 888,
"self": 281.29957376098946,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.42403139300131,
"count": 46191,
"self": 249.42403139300131
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1100000847363845e-06,
"count": 1,
"self": 1.1100000847363845e-06
},
"TrainerController._save_models": {
"total": 0.059624744999837276,
"count": 1,
"self": 0.00044027100011589937,
"children": {
"RLTrainer._checkpoint": {
"total": 0.059184473999721376,
"count": 1,
"self": 0.059184473999721376
}
}
}
}
}
}
}