{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.3702343702316284,
"min": 1.3702343702316284,
"max": 1.4285763502120972,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 9368.2919921875,
"min": 6823.7998046875,
"max": 9800.8203125,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.8095238095238095,
"min": 0.3333333333333333,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 34.0,
"min": 7.0,
"max": 128.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 9.5,
"min": 3.7142857142857144,
"max": 44.766666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 399.0,
"min": 78.0,
"max": 1441.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.11904761904761904,
"min": 0.0,
"max": 0.47619047619047616,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 5.0,
"min": 0.0,
"max": 11.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 58.831503096080965,
"min": 12.754429340362549,
"max": 70.47900446256001,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 2470.9231300354004,
"min": 306.1063041687012,
"max": 6669.781922340393,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 3.6928408598039475,
"min": 0.505825142065684,
"max": 5.846334925956196,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 155.0993161117658,
"min": 12.139803409576416,
"max": 582.5994163304567,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 1.6115015120733351,
"min": 0.13446491956710815,
"max": 2.243967639075385,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 67.68306350708008,
"min": 3.2271580696105957,
"max": 179.2016315460205,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.1611501517749968,
"min": 0.013446491832534472,
"max": 0.22439676605992848,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 6.768306374549866,
"min": 0.32271580398082733,
"max": 17.920163184404373,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 53.584198315938316,
"min": 20.27740110669817,
"max": 83.83410439667878,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 2250.536329269409,
"min": 386.5410442352295,
"max": 9119.055777072906,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 162.0,
"min": 39.357142857142854,
"max": 324.14285714285717,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6804.0,
"min": 4884.0,
"max": 6951.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199712.0,
"min": 5673.0,
"max": 1199712.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199712.0,
"min": 5673.0,
"max": 1199712.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.17536352574825287,
"min": 0.013186157681047916,
"max": 1.0032501220703125,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 7.014541149139404,
"min": 0.39558473229408264,
"max": 35.391231536865234,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 42.13700485229492,
"min": 0.05967150628566742,
"max": 54.45671463012695,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 1685.480224609375,
"min": 1.4917876720428467,
"max": 6933.2783203125,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 88.9996132960543,
"min": 31.145872324705124,
"max": 106.24138140407476,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 3559.984531842172,
"min": 716.3550634682178,
"max": 13591.092759132385,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.2655094655696303,
"min": 0.0,
"max": 6.215978073351311,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 10.62037862278521,
"min": 0.0,
"max": 205.12727642059326,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 80.09965533008798,
"min": 28.031288092955947,
"max": 95.61724598299374,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 3203.9862132035196,
"min": 644.7196261379868,
"max": 12231.983855217695,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02166815393138677,
"min": 0.016005230757097404,
"max": 0.03362609566344569,
"count": 139
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02166815393138677,
"min": 0.016005230757097404,
"max": 0.03362609566344569,
"count": 139
},
"Agent.Losses.ValueLoss.mean": {
"value": 115.71876049041748,
"min": 19.359095017115276,
"max": 149.22057723999023,
"count": 139
},
"Agent.Losses.ValueLoss.sum": {
"value": 115.71876049041748,
"min": 19.359095017115276,
"max": 149.22057723999023,
"count": 139
},
"Agent.Policy.LearningRate.mean": {
"value": 8.693497102499992e-07,
"min": 8.693497102499992e-07,
"max": 0.00029776350074549996,
"count": 139
},
"Agent.Policy.LearningRate.sum": {
"value": 8.693497102499992e-07,
"min": 8.693497102499992e-07,
"max": 0.00029776350074549996,
"count": 139
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10028975000000001,
"min": 0.10028975000000001,
"max": 0.1992545,
"count": 139
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10028975000000001,
"min": 0.10028975000000001,
"max": 0.1992545,
"count": 139
},
"Agent.Policy.Beta.mean": {
"value": 2.445852499999999e-05,
"min": 2.445852499999999e-05,
"max": 0.00496279955,
"count": 139
},
"Agent.Policy.Beta.sum": {
"value": 2.445852499999999e-05,
"min": 2.445852499999999e-05,
"max": 0.00496279955,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.019822144803280633,
"min": 0.017879613287126023,
"max": 0.6002316027879715,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.019822144803280633,
"min": 0.017879613287126023,
"max": 0.6002316027879715,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.2797528902689614,
"min": 2.261426647504171,
"max": 3.353024572134018,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.2797528902689614,
"min": 2.261426647504171,
"max": 3.353024572134018,
"count": 139
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717644296",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_5_task_3_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_5_task_3_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717648175"
},
"total": 3879.5636875,
"count": 1,
"self": 0.5836715000000368,
"children": {
"run_training.setup": {
"total": 0.04922060000000006,
"count": 1,
"self": 0.04922060000000006
},
"TrainerController.start_learning": {
"total": 3878.9307954,
"count": 1,
"self": 4.769126999989112,
"children": {
"TrainerController._reset_env": {
"total": 2.0138585000000004,
"count": 1,
"self": 2.0138585000000004
},
"TrainerController.advance": {
"total": 3871.983030500011,
"count": 403331,
"self": 4.73594840008991,
"children": {
"env_step": {
"total": 3867.247082099921,
"count": 403331,
"self": 1914.9331705998552,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1949.0585163001092,
"count": 403331,
"self": 10.356838600219362,
"children": {
"TorchPolicy.evaluate": {
"total": 1938.7016776998898,
"count": 400304,
"self": 1938.7016776998898
}
}
},
"workers": {
"total": 3.255395199956714,
"count": 403331,
"self": 0.0,
"children": {
"worker_root": {
"total": 3870.2295594999377,
"count": 403331,
"is_parallel": true,
"self": 2171.3851849999296,
"children": {
"steps_from_proto": {
"total": 0.006126599999999982,
"count": 1,
"is_parallel": true,
"self": 0.00010400000000010401,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.0059762999999999344,
"count": 2,
"is_parallel": true,
"self": 3.45000000001594e-05,
"children": {
"_observation_to_np_array": {
"total": 0.005941799999999775,
"count": 3,
"is_parallel": true,
"self": 2.8299999999426007e-05,
"children": {
"process_pixels": {
"total": 0.005913500000000349,
"count": 3,
"is_parallel": true,
"self": 0.0002469000000002719,
"children": {
"image_decompress": {
"total": 0.005666600000000077,
"count": 3,
"is_parallel": true,
"self": 0.005666600000000077
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.629999999994361e-05,
"count": 2,
"is_parallel": true,
"self": 4.629999999994361e-05
}
}
},
"UnityEnvironment.step": {
"total": 1698.8382479000081,
"count": 403331,
"is_parallel": true,
"self": 19.066693599921336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.76571009997244,
"count": 403331,
"is_parallel": true,
"self": 18.76571009997244
},
"communicator.exchange": {
"total": 1518.0595141001145,
"count": 403331,
"is_parallel": true,
"self": 1518.0595141001145
},
"steps_from_proto": {
"total": 142.94633009999978,
"count": 403331,
"is_parallel": true,
"self": 28.38861349963352,
"children": {
"_process_maybe_compressed_observation": {
"total": 101.96121070031596,
"count": 806662,
"is_parallel": true,
"self": 8.329450300419992,
"children": {
"_observation_to_np_array": {
"total": 93.63176039989597,
"count": 1210584,
"is_parallel": true,
"self": 8.190591199726384,
"children": {
"process_pixels": {
"total": 85.44116920016958,
"count": 1210584,
"is_parallel": true,
"self": 40.30926290037272,
"children": {
"image_decompress": {
"total": 45.13190629979686,
"count": 1210584,
"is_parallel": true,
"self": 45.13190629979686
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 12.596505900050296,
"count": 806662,
"is_parallel": true,
"self": 12.596505900050296
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.429999995001708e-05,
"count": 1,
"self": 2.429999995001708e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3875.512636300037,
"count": 190352,
"is_parallel": true,
"self": 3.6124466000769644,
"children": {
"process_trajectory": {
"total": 3143.815014799961,
"count": 190352,
"is_parallel": true,
"self": 3143.40340489996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41160990000048514,
"count": 2,
"is_parallel": true,
"self": 0.41160990000048514
}
}
},
"_update_policy": {
"total": 728.0851748999988,
"count": 139,
"is_parallel": true,
"self": 488.25519190000125,
"children": {
"TorchPPOOptimizer.update": {
"total": 239.8299829999976,
"count": 3366,
"is_parallel": true,
"self": 239.8299829999976
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16475510000009308,
"count": 1,
"self": 0.011624800000390678,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1531302999997024,
"count": 1,
"self": 0.1531302999997024
}
}
}
}
}
}
}