{
    "name": "root",
    "gauges": {
        "Agent.Policy.Entropy.mean": {
            "value": 1.461394190788269,
            "min": 1.4189385175704956,
            "max": 1.4627084732055664,
            "count": 200
        },
        "Agent.Policy.Entropy.sum": {
            "value": 8768.365234375,
            "min": 7108.82421875,
            "max": 10005.6728515625,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
            "value": 85.73333333333333,
            "min": 0.0,
            "max": 522.4,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
            "value": 1286.0,
            "min": 0.0,
            "max": 7836.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
            "value": 0.6666666666666666,
            "min": 0.375,
            "max": 0.7333333333333333,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
            "value": 10.0,
            "min": 7.0,
            "max": 14.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.FurthestDistanceExplored.mean": {
            "value": 146.21323369344074,
            "min": 92.7341601451238,
            "max": 176.34202880859374,
            "count": 200
        },
        "Agent.DroneBasedReforestation.FurthestDistanceExplored.sum": {
            "value": 2193.1985054016113,
            "min": 1417.522566318512,
            "max": 2846.2834649086,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.mean": {
            "value": 399.0,
            "min": 273.375,
            "max": 399.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.sum": {
            "value": 5985.0,
            "min": 4833.0,
            "max": 6855.0,
            "count": 200
        },
        "Agent.Step.mean": {
            "value": 1199889.0,
            "min": 5600.0,
            "max": 1199889.0,
            "count": 200
        },
        "Agent.Step.sum": {
            "value": 1199889.0,
            "min": 5600.0,
            "max": 1199889.0,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.mean": {
            "value": 0.1302863210439682,
            "min": 0.031902581453323364,
            "max": 1.057988166809082,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.sum": {
            "value": 1.9542948007583618,
            "min": 0.4466361403465271,
            "max": 18.928512573242188,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.41766637563705444,
            "min": -0.24882428348064423,
            "max": 1.1357226371765137,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.sum": {
            "value": 6.264995574951172,
            "min": -4.230012893676758,
            "max": 20.25455665588379,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.mean": {
            "value": 8.835175029436748,
            "min": -1.1326333165168763,
            "max": 15.139918671920896,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.sum": {
            "value": 132.5276254415512,
            "min": -16.989499747753143,
            "max": 277.330514757894,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.mean": {
            "value": 0.493207577864329,
            "min": 0.0,
            "max": 18.167254384358724,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.sum": {
            "value": 7.398113667964935,
            "min": 0.0,
            "max": 272.50881576538086,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.mean": {
            "value": 7.95165540377299,
            "min": -1.0193702220916747,
            "max": 13.625924177467823,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.sum": {
            "value": 119.27483105659485,
            "min": -15.290553331375122,
            "max": 249.59742173738778,
            "count": 200
        },
        "Agent.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.Losses.PolicyLoss.mean": {
            "value": 0.028155309342158336,
            "min": 0.013494075605800996,
            "max": 0.034375812800135463,
            "count": 140
        },
        "Agent.Losses.PolicyLoss.sum": {
            "value": 0.028155309342158336,
            "min": 0.013494075605800996,
            "max": 0.034375812800135463,
            "count": 140
        },
        "Agent.Losses.ValueLoss.mean": {
            "value": 8.478612542152405,
            "min": 0.0005081328054075129,
            "max": 8.902225154417533,
            "count": 140
        },
        "Agent.Losses.ValueLoss.sum": {
            "value": 8.478612542152405,
            "min": 0.0005081328054075129,
            "max": 8.902225154417533,
            "count": 140
        },
        "Agent.Policy.LearningRate.mean": {
            "value": 2.7849990750015264e-08,
            "min": 2.7849990750015264e-08,
            "max": 0.0002979000007,
            "count": 140
        },
        "Agent.Policy.LearningRate.sum": {
            "value": 2.7849990750015264e-08,
            "min": 2.7849990750015264e-08,
            "max": 0.0002979000007,
            "count": 140
        },
        "Agent.Policy.Epsilon.mean": {
            "value": 0.10000925000000001,
            "min": 0.10000925000000001,
            "max": 0.1993,
            "count": 140
        },
        "Agent.Policy.Epsilon.sum": {
            "value": 0.10000925000000001,
            "min": 0.10000925000000001,
            "max": 0.1993,
            "count": 140
        },
        "Agent.Policy.Beta.mean": {
            "value": 1.0461575000000252e-05,
            "min": 1.0461575000000252e-05,
            "max": 0.00496507,
            "count": 140
        },
        "Agent.Policy.Beta.sum": {
            "value": 1.0461575000000252e-05,
            "min": 1.0461575000000252e-05,
            "max": 0.00496507,
            "count": 140
        },
        "Agent.Losses.CuriosityForwardLoss.mean": {
            "value": 0.011916863693234822,
            "min": 0.010664537665434182,
            "max": 0.5835290277997652,
            "count": 140
        },
        "Agent.Losses.CuriosityForwardLoss.sum": {
            "value": 0.011916863693234822,
            "min": 0.010664537665434182,
            "max": 0.5835290277997652,
            "count": 140
        },
        "Agent.Losses.CuriosityInverseLoss.mean": {
            "value": 2.4481616616249084,
            "min": 2.3409460683663688,
            "max": 3.310828596353531,
            "count": 140
        },
        "Agent.Losses.CuriosityInverseLoss.sum": {
            "value": 2.4481616616249084,
            "min": 2.3409460683663688,
            "max": 3.310828596353531,
            "count": 140
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1717292782",
        "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
        "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_5_task_6_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_5_task_6_run_id_1_train --base-port 5007",
        "mlagents_version": "0.30.0",
        "mlagents_envs_version": "0.30.0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.7.1+cu110",
        "numpy_version": "1.21.0",
        "end_time_seconds": "1717296310"
    },
    "total": 3527.6842628,
    "count": 1,
    "self": 0.271194800000103,
    "children": {
        "run_training.setup": {
            "total": 0.0522089,
            "count": 1,
            "self": 0.0522089
        },
        "TrainerController.start_learning": {
            "total": 3527.3608590999997,
            "count": 1,
            "self": 4.724687999997059,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.0738813,
                    "count": 1,
                    "self": 2.0738813
                },
                "TrainerController.advance": {
                    "total": 3520.388816000003,
                    "count": 401225,
                    "self": 4.241707900048368,
                    "children": {
                        "env_step": {
                            "total": 3516.1471080999545,
                            "count": 401225,
                            "self": 1496.8510475001858,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 2016.2752550000253,
                                    "count": 401225,
                                    "self": 10.122549799896206,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 2006.152705200129,
                                            "count": 400363,
                                            "self": 2006.152705200129
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 3.0208055997434866,
                                    "count": 401225,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 3520.4959868000665,
                                            "count": 401225,
                                            "is_parallel": true,
                                            "self": 2203.313819800009,
                                            "children": {
                                                "steps_from_proto": {
                                                    "total": 0.006579399999999902,
                                                    "count": 1,
                                                    "is_parallel": true,
                                                    "self": 0.00010719999999975194,
                                                    "children": {
                                                        "_process_maybe_compressed_observation": {
                                                            "total": 0.006426100000000101,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 3.2999999999949736e-05,
                                                            "children": {
                                                                "_observation_to_np_array": {
                                                                    "total": 0.0063931000000001514,
                                                                    "count": 3,
                                                                    "is_parallel": true,
                                                                    "self": 3.219999999992673e-05,
                                                                    "children": {
                                                                        "process_pixels": {
                                                                            "total": 0.006360900000000225,
                                                                            "count": 3,
                                                                            "is_parallel": true,
                                                                            "self": 0.000252800000000164,
                                                                            "children": {
                                                                                "image_decompress": {
                                                                                    "total": 0.006108100000000061,
                                                                                    "count": 3,
                                                                                    "is_parallel": true,
                                                                                    "self": 0.006108100000000061
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        },
                                                        "_process_rank_one_or_two_observation": {
                                                            "total": 4.610000000004888e-05,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 4.610000000004888e-05
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1317.175587600058,
                                                    "count": 401225,
                                                    "is_parallel": true,
                                                    "self": 16.29968470018207,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 19.04841950003623,
                                                            "count": 401225,
                                                            "is_parallel": true,
                                                            "self": 19.04841950003623
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1147.2209986000285,
                                                            "count": 401225,
                                                            "is_parallel": true,
                                                            "self": 1147.2209986000285
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 134.6064847998111,
                                                            "count": 401225,
                                                            "is_parallel": true,
                                                            "self": 26.638156099796177,
                                                            "children": {
                                                                "_process_maybe_compressed_observation": {
                                                                    "total": 96.88266109995595,
                                                                    "count": 802450,
                                                                    "is_parallel": true,
                                                                    "self": 7.688365399733755,
                                                                    "children": {
                                                                        "_observation_to_np_array": {
                                                                            "total": 89.1942957002222,
                                                                            "count": 1204305,
                                                                            "is_parallel": true,
                                                                            "self": 7.565408300169878,
                                                                            "children": {
                                                                                "process_pixels": {
                                                                                    "total": 81.62888740005232,
                                                                                    "count": 1204305,
                                                                                    "is_parallel": true,
                                                                                    "self": 38.62657160014098,
                                                                                    "children": {
                                                                                        "image_decompress": {
                                                                                            "total": 43.002315799911344,
                                                                                            "count": 1204305,
                                                                                            "is_parallel": true,
                                                                                            "self": 43.002315799911344
                                                                                        }
                                                                                    }
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                },
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 11.085667600058988,
                                                                    "count": 802450,
                                                                    "is_parallel": true,
                                                                    "self": 11.085667600058988
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 3.879999985656468e-05,
                    "count": 1,
                    "self": 3.879999985656468e-05,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 3523.306334700067,
                                    "count": 168218,
                                    "is_parallel": true,
                                    "self": 5.020779300031336,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 2763.2327570000357,
                                            "count": 168218,
                                            "is_parallel": true,
                                            "self": 2762.7916066000353,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.44115040000042427,
                                                    "count": 2,
                                                    "is_parallel": true,
                                                    "self": 0.44115040000042427
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 755.0527984000001,
                                            "count": 140,
                                            "is_parallel": true,
                                            "self": 502.3291928999985,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 252.72360550000167,
                                                    "count": 3372,
                                                    "is_parallel": true,
                                                    "self": 252.72360550000167
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.1734350000001541,
                    "count": 1,
                    "self": 0.012856700000156707,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.1605782999999974,
                            "count": 1,
                            "self": 0.1605782999999974
                        }
                    }
                }
            }
        }
    }
}