{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3832859992980957, "min": 1.377905249595642, "max": 1.4387047290802002, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 7614.98974609375, "min": 6836.2822265625, "max": 9952.52734375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.956989247311828, "min": 0.42857142857142855, "max": 1.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 89.0, "min": 9.0, "max": 143.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 10.709677419354838, "min": 3.875, "max": 45.370370370370374, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 996.0, "min": 93.0, "max": 1738.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.03225806451612903, "min": 0.0, "max": 0.5, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 3.0, "min": 0.0, "max": 12.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 47.078604585380965, "min": 14.654892603556315, "max": 65.3136192668568, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 4378.31022644043, "min": 395.6821002960205, "max": 7543.583985328674, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 3.7718643879377716, "min": 0.44154480044488553, "max": 6.812180314745222, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 350.78338807821274, "min": 11.92170961201191, "max": 617.6223002076149, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 1.2195584652244404, "min": 0.18142950336138408, "max": 2.1000422419923725, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 113.41893726587296, "min": 5.442885100841522, "max": 198.9668880701065, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.12195584682687637, "min": 0.018142950534820557, "max": 0.21000422447016745, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 11.341893754899502, "min": 0.5442885160446167, "max": 19.89668869227171, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 68.13730222948136, "min": 19.516381581624348, "max": 79.35923038437252, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 6336.769107341766, "min": 468.3931579589844, "max": 10385.185774683952, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 58.096774193548384, "min": 41.53191489361702, "max": 296.42857142857144, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5403.0, 
"min": 4851.0, "max": 7107.0, "count": 200 }, "Agent.Step.mean": { "value": 1199953.0, "min": 5673.0, "max": 1199953.0, "count": 200 }, "Agent.Step.sum": { "value": 1199953.0, "min": 5673.0, "max": 1199953.0, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.24108651280403137, "min": 0.013187325559556484, "max": 1.2134068012237549, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 22.90321922302246, "min": 0.39561977982521057, "max": 40.94060516357422, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 51.391300201416016, "min": 0.05967150628566742, "max": 56.34388732910156, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 4882.17333984375, "min": 1.4917876720428467, "max": 7844.13818359375, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 102.85636713693016, "min": 39.79958934223522, "max": 107.05925941467285, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 9771.354878008366, "min": 915.3905548714101, "max": 15001.364575862885, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 0.17962274133766953, "min": 0.0, "max": 6.215758005777995, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 17.064160427078605, "min": 0.0, "max": 205.12001419067383, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 92.57073308982348, "min": 35.819633419423, "max": 96.35333502633232, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 8794.21964353323, "min": 823.851568646729, "max": 13501.22828745842, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.020890201403138537, "min": 0.014204279228579253, "max": 0.03359081801803162, "count": 139 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.020890201403138537, "min": 0.014204279228579253, "max": 0.03359081801803162, "count": 139 }, "Agent.Losses.ValueLoss.mean": { "value": 136.8279104232788, "min": 25.80408549308777, "max": 150.56823108814382, "count": 139 }, "Agent.Losses.ValueLoss.sum": { "value": 136.8279104232788, "min": 25.80408549308777, "max": 150.56823108814382, "count": 139 }, "Agent.Policy.LearningRate.mean": { "value": 4.4634985124999247e-07, "min": 4.4634985124999247e-07, "max": 0.00029776350074549996, "count": 139 }, "Agent.Policy.LearningRate.sum": { "value": 4.4634985124999247e-07, "min": 4.4634985124999247e-07, "max": 0.00029776350074549996, "count": 139 }, "Agent.Policy.Epsilon.mean": { "value": 0.10014875000000001, "min": 0.10014875000000001, "max": 0.1992545, "count": 139 }, "Agent.Policy.Epsilon.sum": { "value": 0.10014875000000001, "min": 0.10014875000000001, "max": 0.1992545, "count": 139 }, "Agent.Policy.Beta.mean": { "value": 1.7422624999999878e-05, "min": 1.7422624999999878e-05, "max": 0.00496279955, "count": 139 }, "Agent.Policy.Beta.sum": { "value": 1.7422624999999878e-05, "min": 1.7422624999999878e-05, "max": 0.00496279955, "count": 139 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.02983051825625201, "min": 0.02136377152055502, "max": 0.6002316027879715, "count": 139 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.02983051825625201, "min": 0.02136377152055502, "max": 0.6002316027879715, "count": 139 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 2.113467812538147, "min": 2.113467812538147, "max": 3.353024572134018, "count": 139 }, 
"Agent.Losses.CuriosityInverseLoss.sum": { "value": 2.113467812538147, "min": 2.113467812538147, "max": 3.353024572134018, "count": 139 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717579535", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_3_task_3_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_3_task_3_run_id_2_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717584772" }, "total": 5236.342279799999, "count": 1, "self": 0.7435994999996183, "children": { "run_training.setup": { "total": 0.058709300000000075, "count": 1, "self": 0.058709300000000075 }, "TrainerController.start_learning": { "total": 5235.539970999999, "count": 1, "self": 8.381034100153556, "children": { "TrainerController._reset_env": { "total": 2.3645626, "count": 1, "self": 2.3645626 }, "TrainerController.advance": { "total": 5224.615400199847, "count": 403669, "self": 8.296690299905094, "children": { "env_step": { "total": 5216.3187098999415, "count": 403669, "self": 2517.3413550999517, "children": { "SubprocessEnvManager._take_step": { "total": 2693.4561459000506, "count": 403669, "self": 15.115519000012227, "children": { "TorchPolicy.evaluate": { "total": 2678.3406269000384, "count": 400012, "self": 2678.3406269000384 } } }, "workers": { "total": 5.5212088999393565, "count": 403669, "self": 0.0, "children": { "worker_root": { "total": 5224.7721400997925, "count": 403669, "is_parallel": true, "self": 3010.0866067999964, "children": { "steps_from_proto": { "total": 0.006928399999999835, "count": 1, "is_parallel": true, "self": 0.00015489999999984683, "children": { "_process_maybe_compressed_observation": { "total": 0.006702800000000009, "count": 2, "is_parallel": true, "self": 3.770000000002938e-05, "children": { "_observation_to_np_array": { "total": 0.006665099999999979, "count": 3, "is_parallel": true, "self": 3.389999999980908e-05, "children": { "process_pixels": { "total": 0.00663120000000017, "count": 3, "is_parallel": true, "self": 0.00029599999999985194, "children": { "image_decompress": { "total": 0.006335200000000318, "count": 3, "is_parallel": true, "self": 0.006335200000000318 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 7.069999999997911e-05, "count": 2, "is_parallel": true, "self": 7.069999999997911e-05 } } }, "UnityEnvironment.step": { "total": 2214.678604899796, "count": 403669, "is_parallel": true, "self": 26.384135099844116, "children": { "UnityEnvironment._generate_step_input": { "total": 26.45881169997105, "count": 403669, "is_parallel": true, "self": 26.45881169997105 }, "communicator.exchange": { "total": 1951.531202800009, "count": 403669, "is_parallel": true, "self": 1951.531202800009 }, "steps_from_proto": { "total": 210.30445529997178, "count": 403669, "is_parallel": true, "self": 42.30163210001666, "children": { "_process_maybe_compressed_observation": { "total": 149.75453989994236, "count": 807338, "is_parallel": true, "self": 11.712765800346347, "children": { "_observation_to_np_array": { "total": 138.041774099596, "count": 1211604, "is_parallel": true, "self": 
11.243764599780192, "children": { "process_pixels": { "total": 126.79800949981582, "count": 1211604, "is_parallel": true, "self": 59.6969102997937, "children": { "image_decompress": { "total": 67.10109920002212, "count": 1211604, "is_parallel": true, "self": 67.10109920002212 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 18.24828330001275, "count": 807338, "is_parallel": true, "self": 18.24828330001275 } } } } } } } } } } } } }, "trainer_threads": { "total": 2.479999966453761e-05, "count": 1, "self": 2.479999966453761e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 5229.446374899926, "count": 254708, "is_parallel": true, "self": 10.728341100017133, "children": { "process_trajectory": { "total": 4269.139873099913, "count": 254708, "is_parallel": true, "self": 4268.623405599913, "children": { "RLTrainer._checkpoint": { "total": 0.516467499999635, "count": 2, "is_parallel": true, "self": 0.516467499999635 } } }, "_update_policy": { "total": 949.5781606999959, "count": 139, "is_parallel": true, "self": 635.0770896999879, "children": { "TorchPPOOptimizer.update": { "total": 314.501071000008, "count": 3378, "is_parallel": true, "self": 314.501071000008 } } } } } } } } }, "TrainerController._save_models": { "total": 0.17894929999965825, "count": 1, "self": 0.013293699999849196, "children": { "RLTrainer._checkpoint": { "total": 0.16565559999980906, "count": 1, "self": 0.16565559999980906 } } } } } } }