{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.2774068117141724, "min": 1.2774068117141724, "max": 1.4205453395843506, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 7848.3876953125, "min": 6373.7900390625, "max": 9439.21484375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.3333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 9.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 19.505050505050505, "min": 19.12621359223301, "max": 314.3333333333333, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5793.0, "min": 4830.0, "max": 6990.0, "count": 200 }, "Agent.Step.mean": { "value": 1199991.0, "min": 5913.0, "max": 1199991.0, "count": 200 }, "Agent.Step.sum": { "value": 1199991.0, "min": 5913.0, "max": 1199991.0, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.8460439443588257, "min": -0.48043814301490784, "max": 0.9943035244941711, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 244.50669860839844, "min": -93.83024597167969, "max": 266.0840759277344, "count": 200 }, 
"Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 71.61072540283203, "min": 0.07052389532327652, "max": 77.85440063476562, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 20695.5, "min": 1.7630974054336548, "max": 21979.3046875, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 100.2953803449016, "min": 30.795576611631795, "max": 109.47389614482721, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 28985.364919676562, "min": 585.1159556210041, "max": 28985.364919676562, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 0.040613243248793826, "min": 0.0, "max": 12.558006788554945, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 11.737227298901416, "min": 0.0, "max": 238.60212898254395, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 90.2658401311975, "min": 27.71601836932333, "max": 98.526500642548, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 26086.827797916078, "min": 526.6043490171432, "max": 26086.827797916078, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.02290214142218853, "min": 0.014762484478220964, "max": 0.03367272803249458, "count": 144 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.02290214142218853, "min": 0.014762484478220964, "max": 0.03367272803249458, "count": 144 }, "Agent.Losses.ValueLoss.mean": { "value": 2103.551829020182, "min": 209.12515830993652, "max": 2302.9478047688804, "count": 144 }, "Agent.Losses.ValueLoss.sum": { "value": 2103.551829020182, "min": 209.12515830993652, "max": 2302.9478047688804, "count": 144 }, "Agent.Policy.LearningRate.mean": { "value": 4.5849984749999955e-08, "min": 4.5849984749999955e-08, "max": 0.00029789475070175, "count": 144 }, "Agent.Policy.LearningRate.sum": { "value": 4.5849984749999955e-08, "min": 4.5849984749999955e-08, "max": 0.00029789475070175, "count": 144 }, "Agent.Policy.Epsilon.mean": { "value": 0.10001525, "min": 0.10001525, "max": 0.19929825, "count": 144 }, "Agent.Policy.Epsilon.sum": { "value": 0.10001525, "min": 0.10001525, "max": 0.19929825, "count": 144 }, "Agent.Policy.Beta.mean": { "value": 1.0760975e-05, "min": 1.0760975e-05, "max": 0.004964982675, "count": 144 }, "Agent.Policy.Beta.sum": { "value": 1.0760975e-05, "min": 1.0760975e-05, "max": 0.004964982675, "count": 144 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.019480469947059948, "min": 0.019480469947059948, "max": 0.572342399507761, "count": 144 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.019480469947059948, "min": 0.019480469947059948, "max": 0.572342399507761, "count": 144 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.2492165565490723, "min": 1.2492165565490723, "max": 3.3059381445248923, "count": 144 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.2492165565490723, "min": 1.2492165565490723, "max": 3.3059381445248923, "count": 144 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717299844", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_6_task_1_run_id_1_train.yaml 
--run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_6_task_1_run_id_1_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717306000" }, "total": 6156.047205299999, "count": 1, "self": 1.6398964999989403, "children": { "run_training.setup": { "total": 0.05093060000000005, "count": 1, "self": 0.05093060000000005 }, "TrainerController.start_learning": { "total": 6154.3563782, "count": 1, "self": 5.418136900317222, "children": { "TrainerController._reset_env": { "total": 2.0237714, "count": 1, "self": 2.0237714 }, "TrainerController.advance": { "total": 6146.658031099682, "count": 410099, "self": 5.228230199619247, "children": { "env_step": { "total": 6141.429800900063, "count": 410099, "self": 3938.228473300334, "children": { "SubprocessEnvManager._take_step": { "total": 2199.3581026999154, "count": 410099, "self": 11.15753579954253, "children": { "TorchPolicy.evaluate": { "total": 2188.200566900373, "count": 400072, "self": 2188.200566900373 } } }, "workers": { "total": 3.8432248998136105, "count": 410099, "self": 0.0, "children": { "worker_root": { "total": 6146.764267100197, "count": 410099, "is_parallel": true, "self": 2496.0789261001496, "children": { "steps_from_proto": { "total": 0.006509899999999957, "count": 1, "is_parallel": true, "self": 0.00010509999999985808, "children": { "_process_maybe_compressed_observation": { "total": 0.006357100000000004, "count": 2, "is_parallel": true, "self": 3.329999999990285e-05, "children": { "_observation_to_np_array": { "total": 0.0063238000000001016, "count": 3, "is_parallel": true, "self": 2.6500000000151402e-05, "children": { "process_pixels": { "total": 0.00629729999999995, "count": 3, "is_parallel": true, "self": 0.00024250000000014538, "children": { "image_decompress": { "total": 0.006054799999999805, "count": 3, "is_parallel": true, "self": 0.006054799999999805 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.770000000009489e-05, "count": 2, "is_parallel": true, "self": 4.770000000009489e-05 } } }, "UnityEnvironment.step": { "total": 3650.6788311000473, "count": 410099, "is_parallel": true, "self": 20.28972939995265, "children": { "UnityEnvironment._generate_step_input": { "total": 20.27040840011356, "count": 410099, "is_parallel": true, "self": 20.27040840011356 }, "communicator.exchange": { "total": 3456.35562999988, "count": 410099, "is_parallel": true, "self": 3456.35562999988 }, "steps_from_proto": { "total": 153.7630633001011, "count": 410099, "is_parallel": true, "self": 30.445696599802332, "children": { "_process_maybe_compressed_observation": { "total": 110.14864739993155, "count": 820198, "is_parallel": true, "self": 8.65457649990762, "children": { "_observation_to_np_array": { "total": 101.49407090002393, "count": 1237356, "is_parallel": true, "self": 8.708754800048453, "children": { "process_pixels": { "total": 92.78531609997547, "count": 1237356, "is_parallel": true, "self": 43.93794599960066, "children": { "image_decompress": { "total": 48.84737010037482, "count": 1237356, "is_parallel": true, "self": 48.84737010037482 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 13.16871930036721, "count": 820198, "is_parallel": true, "self": 13.16871930036721 } } } } } } } } } } } } }, "trainer_threads": { "total": 3.0700000024808105e-05, "count": 1, "self": 3.0700000024808105e-05, "children": { "thread_root": { "total": 
0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 6148.932291200009, "count": 302847, "is_parallel": true, "self": 8.721021099908285, "children": { "process_trajectory": { "total": 5343.9588161001, "count": 302847, "is_parallel": true, "self": 5343.4286023001005, "children": { "RLTrainer._checkpoint": { "total": 0.5302138000001833, "count": 2, "is_parallel": true, "self": 0.5302138000001833 } } }, "_update_policy": { "total": 796.2524540000004, "count": 144, "is_parallel": true, "self": 532.9141348999988, "children": { "TorchPPOOptimizer.update": { "total": 263.3383191000017, "count": 3462, "is_parallel": true, "self": 263.3383191000017 } } } } } } } } }, "TrainerController._save_models": { "total": 0.2564081000000442, "count": 1, "self": 0.011734800000340329, "children": { "RLTrainer._checkpoint": { "total": 0.24467329999970389, "count": 1, "self": 0.24467329999970389 } } } } } } }