{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3807820081710815, "min": 1.3803404569625854, "max": 1.4203031063079834, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8280.5498046875, "min": 8274.6318359375, "max": 8543.4287109375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6, "min": 0.6, "max": 0.7333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 9.0, "min": 9.0, "max": 11.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.mean": { "value": 46.930075327555336, "min": 21.87359364827474, "max": 60.21321233113607, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.sum": { "value": 703.9511299133301, "min": 393.7246856689453, "max": 903.198184967041, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 399.0, "min": 399.0, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5985.0, "min": 5985.0, "max": 5985.0, "count": 200 }, "Agent.Step.mean": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, "Agent.Step.sum": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, 
"Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.4775623083114624, "min": 0.03938337415456772, "max": 0.9533703923225403, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 7.1634345054626465, "min": 0.5513672232627869, "max": 14.300556182861328, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 3.1230053901672363, "min": 0.07217133045196533, "max": 8.87872314453125, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 46.8450813293457, "min": 1.0103986263275146, "max": 133.18084716796875, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 46.4388173421224, "min": 24.650212036711828, "max": 59.341819508870444, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 696.5822601318359, "min": 345.1029685139656, "max": 890.1272926330566, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 1.5581605116526285, "min": 0.0, "max": 15.48292179107666, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 23.37240767478943, "min": 0.0, "max": 232.2438268661499, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 41.794939295450845, "min": 22.185190899031504, "max": 53.40762786865234, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 626.9240894317627, "min": 310.59267258644104, "max": 801.1144180297852, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.02470815012929961, "min": 0.012490201450418681, "max": 0.035067617893218994, "count": 142 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.02470815012929961, "min": 0.012490201450418681, "max": 0.035067617893218994, "count": 142 }, "Agent.Losses.ValueLoss.mean": { "value": 5.0055422484874725, "min": 3.142949879169464, "max": 8.148600021998087, "count": 142 }, "Agent.Losses.ValueLoss.sum": { "value": 5.0055422484874725, "min": 3.142949879169464, "max": 8.148600021998087, "count": 142 }, "Agent.Policy.LearningRate.mean": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.LearningRate.sum": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.Epsilon.mean": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Epsilon.sum": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Beta.mean": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Policy.Beta.sum": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.04293661502500375, "min": 0.042009815108031034, "max": 0.5812804649273554, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.04293661502500375, "min": 0.042009815108031034, "max": 0.5812804649273554, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.6515407661596935, "min": 1.5960870534181595, "max": 3.2931356926759086, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.6515407661596935, "min": 1.5960870534181595, "max": 3.2931356926759086, "count": 142 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717289285", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) 
[MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_5_task_5_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_5_task_5_run_id_1_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717292781" }, "total": 3496.2361535, "count": 1, "self": 0.26444320000018706, "children": { "run_training.setup": { "total": 0.05150100000000002, "count": 1, "self": 0.05150100000000002 }, "TrainerController.start_learning": { "total": 3495.9202093, "count": 1, "self": 4.711024099986389, "children": { "TrainerController._reset_env": { "total": 3.6536857, "count": 1, "self": 3.6536857 }, "TrainerController.advance": { "total": 3487.388301300013, "count": 400401, "self": 4.290108400022746, "children": { "env_step": { "total": 3483.0981928999904, "count": 400401, "self": 1480.1963766002125, "children": { "SubprocessEnvManager._take_step": { "total": 1999.925581099766, "count": 400401, "self": 10.09024999982148, "children": { "TorchPolicy.evaluate": { "total": 1989.8353310999446, "count": 400401, "self": 1989.8353310999446 } } }, "workers": { "total": 2.9762352000119128, "count": 400401, "self": 0.0, "children": { "worker_root": { "total": 3487.512672300067, "count": 400401, "is_parallel": true, "self": 2184.921548999967, "children": { "steps_from_proto": { "total": 0.006318399999999613, "count": 1, "is_parallel": true, "self": 0.00010419999999999874, "children": { "_process_maybe_compressed_observation": { "total": 0.006168199999999846, "count": 2, "is_parallel": true, "self": 3.529999999996036e-05, "children": { "_observation_to_np_array": { "total": 0.006132899999999886, "count": 3, "is_parallel": true, "self": 2.8399999999706438e-05, "children": { "process_pixels": { "total": 0.006104500000000179, "count": 3, "is_parallel": true, "self": 0.00024159999999984194, "children": { "image_decompress": { "total": 0.005862900000000337, "count": 3, "is_parallel": true, "self": 0.005862900000000337 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.599999999976845e-05, "count": 2, "is_parallel": true, "self": 4.599999999976845e-05 } } }, "UnityEnvironment.step": { "total": 1302.5848049001002, "count": 400401, "is_parallel": true, "self": 16.494191499997214, "children": { "UnityEnvironment._generate_step_input": { "total": 18.62136990002272, "count": 400401, "is_parallel": true, "self": 18.62136990002272 }, "communicator.exchange": { "total": 1132.8639745999753, "count": 400401, "is_parallel": true, "self": 1132.8639745999753 }, "steps_from_proto": { "total": 134.60526890010476, "count": 400401, "is_parallel": true, "self": 26.551548999961852, "children": { "_process_maybe_compressed_observation": { "total": 96.76484550012158, "count": 800802, "is_parallel": true, "self": 7.473714899931551, "children": { "_observation_to_np_array": { "total": 89.29113060019003, "count": 1204206, "is_parallel": true, "self": 7.5502318003791515, "children": { "process_pixels": { "total": 81.74089879981088, "count": 1204206, "is_parallel": true, "self": 38.559457799759215, "children": { "image_decompress": { "total": 43.18144100005166, "count": 1204206, "is_parallel": true, "self": 43.18144100005166 } } } } } } }, 
"_process_rank_one_or_two_observation": { "total": 11.288874400021335, "count": 800802, "is_parallel": true, "self": 11.288874400021335 } } } } } } } } } } } } }, "trainer_threads": { "total": 3.480000032141106e-05, "count": 1, "self": 3.480000032141106e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 3490.3282555999645, "count": 166513, "is_parallel": true, "self": 5.017665299951204, "children": { "process_trajectory": { "total": 2733.387028800014, "count": 166513, "is_parallel": true, "self": 2732.929002400014, "children": { "RLTrainer._checkpoint": { "total": 0.4580263999998806, "count": 2, "is_parallel": true, "self": 0.4580263999998806 } } }, "_update_policy": { "total": 751.9235614999991, "count": 142, "is_parallel": true, "self": 500.53509150000303, "children": { "TorchPPOOptimizer.update": { "total": 251.38846999999606, "count": 3408, "is_parallel": true, "self": 251.38846999999606 } } } } } } } } }, "TrainerController._save_models": { "total": 0.1671633999999358, "count": 1, "self": 0.006814699999722507, "children": { "RLTrainer._checkpoint": { "total": 0.16034870000021328, "count": 1, "self": 0.16034870000021328 } } } } } } }