{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.427849531173706, "min": 1.418702483177185, "max": 1.437328577041626, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8571.380859375, "min": 8512.21484375, "max": 8628.283203125, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6, "min": 0.6, "max": 0.7333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 9.0, "min": 9.0, "max": 11.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.HighestPotentialSoildFound.mean": { "value": 0.9998361984888713, "min": 0.4800114636619886, "max": 0.9998857935269674, "count": 200 }, "Agent.DroneBasedReforestation.HighestPotentialSoildFound.sum": { "value": 14.997542977333069, "min": 7.200171954929829, "max": 14.99828690290451, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 399.0, "min": 399.0, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5985.0, "min": 5985.0, "max": 5985.0, "count": 200 }, "Agent.Step.mean": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, "Agent.Step.sum": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, 
"Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.3489936292171478, "min": 0.03917349502444267, "max": 1.003085970878601, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 5.2349042892456055, "min": 0.5513672232627869, "max": 15.046289443969727, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": -0.12275466322898865, "min": -0.151315376162529, "max": 0.07217133045196533, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": -1.8413199186325073, "min": -2.269730567932129, "max": 1.0103986263275146, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": -1.24971071879069e-05, "min": -0.528077228864034, "max": 5.482236544291178e-05, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": -0.00018745660781860352, "min": -7.92115843296051, "max": 0.0008223354816436768, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 1.3173022826512655, "min": 0.0, "max": 15.465808455149332, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 19.759534239768982, "min": 0.0, "max": 231.98712682724, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": -1.1450052261352539e-05, "min": -0.4752696712811788, "max": 4.9176315466562905e-05, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": -0.00017175078392028809, "min": -7.129045069217682, "max": 0.0007376447319984436, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.025451530411373824, "min": 0.01599841533849637, "max": 0.034998478911196194, "count": 142 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.025451530411373824, "min": 0.01599841533849637, "max": 0.034998478911196194, "count": 142 }, "Agent.Losses.ValueLoss.mean": { "value": 0.0032331619198278836, "min": 0.002739706886738228, "max": 0.05614178154307107, "count": 142 }, "Agent.Losses.ValueLoss.sum": { "value": 0.0032331619198278836, "min": 0.002739706886738228, "max": 0.05614178154307107, "count": 142 }, "Agent.Policy.LearningRate.mean": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.LearningRate.sum": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.Epsilon.mean": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Epsilon.sum": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Beta.mean": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Policy.Beta.sum": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.032335208263248205, "min": 0.030415774633487064, "max": 0.5812804649273554, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.032335208263248205, "min": 0.030415774633487064, "max": 0.5812804649273554, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 2.0179600367943444, "min": 1.9452497959136963, "max": 3.2931356926759086, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 2.0179600367943444, "min": 1.9452497959136963, "max": 3.2931356926759086, "count": 142 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": 
"1717648176", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_5_task_4_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_5_task_4_run_id_2_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717651676" }, "total": 3500.2174176000003, "count": 1, "self": 0.2749975000001541, "children": { "run_training.setup": { "total": 0.052103200000000016, "count": 1, "self": 0.052103200000000016 }, "TrainerController.start_learning": { "total": 3499.8903169, "count": 1, "self": 4.614986500089344, "children": { "TrainerController._reset_env": { "total": 2.1153891000000002, "count": 1, "self": 2.1153891000000002 }, "TrainerController.advance": { "total": 3492.9848644999106, "count": 400401, "self": 4.3951731999914045, "children": { "env_step": { "total": 3488.589691299919, "count": 400401, "self": 1577.1130145999903, "children": { "SubprocessEnvManager._take_step": { "total": 1908.3537281998636, "count": 400401, "self": 10.14322639996908, "children": { "TorchPolicy.evaluate": { "total": 1898.2105017998945, "count": 400401, "self": 1898.2105017998945 } } }, "workers": { "total": 3.1229485000654384, "count": 400401, "self": 0.0, "children": { "worker_root": { "total": 3491.0138146998825, "count": 400401, "is_parallel": true, "self": 2116.3947193999384, "children": { "steps_from_proto": { "total": 0.0067330000000001, "count": 1, "is_parallel": true, "self": 0.0001045000000001739, "children": { "_process_maybe_compressed_observation": { "total": 0.006583999999999923, "count": 2, "is_parallel": true, "self": 3.16000000000205e-05, "children": { "_observation_to_np_array": { "total": 0.006552399999999903, "count": 3, "is_parallel": true, "self": 3.1499999999962114e-05, "children": { "process_pixels": { "total": 0.006520899999999941, "count": 3, "is_parallel": true, "self": 0.0002321999999999047, "children": { "image_decompress": { "total": 0.006288700000000036, "count": 3, "is_parallel": true, "self": 0.006288700000000036 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.450000000000287e-05, "count": 2, "is_parallel": true, "self": 4.450000000000287e-05 } } }, "UnityEnvironment.step": { "total": 1374.6123622999444, "count": 400401, "is_parallel": true, "self": 18.32776709986888, "children": { "UnityEnvironment._generate_step_input": { "total": 19.05626360005065, "count": 400401, "is_parallel": true, "self": 19.05626360005065 }, "communicator.exchange": { "total": 1191.3707521000165, "count": 400401, "is_parallel": true, "self": 1191.3707521000165 }, "steps_from_proto": { "total": 145.8575795000084, "count": 400401, "is_parallel": true, "self": 28.86785080007303, "children": { "_process_maybe_compressed_observation": { "total": 104.43266129989556, "count": 800802, "is_parallel": true, "self": 8.402499999922924, "children": { "_observation_to_np_array": { "total": 96.03016129997263, "count": 1204206, "is_parallel": true, "self": 8.491150599996772, "children": { "process_pixels": { "total": 87.53901069997586, "count": 1204206, "is_parallel": true, "self": 40.95264879998032, "children": { "image_decompress": { "total": 
46.586361899995545, "count": 1204206, "is_parallel": true, "self": 46.586361899995545 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 12.557067400039806, "count": 800802, "is_parallel": true, "self": 12.557067400039806 } } } } } } } } } } } } }, "trainer_threads": { "total": 2.499999982319423e-05, "count": 1, "self": 2.499999982319423e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 3496.475213099929, "count": 170699, "is_parallel": true, "self": 3.4796951998414443, "children": { "process_trajectory": { "total": 2779.1840630000847, "count": 170699, "is_parallel": true, "self": 2778.801018400084, "children": { "RLTrainer._checkpoint": { "total": 0.3830446000004031, "count": 2, "is_parallel": true, "self": 0.3830446000004031 } } }, "_update_policy": { "total": 713.811454900003, "count": 142, "is_parallel": true, "self": 478.3603295999879, "children": { "TorchPPOOptimizer.update": { "total": 235.45112530001512, "count": 3408, "is_parallel": true, "self": 235.45112530001512 } } } } } } } } }, "TrainerController._save_models": { "total": 0.1750518000003467, "count": 1, "self": 0.0074424000004000845, "children": { "RLTrainer._checkpoint": { "total": 0.16760939999994662, "count": 1, "self": 0.16760939999994662 } } } } } } }
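
For reference, a minimal Python sketch (standard library only) for loading and summarizing a dump in this format. The run_logs/timers.json path is an assumption for illustration; the layout of the "gauges" entries (value/min/max/count) and of the timer-tree nodes (total/count/self/children) is taken directly from the structure above.

import json

# Hypothetical path; point this at wherever the dump above was saved.
with open("run_logs/timers.json") as f:
    run = json.load(f)

# Each gauge is {"value", "min", "max", "count"}: print the latest value
# alongside the range observed over the run.
for name, g in sorted(run["gauges"].items()):
    print(f"{name}: value={g['value']:.6g} (min={g['min']:.6g}, max={g['max']:.6g}, n={g['count']})")

# Timer nodes nest {"total", "count", "self", "children"}: walk the tree
# depth-first and report where wall-clock time was spent.
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s over {node.get('count', 0)} calls")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(run.get("name", "root"), run)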