{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3802821636199951, "min": 1.3802800178527832, "max": 1.4226044416427612, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8285.833984375, "min": 8273.3984375, "max": 8547.685546875, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6, "min": 0.6, "max": 0.7333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 9.0, "min": 9.0, "max": 11.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.mean": { "value": 49.44323221842448, "min": 21.87359364827474, "max": 57.73535359700521, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.sum": { "value": 741.6484832763672, "min": 347.5026168823242, "max": 866.0303039550781, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 399.0, "min": 399.0, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5985.0, "min": 5985.0, "max": 5985.0, "count": 200 }, "Agent.Step.mean": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, "Agent.Step.sum": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, 
"Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.45340976119041443, "min": 0.03938337415456772, "max": 0.926279604434967, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 6.801146507263184, "min": 0.5513672232627869, "max": 13.894193649291992, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 3.3910324573516846, "min": 0.07217133045196533, "max": 8.808566093444824, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 50.86548614501953, "min": 1.0103986263275146, "max": 132.1284942626953, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 48.25275446573893, "min": 22.211960558096568, "max": 54.82623405456543, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 723.791316986084, "min": 333.1794083714485, "max": 822.3935108184814, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 1.754322099685669, "min": 0.0, "max": 15.152112325032553, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 26.314831495285034, "min": 0.0, "max": 227.28168487548828, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 43.42747243245443, "min": 19.99076159397761, "max": 49.34361089070638, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 651.4120864868164, "min": 299.86142390966415, "max": 740.1541633605957, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.02422509165868784, "min": 0.013419038674328476, "max": 0.034023417703186475, "count": 142 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.02422509165868784, "min": 0.013419038674328476, "max": 0.034023417703186475, "count": 142 }, "Agent.Losses.ValueLoss.mean": { "value": 4.857426633437474, "min": 3.281998614470164, "max": 7.86569486061732, "count": 142 }, "Agent.Losses.ValueLoss.sum": { "value": 4.857426633437474, "min": 3.281998614470164, "max": 7.86569486061732, "count": 142 }, "Agent.Policy.LearningRate.mean": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.LearningRate.sum": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.Epsilon.mean": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Epsilon.sum": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Beta.mean": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Policy.Beta.sum": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.04594996618106961, "min": 0.04188392901172241, "max": 0.5812800365189711, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.04594996618106961, "min": 0.04188392901172241, "max": 0.5812800365189711, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.7005163729190826, "min": 1.6171739896138508, "max": 3.293136020501455, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.7005163729190826, "min": 1.6171739896138508, "max": 3.293136020501455, "count": 142 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717740057", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 
64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_8_task_5_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_8_task_5_run_id_2_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717743993" }, "total": 3935.9511347999996, "count": 1, "self": 0.28479429999970307, "children": { "run_training.setup": { "total": 0.04959659999999999, "count": 1, "self": 0.04959659999999999 }, "TrainerController.start_learning": { "total": 3935.6167439, "count": 1, "self": 6.517244800071694, "children": { "TrainerController._reset_env": { "total": 2.0246074000000003, "count": 1, "self": 2.0246074000000003 }, "TrainerController.advance": { "total": 3926.830021099928, "count": 400401, "self": 6.331649899886088, "children": { "env_step": { "total": 3920.498371200042, "count": 400401, "self": 1580.05505449998, "children": { "SubprocessEnvManager._take_step": { "total": 2336.3787044000087, "count": 400401, "self": 11.964751300151875, "children": { "TorchPolicy.evaluate": { "total": 2324.413953099857, "count": 400401, "self": 2324.413953099857 } } }, "workers": { "total": 4.064612300053128, "count": 400401, "self": 0.0, "children": { "worker_root": { "total": 3927.933748399921, "count": 400401, "is_parallel": true, "self": 2567.251229599953, "children": { "steps_from_proto": { "total": 0.0060957999999999846, "count": 1, "is_parallel": true, "self": 0.00010560000000015002, "children": { "_process_maybe_compressed_observation": { "total": 0.005943299999999985, "count": 2, "is_parallel": true, "self": 3.2200000000148776e-05, "children": { "_observation_to_np_array": { "total": 0.005911099999999836, "count": 3, "is_parallel": true, "self": 2.9699999999577287e-05, "children": { "process_pixels": { "total": 0.005881400000000259, "count": 3, "is_parallel": true, "self": 0.0002335000000004417, "children": { "image_decompress": { "total": 0.005647899999999817, "count": 3, "is_parallel": true, "self": 0.005647899999999817 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.689999999984984e-05, "count": 2, "is_parallel": true, "self": 4.689999999984984e-05 } } }, "UnityEnvironment.step": { "total": 1360.676422999968, "count": 400401, "is_parallel": true, "self": 19.892875999995113, "children": { "UnityEnvironment._generate_step_input": { "total": 22.03998209999896, "count": 400401, "is_parallel": true, "self": 22.03998209999896 }, "communicator.exchange": { "total": 1145.8378231999636, "count": 400401, "is_parallel": true, "self": 1145.8378231999636 }, "steps_from_proto": { "total": 172.90574170001042, "count": 400401, "is_parallel": true, "self": 34.82223710028774, "children": { "_process_maybe_compressed_observation": { "total": 123.35323009999728, "count": 800802, "is_parallel": true, "self": 9.733467300032885, "children": { "_observation_to_np_array": { "total": 113.61976279996439, "count": 1204206, "is_parallel": true, "self": 9.950532699734651, "children": { "process_pixels": { "total": 103.66923010022974, "count": 1204206, "is_parallel": true, "self": 49.127413500253326, "children": { "image_decompress": { "total": 54.541816599976414, "count": 1204206, "is_parallel": true, "self": 54.541816599976414 } } } } } 
} }, "_process_rank_one_or_two_observation": { "total": 14.730274499725407, "count": 800802, "is_parallel": true, "self": 14.730274499725407 } } } } } } } } } } } } }, "trainer_threads": { "total": 3.49000001733657e-05, "count": 1, "self": 3.49000001733657e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 3931.4203604000268, "count": 185898, "is_parallel": true, "self": 5.316124400042099, "children": { "process_trajectory": { "total": 3053.861029099984, "count": 185898, "is_parallel": true, "self": 3053.345836399984, "children": { "RLTrainer._checkpoint": { "total": 0.5151927000001706, "count": 2, "is_parallel": true, "self": 0.5151927000001706 } } }, "_update_policy": { "total": 872.2432069000006, "count": 142, "is_parallel": true, "self": 586.860499999993, "children": { "TorchPPOOptimizer.update": { "total": 285.38270690000763, "count": 3408, "is_parallel": true, "self": 285.38270690000763 } } } } } } } } }, "TrainerController._save_models": { "total": 0.24483569999983956, "count": 1, "self": 0.005974599999717611, "children": { "RLTrainer._checkpoint": { "total": 0.23886110000012195, "count": 1, "self": 0.23886110000012195 } } } } } } }