{ "name": "root", "gauges": { "Agent.Policy.Entropy.mean": { "value": 1.3782780170440674, "min": 1.3782151937484741, "max": 1.4210225343704224, "count": 200 }, "Agent.Policy.Entropy.sum": { "value": 8269.66796875, "min": 8261.1328125, "max": 8543.4287109375, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.TreeDropCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.SaveLocationCount.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.mean": { "value": 0.6, "min": 0.6, "max": 0.7333333333333333, "count": 200 }, "Agent.DroneBasedReforestation.OutofEnergyCount.sum": { "value": 9.0, "min": 9.0, "max": 11.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.mean": { "value": 49.8122802734375, "min": 21.87359364827474, "max": 57.18936614990234, "count": 200 }, "Agent.DroneBasedReforestation.HighestPointonTerrainFound.sum": { "value": 747.1842041015625, "min": 351.02964878082275, "max": 857.8404922485352, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.difficulty.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.LessonNumber.task.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 200 }, "Agent.Environment.EpisodeLength.mean": { "value": 399.0, "min": 399.0, "max": 399.0, "count": 200 }, "Agent.Environment.EpisodeLength.sum": { "value": 5985.0, "min": 5985.0, "max": 5985.0, "count": 200 }, "Agent.Step.mean": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, "Agent.Step.sum": { "value": 1199600.0, "min": 5600.0, "max": 1199600.0, "count": 200 }, 
"Agent.Policy.CuriosityValueEstimate.mean": { "value": 0.4593314230442047, "min": 0.03938337415456772, "max": 0.9567351937294006, "count": 200 }, "Agent.Policy.CuriosityValueEstimate.sum": { "value": 6.8899712562561035, "min": 0.5513672232627869, "max": 14.351027488708496, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.mean": { "value": 3.419581651687622, "min": 0.07217133045196533, "max": 7.836782932281494, "count": 200 }, "Agent.Policy.ExtrinsicValueEstimate.sum": { "value": 51.293724060058594, "min": 1.0103986263275146, "max": 117.55174255371094, "count": 200 }, "Agent.Environment.CumulativeReward.mean": { "value": 49.30374501546224, "min": 24.650212036711828, "max": 55.93592198689779, "count": 200 }, "Agent.Environment.CumulativeReward.sum": { "value": 739.5561752319336, "min": 345.1029685139656, "max": 839.0388298034668, "count": 200 }, "Agent.Policy.CuriosityReward.mean": { "value": 1.5181209087371825, "min": 0.0, "max": 15.111220169067384, "count": 200 }, "Agent.Policy.CuriosityReward.sum": { "value": 22.77181363105774, "min": 0.0, "max": 226.66830253601074, "count": 200 }, "Agent.Policy.ExtrinsicReward.mean": { "value": 44.373364766438804, "min": 22.185190899031504, "max": 50.34231847127278, "count": 200 }, "Agent.Policy.ExtrinsicReward.sum": { "value": 665.600471496582, "min": 310.59267258644104, "max": 755.1347770690918, "count": 200 }, "Agent.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 200 }, "Agent.Losses.PolicyLoss.mean": { "value": 0.021738319968183834, "min": 0.015113459376152605, "max": 0.03495504959331205, "count": 142 }, "Agent.Losses.PolicyLoss.sum": { "value": 0.021738319968183834, "min": 0.015113459376152605, "max": 0.03495504959331205, "count": 142 }, "Agent.Losses.ValueLoss.mean": { "value": 3.596959392229716, "min": 3.4740884204705558, "max": 8.149625758330027, "count": 142 }, "Agent.Losses.ValueLoss.sum": { "value": 3.596959392229716, "min": 3.4740884204705558, "max": 8.149625758330027, "count": 142 }, "Agent.Policy.LearningRate.mean": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.LearningRate.sum": { "value": 1.8000994000000013e-06, "min": 1.8000994000000013e-06, "max": 0.0002979000007, "count": 142 }, "Agent.Policy.Epsilon.mean": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Epsilon.sum": { "value": 0.10060000000000001, "min": 0.10060000000000001, "max": 0.1993, "count": 142 }, "Agent.Policy.Beta.mean": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Policy.Beta.sum": { "value": 3.994000000000003e-05, "min": 3.994000000000003e-05, "max": 0.00496507, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.mean": { "value": 0.04315997458373507, "min": 0.039169896859675646, "max": 0.5812800365189711, "count": 142 }, "Agent.Losses.CuriosityForwardLoss.sum": { "value": 0.04315997458373507, "min": 0.039169896859675646, "max": 0.5812800365189711, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.mean": { "value": 1.648421362042427, "min": 1.5828117430210114, "max": 3.293136020501455, "count": 142 }, "Agent.Losses.CuriosityInverseLoss.sum": { "value": 1.648421362042427, "min": 1.5828117430210114, "max": 3.293136020501455, "count": 142 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1717557388", "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC 
v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_2_task_5_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_2_task_5_run_id_2_train --base-port 5007", "mlagents_version": "0.30.0", "mlagents_envs_version": "0.30.0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.7.1+cu110", "numpy_version": "1.21.0", "end_time_seconds": "1717560993" }, "total": 3605.1997278999997, "count": 1, "self": 0.28142909999996846, "children": { "run_training.setup": { "total": 0.06273320000000004, "count": 1, "self": 0.06273320000000004 }, "TrainerController.start_learning": { "total": 3604.8555656, "count": 1, "self": 6.687209000112944, "children": { "TrainerController._reset_env": { "total": 2.068492, "count": 1, "self": 2.068492 }, "TrainerController.advance": { "total": 3595.931360699887, "count": 400401, "self": 5.8664365998843095, "children": { "env_step": { "total": 3590.064924100003, "count": 400401, "self": 1705.568448300295, "children": { "SubprocessEnvManager._take_step": { "total": 1880.304659199843, "count": 400401, "self": 11.513266299743009, "children": { "TorchPolicy.evaluate": { "total": 1868.7913929001, "count": 400401, "self": 1868.7913929001 } } }, "workers": { "total": 4.191816599865088, "count": 400401, "self": 0.0, "children": { "worker_root": { "total": 3596.6483337999402, "count": 400401, "is_parallel": true, "self": 2108.4824045999376, "children": { "steps_from_proto": { "total": 0.006435600000000097, "count": 1, "is_parallel": true, "self": 0.00010099999999990672, "children": { "_process_maybe_compressed_observation": { "total": 0.006288499999999919, "count": 2, "is_parallel": true, "self": 3.1799999999693185e-05, "children": { "_observation_to_np_array": { "total": 0.006256700000000226, "count": 3, "is_parallel": true, "self": 3.1900000000417705e-05, "children": { "process_pixels": { "total": 0.006224799999999808, "count": 3, "is_parallel": true, "self": 0.00022210000000000285, "children": { "image_decompress": { "total": 0.006002699999999805, "count": 3, "is_parallel": true, "self": 0.006002699999999805 } } } } } } }, "_process_rank_one_or_two_observation": { "total": 4.6100000000270924e-05, "count": 2, "is_parallel": true, "self": 4.6100000000270924e-05 } } }, "UnityEnvironment.step": { "total": 1488.1594936000026, "count": 400401, "is_parallel": true, "self": 17.05473420004978, "children": { "UnityEnvironment._generate_step_input": { "total": 17.385588199960036, "count": 400401, "is_parallel": true, "self": 17.385588199960036 }, "communicator.exchange": { "total": 1303.2457644000388, "count": 400401, "is_parallel": true, "self": 1303.2457644000388 }, "steps_from_proto": { "total": 150.47340679995398, "count": 400401, "is_parallel": true, "self": 29.94422219995191, "children": { "_process_maybe_compressed_observation": { "total": 108.47355359993594, "count": 800802, "is_parallel": true, "self": 8.360507799785523, "children": { "_observation_to_np_array": { "total": 100.11304580015042, "count": 1204206, "is_parallel": true, "self": 7.945408700214713, "children": { "process_pixels": { "total": 92.1676370999357, "count": 1204206, "is_parallel": true, "self": 42.254305699932765, "children": { "image_decompress": { "total": 49.91333140000294, "count": 1204206, "is_parallel": true, "self": 49.91333140000294 } } } } } } }, 
"_process_rank_one_or_two_observation": { "total": 12.055631000066128, "count": 800802, "is_parallel": true, "self": 12.055631000066128 } } } } } } } } } } } } }, "trainer_threads": { "total": 2.6300000172341242e-05, "count": 1, "self": 2.6300000172341242e-05, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 3601.030426499896, "count": 177926, "is_parallel": true, "self": 5.270895099936297, "children": { "process_trajectory": { "total": 2894.371525299961, "count": 177926, "is_parallel": true, "self": 2893.954692599961, "children": { "RLTrainer._checkpoint": { "total": 0.4168326999997589, "count": 2, "is_parallel": true, "self": 0.4168326999997589 } } }, "_update_policy": { "total": 701.3880060999986, "count": 142, "is_parallel": true, "self": 473.74644459999143, "children": { "TorchPPOOptimizer.update": { "total": 227.64156150000719, "count": 3408, "is_parallel": true, "self": 227.64156150000719 } } } } } } } } }, "TrainerController._save_models": { "total": 0.16847759999973277, "count": 1, "self": 0.007169799999701354, "children": { "RLTrainer._checkpoint": { "total": 0.16130780000003142, "count": 1, "self": 0.16130780000003142 } } } } } } }