{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.45902419090271,
"min": 1.4189385175704956,
"max": 1.4600775241851807,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 9520.1328125,
"min": 7022.2724609375,
"max": 10244.193359375,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 94.38095238095238,
"min": 0.0,
"max": 479.06666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 1982.0,
"min": 0.0,
"max": 8148.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.5714285714285714,
"min": 0.4,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 12.0,
"min": 6.0,
"max": 15.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 310.7142857142857,
"min": 266.25,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6525.0,
"min": 4791.0,
"max": 7011.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199616.0,
"min": 5600.0,
"max": 1199616.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199616.0,
"min": 5600.0,
"max": 1199616.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.179624542593956,
"min": 0.029293406754732132,
"max": 1.057428240776062,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 3.4128663539886475,
"min": 0.4394010901451111,
"max": 15.861424446105957,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.82425856590271,
"min": -0.5083400011062622,
"max": 3.043698310852051,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 15.66091251373291,
"min": -7.6250996589660645,
"max": 66.96136474609375,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 14.982790558647952,
"min": -1.0659333229064942,
"max": 34.24115239522036,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 284.6730206143111,
"min": -15.988999843597412,
"max": 582.0995907187462,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.5532159134745598,
"min": 0.0,
"max": 14.208590613471138,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 10.511102356016636,
"min": 0.0,
"max": 255.75463104248047,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 13.48450844881958,
"min": -0.9593402067820231,
"max": 30.817027791458017,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 256.20566052757204,
"min": -14.390103101730347,
"max": 523.8894724547863,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.03139956008332471,
"min": 0.01544127413459743,
"max": 0.03139956008332471,
"count": 139
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.03139956008332471,
"min": 0.01544127413459743,
"max": 0.03139956008332471,
"count": 139
},
"Agent.Losses.ValueLoss.mean": {
"value": 16.85778299967448,
"min": 0.0011581518677606557,
"max": 23.230221783673322,
"count": 139
},
"Agent.Losses.ValueLoss.sum": {
"value": 16.85778299967448,
"min": 0.0011581518677606557,
"max": 23.230221783673322,
"count": 139
},
"Agent.Policy.LearningRate.mean": {
"value": 1.2998495667499884e-06,
"min": 1.2998495667499884e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.LearningRate.sum": {
"value": 1.2998495667499884e-06,
"min": 1.2998495667499884e-06,
"max": 0.0002979000007,
"count": 139
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10043324999999999,
"min": 0.10043324999999999,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10043324999999999,
"min": 0.10043324999999999,
"max": 0.1993,
"count": 139
},
"Agent.Policy.Beta.mean": {
"value": 3.161917499999981e-05,
"min": 3.161917499999981e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Policy.Beta.sum": {
"value": 3.161917499999981e-05,
"min": 3.161917499999981e-05,
"max": 0.00496507,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.01682404545135796,
"min": 0.014040677877212013,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.01682404545135796,
"min": 0.014040677877212013,
"max": 0.5835290277997652,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.1926020880540213,
"min": 2.0875598937273026,
"max": 3.310828596353531,
"count": 139
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.1926020880540213,
"min": 2.0875598937273026,
"max": 3.310828596353531,
"count": 139
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717334406",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_7_task_2_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_7_task_2_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717337992"
},
"total": 3586.3614707,
"count": 1,
"self": 0.28622660000019096,
"children": {
"run_training.setup": {
"total": 0.0497031,
"count": 1,
"self": 0.0497031
},
"TrainerController.start_learning": {
"total": 3586.025541,
"count": 1,
"self": 4.915035899944996,
"children": {
"TrainerController._reset_env": {
"total": 2.0619252999999995,
"count": 1,
"self": 2.0619252999999995
},
"TrainerController.advance": {
"total": 3578.8856125000552,
"count": 401143,
"self": 4.76728660001163,
"children": {
"env_step": {
"total": 3574.1183259000436,
"count": 401143,
"self": 1555.4412567000554,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2015.4685812000864,
"count": 401143,
"self": 10.224518500097929,
"children": {
"TorchPolicy.evaluate": {
"total": 2005.2440626999885,
"count": 400273,
"self": 2005.2440626999885
}
}
},
"workers": {
"total": 3.2084879999017577,
"count": 401143,
"self": 0.0,
"children": {
"worker_root": {
"total": 3579.278180799884,
"count": 401143,
"is_parallel": true,
"self": 2202.2682824998215,
"children": {
"steps_from_proto": {
"total": 0.007042800000000016,
"count": 1,
"is_parallel": true,
"self": 0.00010649999999978732,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006888900000000087,
"count": 2,
"is_parallel": true,
"self": 3.500000000000725e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006853900000000079,
"count": 3,
"is_parallel": true,
"self": 3.040000000020804e-05,
"children": {
"process_pixels": {
"total": 0.006823499999999871,
"count": 3,
"is_parallel": true,
"self": 0.00024359999999989945,
"children": {
"image_decompress": {
"total": 0.006579899999999972,
"count": 3,
"is_parallel": true,
"self": 0.006579899999999972
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.740000000014177e-05,
"count": 2,
"is_parallel": true,
"self": 4.740000000014177e-05
}
}
},
"UnityEnvironment.step": {
"total": 1377.0028555000627,
"count": 401143,
"is_parallel": true,
"self": 15.780109299982087,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.21327840008589,
"count": 401143,
"is_parallel": true,
"self": 18.21327840008589
},
"communicator.exchange": {
"total": 1211.3751280000433,
"count": 401143,
"is_parallel": true,
"self": 1211.3751280000433
},
"steps_from_proto": {
"total": 131.63433979995125,
"count": 401143,
"is_parallel": true,
"self": 26.139689599939402,
"children": {
"_process_maybe_compressed_observation": {
"total": 94.5559814999875,
"count": 802286,
"is_parallel": true,
"self": 7.305775499928899,
"children": {
"_observation_to_np_array": {
"total": 87.2502060000586,
"count": 1204086,
"is_parallel": true,
"self": 7.576402400126369,
"children": {
"process_pixels": {
"total": 79.67380359993223,
"count": 1204086,
"is_parallel": true,
"self": 37.45594940006022,
"children": {
"image_decompress": {
"total": 42.21785419987201,
"count": 1204086,
"is_parallel": true,
"self": 42.21785419987201
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 10.938668700024357,
"count": 802286,
"is_parallel": true,
"self": 10.938668700024357
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.499999982319423e-05,
"count": 1,
"self": 2.499999982319423e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3582.0273328999883,
"count": 172522,
"is_parallel": true,
"self": 4.871978700006821,
"children": {
"process_trajectory": {
"total": 2826.3873785999813,
"count": 172522,
"is_parallel": true,
"self": 2825.9350023999814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45237619999988965,
"count": 2,
"is_parallel": true,
"self": 0.45237619999988965
}
}
},
"_update_policy": {
"total": 750.7679756000002,
"count": 139,
"is_parallel": true,
"self": 500.0015625000068,
"children": {
"TorchPPOOptimizer.update": {
"total": 250.7664130999934,
"count": 3363,
"is_parallel": true,
"self": 250.7664130999934
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1629422999999406,
"count": 1,
"self": 0.011409700000058365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15153259999988222,
"count": 1,
"self": 0.15153259999988222
}
}
}
}
}
}
}