{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 3.97270131111145,
"min": 3.8580334186553955,
"max": 4.385058879852295,
"count": 100
},
"Agent.Policy.Entropy.sum": {
"value": 19772.134765625,
"min": 4324.59814453125,
"max": 30275.548828125,
"count": 100
},
"Agent.WildfireResourceManagement.IndividualResourceCount.mean": {
"value": 0.8925924670917017,
"min": 0.5944444899343782,
"max": 0.8999998337692685,
"count": 100
},
"Agent.WildfireResourceManagement.IndividualResourceCount.sum": {
"value": 24.099996611475945,
"min": 7.299999460577965,
"max": 32.099995002150536,
"count": 100
},
"Agent.WildfireResourceManagement.RewardforMovingResourcestoNeighbours.mean": {
"value": 1132.227039054588,
"min": 112.27245076497395,
"max": 3760.4518534342446,
"count": 100
},
"Agent.WildfireResourceManagement.RewardforMovingResourcestoNeighbours.sum": {
"value": 30570.130054473877,
"min": 1010.4520568847656,
"max": 46698.55090332031,
"count": 100
},
"Agent.WildfireResourceManagement.RewardforMovingResourcestoSelf.mean": {
"value": 0.4156373774174682,
"min": 0.12152427987328135,
"max": 4.414028333293067,
"count": 100
},
"Agent.WildfireResourceManagement.RewardforMovingResourcestoSelf.sum": {
"value": 11.222209190271641,
"min": 3.078512976934718,
"max": 150.33448672667146,
"count": 100
},
"Agent.WildfireResourceManagement.CollectivePerformance.mean": {
"value": 56.56567022535536,
"min": 18.60294959280226,
"max": 203.521976047092,
"count": 100
},
"Agent.WildfireResourceManagement.CollectivePerformance.sum": {
"value": 1527.2730960845947,
"min": 187.9657154083252,
"max": 2957.372100830078,
"count": 100
},
"Agent.WildfireResourceManagement.IndividualPerformance.mean": {
"value": 37.497118130878164,
"min": 9.254810196382028,
"max": 102.94365169604619,
"count": 100
},
"Agent.WildfireResourceManagement.IndividualPerformance.sum": {
"value": 1012.4221895337105,
"min": 95.39323675632477,
"max": 1618.2430572509766,
"count": 100
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"Agent.Environment.EpisodeLength.mean": {
"value": 184.0,
"min": 87.0,
"max": 447.0,
"count": 100
},
"Agent.Environment.EpisodeLength.sum": {
"value": 4968.0,
"min": 1026.0,
"max": 7254.0,
"count": 100
},
"Agent.Step.mean": {
"value": 449820.0,
"min": 4428.0,
"max": 449820.0,
"count": 100
},
"Agent.Step.sum": {
"value": 449820.0,
"min": 4428.0,
"max": 449820.0,
"count": 100
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": -0.00548894889652729,
"min": -0.03430468589067459,
"max": 0.14062848687171936,
"count": 100
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": -0.12075687944889069,
"min": -0.9807142615318298,
"max": 2.8125698566436768,
"count": 100
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 270.135009765625,
"min": 6.275667190551758,
"max": 340.4035339355469,
"count": 100
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 5942.97021484375,
"min": 163.16734313964844,
"max": 9848.359375,
"count": 100
},
"Agent.Environment.CumulativeReward.mean": {
"value": 1034.0226169932973,
"min": 250.50194498697917,
"max": 2925.5960973103843,
"count": 100
},
"Agent.Environment.CumulativeReward.sum": {
"value": 22748.49757385254,
"min": 5705.194595336914,
"max": 39389.590560913086,
"count": 100
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.011027288665487007,
"min": 0.008789539543123772,
"max": 0.2349855530521144,
"count": 100
},
"Agent.Policy.CuriosityReward.sum": {
"value": 0.24260035064071417,
"min": 0.1741041336208582,
"max": 5.404667720198631,
"count": 100
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 1034.0226169932973,
"min": 250.50194498697917,
"max": 2925.5960973103843,
"count": 100
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 22748.49757385254,
"min": 5705.194595336914,
"max": 39389.590560913086,
"count": 100
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.06593079745640486,
"min": 0.05476249791940467,
"max": 0.07900114618241787,
"count": 99
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.13186159491280972,
"min": 0.05476249791940467,
"max": 0.15389319697316464,
"count": 99
},
"Agent.Losses.ValueLoss.mean": {
"value": 8362.757220247608,
"min": 301.55181486710256,
"max": 21009.97041015625,
"count": 99
},
"Agent.Losses.ValueLoss.sum": {
"value": 16725.514440495215,
"min": 301.55181486710256,
"max": 30410.252495659723,
"count": 99
},
"Agent.Policy.LearningRate.mean": {
"value": 1.0980996339999984e-06,
"min": 1.0980996339999984e-06,
"max": 0.00029796000068000006,
"count": 99
},
"Agent.Policy.LearningRate.sum": {
"value": 2.1961992679999967e-06,
"min": 2.1961992679999967e-06,
"max": 0.0005911860029379999,
"count": 99
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10036600000000001,
"min": 0.10036600000000001,
"max": 0.19932,
"count": 99
},
"Agent.Policy.Epsilon.sum": {
"value": 0.20073200000000002,
"min": 0.10149800000000003,
"max": 0.397062,
"count": 99
},
"Agent.Policy.Beta.mean": {
"value": 4.656339999999997e-05,
"min": 4.656339999999997e-05,
"max": 0.009932068000000002,
"count": 99
},
"Agent.Policy.Beta.sum": {
"value": 9.312679999999994e-05,
"min": 9.312679999999994e-05,
"max": 0.019706493800000002,
"count": 99
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.0022638690914241577,
"min": 0.0015879822761120125,
"max": 0.37060696955608285,
"count": 99
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.004527738182848315,
"min": 0.0015879822761120125,
"max": 0.37060696955608285,
"count": 99
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 3.9615057295277003,
"min": 3.838027181047382,
"max": 4.391513147215912,
"count": 99
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 7.923011459055401,
"min": 3.838027181047382,
"max": 8.72829988494254,
"count": 99
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716652710",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WildfireResourceManagement_difficulty_4_task_2_run_id_0_train.yaml --run-id=WildfireResourceManagement/train/WildfireResourceManagement_difficulty_4_task_2_run_id_0_train --base-port 5006",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1716654492"
},
"total": 1782.0044866,
"count": 1,
"self": 0.20458140000005187,
"children": {
"run_training.setup": {
"total": 0.05663320000000005,
"count": 1,
"self": 0.05663320000000005
},
"TrainerController.start_learning": {
"total": 1781.743272,
"count": 1,
"self": 1.1691789000110475,
"children": {
"TrainerController._reset_env": {
"total": 2.115984,
"count": 1,
"self": 2.115984
},
"TrainerController.advance": {
"total": 1778.3634315999889,
"count": 50210,
"self": 1.0052072999892516,
"children": {
"env_step": {
"total": 1777.3582242999996,
"count": 50210,
"self": 1501.7532938000159,
"children": {
"SubprocessEnvManager._take_step": {
"total": 275.113631199983,
"count": 50210,
"self": 1.7641998000028138,
"children": {
"TorchPolicy.evaluate": {
"total": 273.3494313999802,
"count": 50210,
"self": 273.3494313999802
}
}
},
"workers": {
"total": 0.49129930000063515,
"count": 50210,
"self": 0.0,
"children": {
"worker_root": {
"total": 1779.1295885000159,
"count": 50210,
"is_parallel": true,
"self": 335.8695537000117,
"children": {
"steps_from_proto": {
"total": 0.00024120000000005248,
"count": 1,
"is_parallel": true,
"self": 0.00011600000000000499,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0001252000000000475,
"count": 2,
"is_parallel": true,
"self": 0.0001252000000000475
}
}
},
"UnityEnvironment.step": {
"total": 1443.2597936000043,
"count": 50210,
"is_parallel": true,
"self": 3.2992694999989,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.078019399997862,
"count": 50210,
"is_parallel": true,
"self": 4.078019399997862
},
"communicator.exchange": {
"total": 1426.4108941000034,
"count": 50210,
"is_parallel": true,
"self": 1426.4108941000034
},
"steps_from_proto": {
"total": 9.47161060000408,
"count": 50210,
"is_parallel": true,
"self": 4.886855700005743,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4.584754899998337,
"count": 100420,
"is_parallel": true,
"self": 4.584754899998337
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9400000155656016e-05,
"count": 1,
"self": 1.9400000155656016e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1778.4394830000017,
"count": 89000,
"is_parallel": true,
"self": 2.936941700023681,
"children": {
"process_trajectory": {
"total": 1411.5376232999786,
"count": 89000,
"is_parallel": true,
"self": 1411.5376232999786
},
"_update_policy": {
"total": 363.96491799999944,
"count": 148,
"is_parallel": true,
"self": 178.18578389999726,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.77913410000218,
"count": 10317,
"is_parallel": true,
"self": 185.77913410000218
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09465809999983321,
"count": 1,
"self": 0.005292699999927208,
"children": {
"RLTrainer._checkpoint": {
"total": 0.089365399999906,
"count": 1,
"self": 0.089365399999906
}
}
}
}
}
}
}