{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4330978393554688,
"min": 1.4228324890136719,
"max": 1.4406050443649292,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8396.5205078125,
"min": 7413.75537109375,
"max": 10191.8935546875,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 1.0,
"min": 0.47619047619047616,
"max": 1.4666666666666666,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 15.0,
"min": 9.0,
"max": 22.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 9.8,
"min": 8.733333333333333,
"max": 64.6,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 147.0,
"min": 131.0,
"max": 969.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 1.0,
"min": 0.4666666666666667,
"max": 1.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 15.0,
"min": 7.0,
"max": 20.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 61.8855702718099,
"min": 18.07352263132731,
"max": 99.01873881022135,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 928.2835540771484,
"min": 271.10283946990967,
"max": 1485.2810821533203,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 7.041964666048686,
"min": 0.6274208890067207,
"max": 10.10590693851312,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 105.62946999073029,
"min": 10.675592601299286,
"max": 167.09604838490486,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 2.2734446922938027,
"min": 0.2157174030939738,
"max": 3.787388292948405,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 34.10167038440704,
"min": 3.235761046409607,
"max": 56.810824394226074,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.2273444707194964,
"min": 0.02157174050807953,
"max": 0.3787388304869334,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 3.410167060792446,
"min": 0.32357610762119293,
"max": 5.681082457304001,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 53.37832997639974,
"min": 25.41759978135427,
"max": 114.9529899597168,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 800.6749496459961,
"min": 381.263996720314,
"max": 1740.7570915222168,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 389.6,
"min": 295.42857142857144,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5844.0,
"min": 5148.0,
"max": 7098.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199726.0,
"min": 5987.0,
"max": 1199726.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199726.0,
"min": 5987.0,
"max": 1199726.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.41979676485061646,
"min": 0.02073347195982933,
"max": 1.0294127464294434,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 6.2969512939453125,
"min": 0.31100207567214966,
"max": 19.077856063842773,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3845311105251312,
"min": -0.04111192002892494,
"max": 1.2897236347198486,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 5.7679667472839355,
"min": -0.616678774356842,
"max": 20.635578155517578,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 10.318418216705322,
"min": 0.8063467979431153,
"max": 14.622659891843796,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 154.77627325057983,
"min": 12.095201969146729,
"max": 233.96255826950073,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.424801246325175,
"min": 0.0,
"max": 13.730506579081217,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 21.372018694877625,
"min": 0.0,
"max": 205.95759868621826,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 9.286575818061829,
"min": 0.7257114847501119,
"max": 13.1603940166533,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 139.29863727092743,
"min": 10.885672271251678,
"max": 210.5663042664528,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02060388573833431,
"min": 0.012787539705944559,
"max": 0.0351080271863827,
"count": 138
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02060388573833431,
"min": 0.012787539705944559,
"max": 0.0351080271863827,
"count": 138
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.6114289710919062,
"min": 0.1380375986918807,
"max": 1.4280904904007912,
"count": 138
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.6114289710919062,
"min": 0.1380375986918807,
"max": 1.4280904904007912,
"count": 138
},
"Agent.Policy.LearningRate.mean": {
"value": 1.680999440000015e-07,
"min": 1.680999440000015e-07,
"max": 0.00029780325073225,
"count": 138
},
"Agent.Policy.LearningRate.sum": {
"value": 1.680999440000015e-07,
"min": 1.680999440000015e-07,
"max": 0.00029780325073225,
"count": 138
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10005599999999999,
"min": 0.10005599999999999,
"max": 0.19926775000000005,
"count": 138
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10005599999999999,
"min": 0.10005599999999999,
"max": 0.19926775000000005,
"count": 138
},
"Agent.Policy.Beta.mean": {
"value": 1.2794400000000026e-05,
"min": 1.2794400000000026e-05,
"max": 0.004963460725,
"count": 138
},
"Agent.Policy.Beta.sum": {
"value": 1.2794400000000026e-05,
"min": 1.2794400000000026e-05,
"max": 0.004963460725,
"count": 138
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.035483790251115956,
"min": 0.03262007015722769,
"max": 0.6027635087569555,
"count": 138
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.035483790251115956,
"min": 0.03262007015722769,
"max": 0.6027635087569555,
"count": 138
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.7481442888577778,
"min": 1.698238621155421,
"max": 3.315477500359217,
"count": 138
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.7481442888577778,
"min": 1.698238621155421,
"max": 3.315477500359217,
"count": 138
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716659058",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_1_task_0_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_1_task_0_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1716662675"
},
"total": 3616.7352253,
"count": 1,
"self": 0.38807509999969625,
"children": {
"run_training.setup": {
"total": 0.05774099999999993,
"count": 1,
"self": 0.05774099999999993
},
"TrainerController.start_learning": {
"total": 3616.2894092,
"count": 1,
"self": 7.146410799917248,
"children": {
"TrainerController._reset_env": {
"total": 2.1801370999999996,
"count": 1,
"self": 2.1801370999999996
},
"TrainerController.advance": {
"total": 3606.783314300083,
"count": 401129,
"self": 6.780138699951294,
"children": {
"env_step": {
"total": 3600.0031756001317,
"count": 401129,
"self": 1747.4815330000858,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1848.0716910999993,
"count": 401129,
"self": 11.836096399968255,
"children": {
"TorchPolicy.evaluate": {
"total": 1836.235594700031,
"count": 400178,
"self": 1836.235594700031
}
}
},
"workers": {
"total": 4.449951500046675,
"count": 401129,
"self": 0.0,
"children": {
"worker_root": {
"total": 3607.8694048000893,
"count": 401129,
"is_parallel": true,
"self": 2085.5975822000473,
"children": {
"steps_from_proto": {
"total": 0.0069782000000000455,
"count": 1,
"is_parallel": true,
"self": 0.00011409999999978382,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006812400000000052,
"count": 2,
"is_parallel": true,
"self": 3.470000000005413e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006777699999999998,
"count": 3,
"is_parallel": true,
"self": 3.360000000007801e-05,
"children": {
"process_pixels": {
"total": 0.00674409999999992,
"count": 3,
"is_parallel": true,
"self": 0.00025719999999984644,
"children": {
"image_decompress": {
"total": 0.006486900000000073,
"count": 3,
"is_parallel": true,
"self": 0.006486900000000073
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 5.170000000020991e-05,
"count": 2,
"is_parallel": true,
"self": 5.170000000020991e-05
}
}
},
"UnityEnvironment.step": {
"total": 1522.264844400042,
"count": 401129,
"is_parallel": true,
"self": 17.803467900114356,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.16704540004008,
"count": 401129,
"is_parallel": true,
"self": 18.16704540004008
},
"communicator.exchange": {
"total": 1331.4173454000334,
"count": 401129,
"is_parallel": true,
"self": 1331.4173454000334
},
"steps_from_proto": {
"total": 154.87698569985412,
"count": 401129,
"is_parallel": true,
"self": 30.8827834995369,
"children": {
"_process_maybe_compressed_observation": {
"total": 111.34152750011636,
"count": 802258,
"is_parallel": true,
"self": 8.22217940032688,
"children": {
"_observation_to_np_array": {
"total": 103.11934809978948,
"count": 1203636,
"is_parallel": true,
"self": 7.640617299942647,
"children": {
"process_pixels": {
"total": 95.47873079984683,
"count": 1203636,
"is_parallel": true,
"self": 44.28413199976671,
"children": {
"image_decompress": {
"total": 51.19459880008012,
"count": 1203636,
"is_parallel": true,
"self": 51.19459880008012
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 12.652674700200862,
"count": 802258,
"is_parallel": true,
"self": 12.652674700200862
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.519999998185085e-05,
"count": 1,
"self": 2.519999998185085e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3612.1196556999744,
"count": 178287,
"is_parallel": true,
"self": 5.795580399979826,
"children": {
"process_trajectory": {
"total": 2925.131991299994,
"count": 178287,
"is_parallel": true,
"self": 2924.755848799994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3761424999997871,
"count": 2,
"is_parallel": true,
"self": 0.3761424999997871
}
}
},
"_update_policy": {
"total": 681.1920840000005,
"count": 138,
"is_parallel": true,
"self": 452.29166269999706,
"children": {
"TorchPPOOptimizer.update": {
"total": 228.90042130000342,
"count": 3420,
"is_parallel": true,
"self": 228.90042130000342
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1795217999997476,
"count": 1,
"self": 0.012365099999897211,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1671566999998504,
"count": 1,
"self": 0.1671566999998504
}
}
}
}
}
}
}