{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.816674530506134,
"min": 0.7926225066184998,
"max": 2.8568437099456787,
"count": 36
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7743.7080078125,
"min": 7741.38330078125,
"max": 29256.9375,
"count": 36
},
"SnowballTarget.Step.mean": {
"value": 359944.0,
"min": 9952.0,
"max": 359944.0,
"count": 36
},
"SnowballTarget.Step.sum": {
"value": 359944.0,
"min": 9952.0,
"max": 359944.0,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.722535133361816,
"min": 0.29261475801467896,
"max": 13.722535133361816,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2662.171875,
"min": 56.76726150512695,
"max": 2802.098876953125,
"count": 36
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 36
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 36
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06622977422118899,
"min": 0.06128641730337061,
"max": 0.0728793583022814,
"count": 36
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26491909688475596,
"min": 0.24921138306101553,
"max": 0.364396791511407,
"count": 36
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17329898845477432,
"min": 0.1065496731236778,
"max": 0.3044767375378048,
"count": 36
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6931959538190973,
"min": 0.4261986924947112,
"max": 1.295578857555109,
"count": 36
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.68728710424e-05,
"min": 8.68728710424e-05,
"max": 0.00029675280108239997,
"count": 36
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003474914841696,
"min": 0.0003474914841696,
"max": 0.001454064015312,
"count": 36
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1289576,
"min": 0.1289576,
"max": 0.19891759999999997,
"count": 36
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5158304,
"min": 0.5158304,
"max": 0.984688,
"count": 36
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0014549842400000002,
"min": 0.0014549842400000002,
"max": 0.00494598824,
"count": 36
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005819936960000001,
"min": 0.005819936960000001,
"max": 0.0242359312,
"count": 36
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.90909090909091,
"min": 3.022727272727273,
"max": 26.90909090909091,
"count": 36
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1184.0,
"min": 133.0,
"max": 1480.0,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.90909090909091,
"min": 3.022727272727273,
"max": 26.90909090909091,
"count": 36
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1184.0,
"min": 133.0,
"max": 1480.0,
"count": 36
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714813702",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --force --run-id=SnowballTarget3 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714814511"
},
"total": 808.9773728229998,
"count": 1,
"self": 0.012453610999273224,
"children": {
"run_training.setup": {
"total": 0.05065045300034399,
"count": 1,
"self": 0.05065045300034399
},
"TrainerController.start_learning": {
"total": 808.9142687590002,
"count": 1,
"self": 1.9795725669919193,
"children": {
"TrainerController._reset_env": {
"total": 2.5851128809999864,
"count": 1,
"self": 2.5851128809999864
},
"TrainerController.advance": {
"total": 804.0989159740084,
"count": 33001,
"self": 0.4834191440154427,
"children": {
"env_step": {
"total": 803.6154968299929,
"count": 33001,
"self": 520.7575256590162,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.35716918894013,
"count": 33001,
"self": 2.5989544428734916,
"children": {
"TorchPolicy.evaluate": {
"total": 279.75821474606664,
"count": 33001,
"self": 279.75821474606664
}
}
},
"workers": {
"total": 0.5008019820365917,
"count": 33000,
"self": 0.0,
"children": {
"worker_root": {
"total": 805.5951890579781,
"count": 33000,
"is_parallel": true,
"self": 402.1318842810192,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032390200003646896,
"count": 1,
"is_parallel": true,
"self": 0.0008766969999669527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002362323000397737,
"count": 10,
"is_parallel": true,
"self": 0.002362323000397737
}
}
},
"UnityEnvironment.step": {
"total": 0.03659387499965305,
"count": 1,
"is_parallel": true,
"self": 0.0006553569996867736,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003889899999194313,
"count": 1,
"is_parallel": true,
"self": 0.0003889899999194313
},
"communicator.exchange": {
"total": 0.0336073779999424,
"count": 1,
"is_parallel": true,
"self": 0.0336073779999424
},
"steps_from_proto": {
"total": 0.0019421500001044478,
"count": 1,
"is_parallel": true,
"self": 0.0003677839999909338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001574366000113514,
"count": 10,
"is_parallel": true,
"self": 0.001574366000113514
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 403.4633047769589,
"count": 32999,
"is_parallel": true,
"self": 18.4526843290937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.606960499925663,
"count": 32999,
"is_parallel": true,
"self": 9.606960499925663
},
"communicator.exchange": {
"total": 315.35880058793555,
"count": 32999,
"is_parallel": true,
"self": 315.35880058793555
},
"steps_from_proto": {
"total": 60.04485936000401,
"count": 32999,
"is_parallel": true,
"self": 11.246593000818393,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.79826635918562,
"count": 329990,
"is_parallel": true,
"self": 48.79826635918562
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010824699984368635,
"count": 1,
"self": 0.00010824699984368635,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 794.0796448990204,
"count": 1171886,
"is_parallel": true,
"self": 24.530406085900722,
"children": {
"process_trajectory": {
"total": 437.4762247271183,
"count": 1171887,
"is_parallel": true,
"self": 437.0641351051181,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41208962200016686,
"count": 3,
"is_parallel": true,
"self": 0.41208962200016686
}
}
},
"_update_policy": {
"total": 332.0730140860014,
"count": 164,
"is_parallel": true,
"self": 97.32214598001883,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.7508681059826,
"count": 8378,
"is_parallel": true,
"self": 234.7508681059826
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2505590900000243,
"count": 1,
"self": 0.001279290000184119,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24927979999984018,
"count": 1,
"self": 0.24927979999984018
}
}
}
}
}
}
}