{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6138330101966858,
"min": 0.5982843637466431,
"max": 2.8661022186279297,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5935.1513671875,
"min": 5692.67578125,
"max": 29351.75390625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.643736839294434,
"min": 0.34030818939208984,
"max": 13.909550666809082,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2796.966064453125,
"min": 66.01979064941406,
"max": 2837.54833984375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07495916548901804,
"min": 0.058975814851959615,
"max": 0.0768201368819242,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3747958274450902,
"min": 0.23590325940783846,
"max": 0.379641228532373,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17615228327465993,
"min": 0.12042085507738531,
"max": 0.28825011652182125,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8807614163732996,
"min": 0.48168342030954125,
"max": 1.3815676464753994,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.927272727272726,
"min": 3.6136363636363638,
"max": 27.59090909090909,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1481.0,
"min": 159.0,
"max": 1491.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.927272727272726,
"min": 3.6136363636363638,
"max": 27.59090909090909,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1481.0,
"min": 159.0,
"max": 1491.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673729570",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673730827"
},
"total": 1256.9398604440003,
"count": 1,
"self": 0.43337760600002184,
"children": {
"run_training.setup": {
"total": 0.12042391399995722,
"count": 1,
"self": 0.12042391399995722
},
"TrainerController.start_learning": {
"total": 1256.3860589240003,
"count": 1,
"self": 1.6929161720047432,
"children": {
"TrainerController._reset_env": {
"total": 9.462465933999965,
"count": 1,
"self": 9.462465933999965
},
"TrainerController.advance": {
"total": 1245.0935455479953,
"count": 45478,
"self": 0.9184973359995183,
"children": {
"env_step": {
"total": 1244.1750482119958,
"count": 45478,
"self": 809.519269636061,
"children": {
"SubprocessEnvManager._take_step": {
"total": 433.7204756379575,
"count": 45478,
"self": 4.562935671942228,
"children": {
"TorchPolicy.evaluate": {
"total": 429.1575399660153,
"count": 45478,
"self": 98.74015167205437,
"children": {
"TorchPolicy.sample_actions": {
"total": 330.4173882939609,
"count": 45478,
"self": 330.4173882939609
}
}
}
}
},
"workers": {
"total": 0.9353029379772124,
"count": 45478,
"self": 0.0,
"children": {
"worker_root": {
"total": 1252.3015198579822,
"count": 45478,
"is_parallel": true,
"self": 586.893606557018,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006008595000025707,
"count": 1,
"is_parallel": true,
"self": 0.0035346090000984987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024739859999272085,
"count": 10,
"is_parallel": true,
"self": 0.0024739859999272085
}
}
},
"UnityEnvironment.step": {
"total": 0.042407957999898827,
"count": 1,
"is_parallel": true,
"self": 0.0005192299998952876,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003826810000191472,
"count": 1,
"is_parallel": true,
"self": 0.0003826810000191472
},
"communicator.exchange": {
"total": 0.03951786400000401,
"count": 1,
"is_parallel": true,
"self": 0.03951786400000401
},
"steps_from_proto": {
"total": 0.0019881829999803813,
"count": 1,
"is_parallel": true,
"self": 0.000446302999762338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015418800002180433,
"count": 10,
"is_parallel": true,
"self": 0.0015418800002180433
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 665.4079133009642,
"count": 45477,
"is_parallel": true,
"self": 25.563743958932832,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.177753634024498,
"count": 45477,
"is_parallel": true,
"self": 15.177753634024498
},
"communicator.exchange": {
"total": 531.5319991319697,
"count": 45477,
"is_parallel": true,
"self": 531.5319991319697
},
"steps_from_proto": {
"total": 93.13441657603721,
"count": 45477,
"is_parallel": true,
"self": 21.147710709059538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.98670586697767,
"count": 454770,
"is_parallel": true,
"self": 71.98670586697767
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.874500018559047e-05,
"count": 1,
"self": 4.874500018559047e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1235.0488839378331,
"count": 1044754,
"is_parallel": true,
"self": 30.31840426593044,
"children": {
"process_trajectory": {
"total": 709.4848491439031,
"count": 1044754,
"is_parallel": true,
"self": 707.4348334189035,
"children": {
"RLTrainer._checkpoint": {
"total": 2.050015724999639,
"count": 10,
"is_parallel": true,
"self": 2.050015724999639
}
}
},
"_update_policy": {
"total": 495.24563052799965,
"count": 227,
"is_parallel": true,
"self": 116.36708063000538,
"children": {
"TorchPPOOptimizer.update": {
"total": 378.8785498979943,
"count": 11574,
"is_parallel": true,
"self": 378.8785498979943
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1370825250000962,
"count": 1,
"self": 0.0010324790000595385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13605004600003667,
"count": 1,
"self": 0.13605004600003667
}
}
}
}
}
}
}