{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0062370300292969,
"min": 0.9987362623214722,
"max": 2.862666368484497,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9651.8251953125,
"min": 9651.8251953125,
"max": 29411.033203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.698565483093262,
"min": 0.3684185743331909,
"max": 12.698565483093262,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2476.22021484375,
"min": 71.47320556640625,
"max": 2544.06298828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07574474943263056,
"min": 0.058955292969211065,
"max": 0.07877923299298238,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3029789977305222,
"min": 0.23582117187684426,
"max": 0.3811708424224943,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2135055417231485,
"min": 0.1078404410783311,
"max": 0.2806170344937081,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.854022166892594,
"min": 0.4313617643133244,
"max": 1.4030851724685407,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.386363636363637,
"min": 3.090909090909091,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1117.0,
"min": 136.0,
"max": 1364.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.386363636363637,
"min": 3.090909090909091,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1117.0,
"min": 136.0,
"max": 1364.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686825817",
"python_version": "3.10.8 | packaged by conda-forge | (main, Nov 22 2022, 08:23:14) [GCC 10.4.0]",
"command_line_arguments": "/home/ditrip/anaconda3/envs/my-env/bin/mlagents-learn ./config/ppo/Snowball.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686826124"
},
"total": 307.23808749599993,
"count": 1,
"self": 0.31976061599993955,
"children": {
"run_training.setup": {
"total": 0.012911376999909407,
"count": 1,
"self": 0.012911376999909407
},
"TrainerController.start_learning": {
"total": 306.9054155030001,
"count": 1,
"self": 0.4000332929833803,
"children": {
"TrainerController._reset_env": {
"total": 2.996944405000022,
"count": 1,
"self": 2.996944405000022
},
"TrainerController.advance": {
"total": 303.38715521101676,
"count": 18207,
"self": 0.18431304903106138,
"children": {
"env_step": {
"total": 303.2028421619857,
"count": 18207,
"self": 227.83535544499546,
"children": {
"SubprocessEnvManager._take_step": {
"total": 75.16362555598494,
"count": 18207,
"self": 1.0080539509824575,
"children": {
"TorchPolicy.evaluate": {
"total": 74.15557160500248,
"count": 18207,
"self": 74.15557160500248
}
}
},
"workers": {
"total": 0.2038611610053067,
"count": 18207,
"self": 0.0,
"children": {
"worker_root": {
"total": 306.3877610700074,
"count": 18207,
"is_parallel": true,
"self": 144.67336760799844,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001157206000016231,
"count": 1,
"is_parallel": true,
"self": 0.0002993599996443663,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008578460003718646,
"count": 10,
"is_parallel": true,
"self": 0.0008578460003718646
}
}
},
"UnityEnvironment.step": {
"total": 0.020352067999965584,
"count": 1,
"is_parallel": true,
"self": 0.00023720000012872333,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016904399990380625,
"count": 1,
"is_parallel": true,
"self": 0.00016904399990380625
},
"communicator.exchange": {
"total": 0.01879860600001848,
"count": 1,
"is_parallel": true,
"self": 0.01879860600001848
},
"steps_from_proto": {
"total": 0.0011472179999145737,
"count": 1,
"is_parallel": true,
"self": 0.0001755979999416013,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009716199999729724,
"count": 10,
"is_parallel": true,
"self": 0.0009716199999729724
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 161.71439346200896,
"count": 18206,
"is_parallel": true,
"self": 7.8627620820207085,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.386946510992175,
"count": 18206,
"is_parallel": true,
"self": 3.386946510992175
},
"communicator.exchange": {
"total": 127.08761051300326,
"count": 18206,
"is_parallel": true,
"self": 127.08761051300326
},
"steps_from_proto": {
"total": 23.377074355992818,
"count": 18206,
"is_parallel": true,
"self": 4.1012710920315385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.27580326396128,
"count": 182060,
"is_parallel": true,
"self": 19.27580326396128
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.004500002949499e-05,
"count": 1,
"self": 7.004500002949499e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 302.99567769493797,
"count": 167271,
"is_parallel": true,
"self": 1.8508805489718725,
"children": {
"process_trajectory": {
"total": 171.40193413996667,
"count": 167271,
"is_parallel": true,
"self": 170.77409671096666,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6278374290000102,
"count": 4,
"is_parallel": true,
"self": 0.6278374290000102
}
}
},
"_update_policy": {
"total": 129.74286300599942,
"count": 90,
"is_parallel": true,
"self": 36.643872000000556,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.09899100599887,
"count": 4587,
"is_parallel": true,
"self": 93.09899100599887
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12121254899989253,
"count": 1,
"self": 0.0005936419997851772,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12061890700010736,
"count": 1,
"self": 0.12061890700010736
}
}
}
}
}
}
}
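
A minimal sketch for inspecting a stats file with this shape, assuming it has been saved locally as timers.json (the filename and path are assumptions; ML-Agents writes these statistics under the run's results directory). Each entry in "gauges" records a metric's latest value plus its min/max over "count" updates, and the timer profile nests via "children", where "total" is wall-clock seconds in a node and "self" is the portion not attributed to any child.

import json

def print_gauges(gauges):
    # Each gauge holds the latest value plus min/max over `count` updates.
    for name, g in sorted(gauges.items()):
        print(f"{name}: value={g['value']:.4g} "
              f"(min={g['min']:.4g}, max={g['max']:.4g}, n={g['count']})")

def print_timers(node, name="root", depth=0):
    # Timer nodes nest via "children"; "total" is seconds spent in the node.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timers(child, child_name, depth + 1)

if __name__ == "__main__":
    with open("timers.json") as f:
        data = json.load(f)
    print_gauges(data["gauges"])
    print_timers(data)

Run against this file, the gauge listing would show, for example, that SnowballTarget.Environment.CumulativeReward.mean climbed from a minimum of about 3.09 to 25.39 over 20 recorded summaries, and the timer tree would show communicator.exchange dominating env_step time, which is typical when the Unity environment, rather than the policy, is the bottleneck.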