{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.291130781173706,
"min": 1.291130781173706,
"max": 2.878911256790161,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11475.5703125,
"min": 11475.5703125,
"max": 31150.3671875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.051081657409668,
"min": 0.42596545815467834,
"max": 12.051081657409668,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 602.5540771484375,
"min": 16.186687469482422,
"max": 602.5540771484375,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.96,
"min": 3.0526315789473686,
"max": 23.96,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1198.0,
"min": 116.0,
"max": 1198.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.96,
"min": 3.0526315789473686,
"max": 23.96,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1198.0,
"min": 116.0,
"max": 1198.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06341956207481916,
"min": 0.06341956207481916,
"max": 0.07217810911471115,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25367824829927665,
"min": 0.21035494657196835,
"max": 0.36059830695112216,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.25478293929322093,
"min": 0.12959027144254423,
"max": 0.29669092478705383,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0191317571728837,
"min": 0.38877081432763266,
"max": 1.4040658488577489,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.0002901000033,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.29999999999999993,
"max": 0.4999999999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014223500000000013,
"min": 0.00014223500000000013,
"max": 0.00483533,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005689400000000005,
"min": 0.0005689400000000005,
"max": 0.023078849999999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679758535",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --resume --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679758997"
},
"total": 462.62853802400014,
"count": 1,
"self": 0.7331137520002358,
"children": {
"run_training.setup": {
"total": 0.11181566999994175,
"count": 1,
"self": 0.11181566999994175
},
"TrainerController.start_learning": {
"total": 461.78360860199996,
"count": 1,
"self": 0.6278564299893787,
"children": {
"TrainerController._reset_env": {
"total": 7.318766889000017,
"count": 1,
"self": 7.318766889000017
},
"TrainerController.advance": {
"total": 453.6172709870102,
"count": 18029,
"self": 0.3143252820229918,
"children": {
"env_step": {
"total": 453.3029457049872,
"count": 18029,
"self": 312.04665822098946,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.9704863059917,
"count": 18029,
"self": 2.5887571140085583,
"children": {
"TorchPolicy.evaluate": {
"total": 138.38172919198314,
"count": 18029,
"self": 138.38172919198314
}
}
},
"workers": {
"total": 0.28580117800606786,
"count": 18029,
"self": 0.0,
"children": {
"worker_root": {
"total": 460.18479266799545,
"count": 18029,
"is_parallel": true,
"self": 206.00380126198115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019452389999514708,
"count": 1,
"is_parallel": true,
"self": 0.0005798810000214871,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013653579999299836,
"count": 10,
"is_parallel": true,
"self": 0.0013653579999299836
}
}
},
"UnityEnvironment.step": {
"total": 0.08938840999985587,
"count": 1,
"is_parallel": true,
"self": 0.0005805409996355593,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003952120000576542,
"count": 1,
"is_parallel": true,
"self": 0.0003952120000576542
},
"communicator.exchange": {
"total": 0.08144523300006767,
"count": 1,
"is_parallel": true,
"self": 0.08144523300006767
},
"steps_from_proto": {
"total": 0.006967424000094979,
"count": 1,
"is_parallel": true,
"self": 0.00040292100038641365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006564502999708566,
"count": 10,
"is_parallel": true,
"self": 0.006564502999708566
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 254.1809914060143,
"count": 18028,
"is_parallel": true,
"self": 9.910173913995095,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.216550940020852,
"count": 18028,
"is_parallel": true,
"self": 5.216550940020852
},
"communicator.exchange": {
"total": 207.30248558798417,
"count": 18028,
"is_parallel": true,
"self": 207.30248558798417
},
"steps_from_proto": {
"total": 31.751780964014188,
"count": 18028,
"is_parallel": true,
"self": 6.4292545150851765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.32252644892901,
"count": 180280,
"is_parallel": true,
"self": 25.32252644892901
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00025295500017818995,
"count": 1,
"self": 0.00025295500017818995,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 452.0203354418736,
"count": 223705,
"is_parallel": true,
"self": 5.892522370847473,
"children": {
"process_trajectory": {
"total": 158.07684845802555,
"count": 223705,
"is_parallel": true,
"self": 156.64376512402555,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4330833340000027,
"count": 4,
"is_parallel": true,
"self": 1.4330833340000027
}
}
},
"_update_policy": {
"total": 288.0509646130006,
"count": 89,
"is_parallel": true,
"self": 90.87923115800072,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.17173345499987,
"count": 4539,
"is_parallel": true,
"self": 197.17173345499987
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21946134100016934,
"count": 1,
"self": 0.0021894239998800913,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21727191700028925,
"count": 1,
"self": 0.21727191700028925
}
}
}
}
}
}
}