{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8852450251579285,
"min": 0.8852450251579285,
"max": 2.8695950508117676,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8452.3193359375,
"min": 8452.3193359375,
"max": 29387.5234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.545845031738281,
"min": 0.42879679799079895,
"max": 12.545845031738281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2446.439697265625,
"min": 83.18657684326172,
"max": 2514.96728515625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06565504732505217,
"min": 0.058871790462837775,
"max": 0.07319415045283534,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2626201893002087,
"min": 0.2354871618513511,
"max": 0.3388364633433144,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21799378834810912,
"min": 0.13114399301554716,
"max": 0.28334402108893675,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8719751533924365,
"min": 0.5245759720621886,
"max": 1.4167201054446836,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.795454545454547,
"min": 3.840909090909091,
"max": 24.795454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1091.0,
"min": 169.0,
"max": 1363.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.795454545454547,
"min": 3.840909090909091,
"max": 24.795454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1091.0,
"min": 169.0,
"max": 1363.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706336438",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706336915"
},
"total": 476.68251384599995,
"count": 1,
"self": 0.5967720539999846,
"children": {
"run_training.setup": {
"total": 0.05177993499995637,
"count": 1,
"self": 0.05177993499995637
},
"TrainerController.start_learning": {
"total": 476.033961857,
"count": 1,
"self": 0.5961952389956195,
"children": {
"TrainerController._reset_env": {
"total": 3.342416455000034,
"count": 1,
"self": 3.342416455000034
},
"TrainerController.advance": {
"total": 472.00007322400427,
"count": 18198,
"self": 0.2889711919992237,
"children": {
"env_step": {
"total": 471.71110203200504,
"count": 18198,
"self": 306.5936702390118,
"children": {
"SubprocessEnvManager._take_step": {
"total": 164.81279513999914,
"count": 18198,
"self": 1.6637083059985684,
"children": {
"TorchPolicy.evaluate": {
"total": 163.14908683400057,
"count": 18198,
"self": 163.14908683400057
}
}
},
"workers": {
"total": 0.3046366529941338,
"count": 18198,
"self": 0.0,
"children": {
"worker_root": {
"total": 474.7494763660088,
"count": 18198,
"is_parallel": true,
"self": 236.15026786101384,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005190614000014193,
"count": 1,
"is_parallel": true,
"self": 0.0036168310001016835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015737829999125097,
"count": 10,
"is_parallel": true,
"self": 0.0015737829999125097
}
}
},
"UnityEnvironment.step": {
"total": 0.04017879999997831,
"count": 1,
"is_parallel": true,
"self": 0.000762572999974509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004309700000248995,
"count": 1,
"is_parallel": true,
"self": 0.0004309700000248995
},
"communicator.exchange": {
"total": 0.03652410899996994,
"count": 1,
"is_parallel": true,
"self": 0.03652410899996994
},
"steps_from_proto": {
"total": 0.0024611480000089614,
"count": 1,
"is_parallel": true,
"self": 0.0006509020000748933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001810245999934068,
"count": 10,
"is_parallel": true,
"self": 0.001810245999934068
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 238.59920850499498,
"count": 18197,
"is_parallel": true,
"self": 11.407982728986156,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.916942354005528,
"count": 18197,
"is_parallel": true,
"self": 5.916942354005528
},
"communicator.exchange": {
"total": 183.8512240609984,
"count": 18197,
"is_parallel": true,
"self": 183.8512240609984
},
"steps_from_proto": {
"total": 37.4230593610049,
"count": 18197,
"is_parallel": true,
"self": 7.0590490030339765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.364010357970926,
"count": 181970,
"is_parallel": true,
"self": 30.364010357970926
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001360620000241397,
"count": 1,
"self": 0.0001360620000241397,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 465.61833507796274,
"count": 712882,
"is_parallel": true,
"self": 16.079432804999442,
"children": {
"process_trajectory": {
"total": 257.6581284899624,
"count": 712882,
"is_parallel": true,
"self": 256.68562269296245,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9725057969999398,
"count": 4,
"is_parallel": true,
"self": 0.9725057969999398
}
}
},
"_update_policy": {
"total": 191.8807737830009,
"count": 90,
"is_parallel": true,
"self": 56.822251664000305,
"children": {
"TorchPPOOptimizer.update": {
"total": 135.0585221190006,
"count": 4584,
"is_parallel": true,
"self": 135.0585221190006
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09514087700006257,
"count": 1,
"self": 0.0009526540001161266,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09418822299994645,
"count": 1,
"self": 0.09418822299994645
}
}
}
}
}
}
}