{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9739418625831604,
"min": 0.9739418625831604,
"max": 2.866896390914917,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9331.3369140625,
"min": 9331.3369140625,
"max": 29454.494140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.872129440307617,
"min": 0.41358691453933716,
"max": 12.872129440307617,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2510.065185546875,
"min": 80.2358627319336,
"max": 2581.85546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06860031069278621,
"min": 0.06617654776331905,
"max": 0.0747166568486921,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27440124277114486,
"min": 0.2647061910532762,
"max": 0.372320572121466,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2117634826138908,
"min": 0.11916757048582476,
"max": 0.299558982764389,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8470539304555632,
"min": 0.476670281943299,
"max": 1.4541599056409564,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.545454545454547,
"min": 3.25,
"max": 25.6,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1124.0,
"min": 143.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.545454545454547,
"min": 3.25,
"max": 25.6,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1124.0,
"min": 143.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724407493",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/home/leonslzhang/workspace/deep-rl-class/.venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1724407771"
},
"total": 277.7958323849998,
"count": 1,
"self": 0.16711632299939083,
"children": {
"run_training.setup": {
"total": 0.018326049000279454,
"count": 1,
"self": 0.018326049000279454
},
"TrainerController.start_learning": {
"total": 277.61039001300014,
"count": 1,
"self": 0.2925242949841049,
"children": {
"TrainerController._reset_env": {
"total": 0.9105995919999259,
"count": 1,
"self": 0.9105995919999259
},
"TrainerController.advance": {
"total": 276.3540347130165,
"count": 18204,
"self": 0.1411382920032338,
"children": {
"env_step": {
"total": 276.2128964210133,
"count": 18204,
"self": 166.4070330169452,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.66429564505233,
"count": 18204,
"self": 0.7343646159943091,
"children": {
"TorchPolicy.evaluate": {
"total": 108.92993102905803,
"count": 18204,
"self": 108.92993102905803
}
}
},
"workers": {
"total": 0.14156775901574292,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 277.07648573194456,
"count": 18204,
"is_parallel": true,
"self": 153.2533017909982,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016711650005163392,
"count": 1,
"is_parallel": true,
"self": 0.0006406160027836449,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010305489977326943,
"count": 10,
"is_parallel": true,
"self": 0.0010305489977326943
}
}
},
"UnityEnvironment.step": {
"total": 0.014845924999463023,
"count": 1,
"is_parallel": true,
"self": 0.00018586999885883415,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00013294600012159208,
"count": 1,
"is_parallel": true,
"self": 0.00013294600012159208
},
"communicator.exchange": {
"total": 0.014057667000088259,
"count": 1,
"is_parallel": true,
"self": 0.014057667000088259
},
"steps_from_proto": {
"total": 0.0004694420003943378,
"count": 1,
"is_parallel": true,
"self": 0.000127314000565093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003421279998292448,
"count": 10,
"is_parallel": true,
"self": 0.0003421279998292448
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 123.82318394094636,
"count": 18203,
"is_parallel": true,
"self": 2.756135363796602,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.538528083023266,
"count": 18203,
"is_parallel": true,
"self": 1.538528083023266
},
"communicator.exchange": {
"total": 111.90780550910222,
"count": 18203,
"is_parallel": true,
"self": 111.90780550910222
},
"steps_from_proto": {
"total": 7.620714985024279,
"count": 18203,
"is_parallel": true,
"self": 1.6481082002364928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.972606784787786,
"count": 182030,
"is_parallel": true,
"self": 5.972606784787786
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.326499962800881e-05,
"count": 1,
"self": 7.326499962800881e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 275.58798077289157,
"count": 248310,
"is_parallel": true,
"self": 2.5622105668780932,
"children": {
"process_trajectory": {
"total": 154.68356519701047,
"count": 248310,
"is_parallel": true,
"self": 154.3258906940091,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3576745030013626,
"count": 4,
"is_parallel": true,
"self": 0.3576745030013626
}
}
},
"_update_policy": {
"total": 118.34220500900301,
"count": 90,
"is_parallel": true,
"self": 13.91603055599262,
"children": {
"TorchPPOOptimizer.update": {
"total": 104.42617445301039,
"count": 4584,
"is_parallel": true,
"self": 104.42617445301039
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.05315814799996588,
"count": 1,
"self": 0.000371242999790411,
"children": {
"RLTrainer._checkpoint": {
"total": 0.052786905000175466,
"count": 1,
"self": 0.052786905000175466
}
}
}
}
}
}
}