{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.02970881387591362,
"min": 0.005502134095877409,
"max": 1.458825945854187,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 883.658935546875,
"min": 164.7999267578125,
"max": 44254.9453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29976.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29976.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10165281593799591,
"min": -0.11501456052064896,
"max": -0.05839862674474716,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -24.193370819091797,
"min": -27.60349464416504,
"max": -14.132467269897461,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.7020528316497803,
"min": 0.170526921749115,
"max": 1.8837361335754395,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 405.08856201171875,
"min": 40.58540725708008,
"max": 452.0966796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06676959379212441,
"min": 0.06504628872538963,
"max": 0.07305271990977844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8680047192976172,
"min": 0.511369039368449,
"max": 0.9294779065483632,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010006159010320688,
"min": 0.007283555994805915,
"max": 0.013096564808702471,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13008006713416895,
"min": 0.053525364753395796,
"max": 0.15715877770442965,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.401997532700006e-06,
"min": 7.401997532700006e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 9.622596792510008e-05,
"min": 9.622596792510008e-05,
"max": 0.0034935184354938993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246730000000003,
"min": 0.10246730000000003,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.3320749000000003,
"min": 1.2658052000000002,
"max": 2.4645061,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025648327000000017,
"min": 0.00025648327000000017,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003334282510000002,
"min": 0.003334282510000002,
"max": 0.11646415938999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 1.722501277923584,
"min": 0.4974973201751709,
"max": 1.8506916761398315,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 22.39251708984375,
"min": 3.4824812412261963,
"max": 23.83445930480957,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 963.2647058823529,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 20979.0,
"min": 16775.0,
"max": 32751.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9997143358701751,
"min": -0.9998533844947814,
"max": -0.7876647563979906,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -20.994001053273678,
"min": -31.994001626968384,
"max": -14.791000872850418,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9997143358701751,
"min": -0.9998533844947814,
"max": -0.7876647563979906,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -20.994001053273678,
"min": -31.994001626968384,
"max": -14.791000872850418,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 16.717697711489606,
"min": 7.600432110621648,
"max": 18.57363071364741,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 351.07165194128174,
"min": 148.92243483662605,
"max": 592.772442755755,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675945792",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675948146"
},
"total": 2354.0459201940002,
"count": 1,
"self": 0.6439772789999552,
"children": {
"run_training.setup": {
"total": 0.12278756199998497,
"count": 1,
"self": 0.12278756199998497
},
"TrainerController.start_learning": {
"total": 2353.279155353,
"count": 1,
"self": 2.081998420052514,
"children": {
"TrainerController._reset_env": {
"total": 7.86767890200008,
"count": 1,
"self": 7.86767890200008
},
"TrainerController.advance": {
"total": 2343.2377247389477,
"count": 62799,
"self": 2.22269723001682,
"children": {
"env_step": {
"total": 1503.2693276010125,
"count": 62799,
"self": 1340.8418320559715,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.13611666202473,
"count": 62799,
"self": 6.049010860062481,
"children": {
"TorchPolicy.evaluate": {
"total": 155.08710580196225,
"count": 62560,
"self": 51.99187968593924,
"children": {
"TorchPolicy.sample_actions": {
"total": 103.09522611602301,
"count": 62560,
"self": 103.09522611602301
}
}
}
}
},
"workers": {
"total": 1.2913788830162503,
"count": 62799,
"self": 0.0,
"children": {
"worker_root": {
"total": 2347.135065440056,
"count": 62799,
"is_parallel": true,
"self": 1158.8975543620913,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020950560001438134,
"count": 1,
"is_parallel": true,
"self": 0.000771768000049633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013232880000941805,
"count": 8,
"is_parallel": true,
"self": 0.0013232880000941805
}
}
},
"UnityEnvironment.step": {
"total": 0.05489538200004063,
"count": 1,
"is_parallel": true,
"self": 0.0006656040000052599,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006200699999681092,
"count": 1,
"is_parallel": true,
"self": 0.0006200699999681092
},
"communicator.exchange": {
"total": 0.05146886800002903,
"count": 1,
"is_parallel": true,
"self": 0.05146886800002903
},
"steps_from_proto": {
"total": 0.0021408400000382244,
"count": 1,
"is_parallel": true,
"self": 0.0005915770002502541,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015492629997879703,
"count": 8,
"is_parallel": true,
"self": 0.0015492629997879703
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1188.2375110779649,
"count": 62798,
"is_parallel": true,
"self": 38.27477738599396,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.47892558299668,
"count": 62798,
"is_parallel": true,
"self": 28.47892558299668
},
"communicator.exchange": {
"total": 994.3962634630111,
"count": 62798,
"is_parallel": true,
"self": 994.3962634630111
},
"steps_from_proto": {
"total": 127.08754464596313,
"count": 62798,
"is_parallel": true,
"self": 29.499671642061685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 97.58787300390145,
"count": 502384,
"is_parallel": true,
"self": 97.58787300390145
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 837.7456999079184,
"count": 62799,
"self": 3.1884801059495658,
"children": {
"process_trajectory": {
"total": 191.33308089297088,
"count": 62799,
"self": 191.12629961497123,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20678127799965296,
"count": 2,
"self": 0.20678127799965296
}
}
},
"_update_policy": {
"total": 643.2241389089979,
"count": 411,
"self": 239.42813052697215,
"children": {
"TorchPPOOptimizer.update": {
"total": 403.7960083820258,
"count": 22896,
"self": 403.7960083820258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.799997885944322e-07,
"count": 1,
"self": 8.799997885944322e-07
},
"TrainerController._save_models": {
"total": 0.09175241200000528,
"count": 1,
"self": 0.0015243359998748929,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09022807600013039,
"count": 1,
"self": 0.09022807600013039
}
}
}
}
}
}
}