{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4084880352020264,
"min": 1.4084880352020264,
"max": 1.430176854133606,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69100.421875,
"min": 68008.765625,
"max": 77416.4375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.79553264604812,
"min": 78.90220820189275,
"max": 430.55172413793105,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48769.0,
"min": 48769.0,
"max": 50024.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999892.0,
"min": 49904.0,
"max": 1999892.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999892.0,
"min": 49904.0,
"max": 1999892.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4616518020629883,
"min": 0.13863198459148407,
"max": 2.470776081085205,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1432.681396484375,
"min": 15.942678451538086,
"max": 1528.669189453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8664396734991433,
"min": 1.8865574733070705,
"max": 3.9734972297209095,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2250.2678899765015,
"min": 216.9541094303131,
"max": 2344.6078567504883,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8664396734991433,
"min": 1.8865574733070705,
"max": 3.9734972297209095,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2250.2678899765015,
"min": 216.9541094303131,
"max": 2344.6078567504883,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016248468255273315,
"min": 0.01284480686445022,
"max": 0.019456996195690912,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04874540476581994,
"min": 0.02568961372890044,
"max": 0.058370988587072736,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061369700771239065,
"min": 0.022206527894983688,
"max": 0.06480916049331427,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1841091023137172,
"min": 0.044413055789967376,
"max": 0.1841091023137172,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5401488199833277e-06,
"min": 3.5401488199833277e-06,
"max": 0.000295317826560725,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0620446459949983e-05,
"min": 1.0620446459949983e-05,
"max": 0.0008439414186861999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118001666666666,
"min": 0.10118001666666666,
"max": 0.19843927500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30354005,
"min": 0.20750495000000002,
"max": 0.5813138000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.888283166666659e-05,
"min": 6.888283166666659e-05,
"max": 0.0049221198224999995,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020664849499999976,
"min": 0.00020664849499999976,
"max": 0.01406755862,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675948209",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675950537"
},
"total": 2327.365509047,
"count": 1,
"self": 0.7379972109997652,
"children": {
"run_training.setup": {
"total": 0.10857909400010612,
"count": 1,
"self": 0.10857909400010612
},
"TrainerController.start_learning": {
"total": 2326.5189327420003,
"count": 1,
"self": 4.106991651091448,
"children": {
"TrainerController._reset_env": {
"total": 10.697211121999999,
"count": 1,
"self": 10.697211121999999
},
"TrainerController.advance": {
"total": 2311.537611729909,
"count": 232550,
"self": 4.506622485665957,
"children": {
"env_step": {
"total": 1804.8569615360736,
"count": 232550,
"self": 1508.4482230351273,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.73887384098566,
"count": 232550,
"self": 15.324721193002688,
"children": {
"TorchPolicy.evaluate": {
"total": 278.414152647983,
"count": 222892,
"self": 69.68852854192664,
"children": {
"TorchPolicy.sample_actions": {
"total": 208.72562410605633,
"count": 222892,
"self": 208.72562410605633
}
}
}
}
},
"workers": {
"total": 2.6698646599606946,
"count": 232550,
"self": 0.0,
"children": {
"worker_root": {
"total": 2318.0254101080054,
"count": 232550,
"is_parallel": true,
"self": 1091.8492411169816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018781359999593406,
"count": 1,
"is_parallel": true,
"self": 0.00033357799998157134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015445579999777692,
"count": 2,
"is_parallel": true,
"self": 0.0015445579999777692
}
}
},
"UnityEnvironment.step": {
"total": 0.03025862300000881,
"count": 1,
"is_parallel": true,
"self": 0.00030663199981972866,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020583400009854813,
"count": 1,
"is_parallel": true,
"self": 0.00020583400009854813
},
"communicator.exchange": {
"total": 0.028975012000046263,
"count": 1,
"is_parallel": true,
"self": 0.028975012000046263
},
"steps_from_proto": {
"total": 0.000771145000044271,
"count": 1,
"is_parallel": true,
"self": 0.00023915799988571962,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005319870001585514,
"count": 2,
"is_parallel": true,
"self": 0.0005319870001585514
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1226.1761689910238,
"count": 232549,
"is_parallel": true,
"self": 37.61698352012331,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.3055818089615,
"count": 232549,
"is_parallel": true,
"self": 77.3055818089615
},
"communicator.exchange": {
"total": 1015.9780123749606,
"count": 232549,
"is_parallel": true,
"self": 1015.9780123749606
},
"steps_from_proto": {
"total": 95.2755912869784,
"count": 232549,
"is_parallel": true,
"self": 36.833470299933424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.44212098704497,
"count": 465098,
"is_parallel": true,
"self": 58.44212098704497
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 502.174027708169,
"count": 232550,
"self": 6.364506100269637,
"children": {
"process_trajectory": {
"total": 160.9200130899012,
"count": 232550,
"self": 159.51471948490052,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4052936050006792,
"count": 10,
"self": 1.4052936050006792
}
}
},
"_update_policy": {
"total": 334.88950851799814,
"count": 97,
"self": 278.2426285399897,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.64687997800843,
"count": 2910,
"self": 56.64687997800843
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3720000424655154e-06,
"count": 1,
"self": 1.3720000424655154e-06
},
"TrainerController._save_models": {
"total": 0.17711686700022256,
"count": 1,
"self": 0.002827371000421408,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17428949599980115,
"count": 1,
"self": 0.17428949599980115
}
}
}
}
}
}
}