{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8238424062728882,
"min": 0.5983486175537109,
"max": 1.4793857336044312,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 24675.728515625,
"min": 17921.73828125,
"max": 44878.64453125,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479971.0,
"min": 29952.0,
"max": 479971.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479971.0,
"min": 29952.0,
"max": 479971.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03393407538533211,
"min": -0.11449132114648819,
"max": 0.06466787308454514,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.245980262756348,
"min": -27.706899642944336,
"max": 15.326286315917969,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004330315627157688,
"min": -0.004330315627157688,
"max": 0.22298245131969452,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.0522667169570923,
"min": -1.0522667169570923,
"max": 52.846839904785156,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06582760475465052,
"min": 0.06582760475465052,
"max": 0.07190233596700106,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9215864665651072,
"min": 0.49718271729592556,
"max": 1.0425069393685966,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004342993218545286,
"min": 0.0006843273065821146,
"max": 0.005363659234896699,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.060801905059634,
"min": 0.0063687238930104015,
"max": 0.06255148644879013,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.070832166868571e-05,
"min": 2.070832166868571e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00028991650336159994,
"min": 0.00028991650336159994,
"max": 0.002852871349043,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10690274285714285,
"min": 0.10690274285714285,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4966384,
"min": 1.3382272,
"max": 2.3383966,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0006995840114285714,
"min": 0.0006995840114285714,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00979417616,
"min": 0.00979417616,
"max": 0.0951306043,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016969839110970497,
"min": 0.016969839110970497,
"max": 0.3948346674442291,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23757773637771606,
"min": 0.23757773637771606,
"max": 2.7638425827026367,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 900.1818181818181,
"min": 852.1351351351351,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29706.0,
"min": 15984.0,
"max": 32142.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.2945758043365045,
"min": -1.0000000521540642,
"max": -0.17778240911224308,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -9.721001543104649,
"min": -32.000001668930054,
"max": -6.044601909816265,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.2945758043365045,
"min": -1.0000000521540642,
"max": -0.17778240911224308,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -9.721001543104649,
"min": -32.000001668930054,
"max": -6.044601909816265,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.16064053057043842,
"min": 0.16064053057043842,
"max": 8.704850730486214,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.301137508824468,
"min": 5.301137508824468,
"max": 139.27761168777943,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657688449",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657689431"
},
"total": 981.481013503,
"count": 1,
"self": 0.4849008919999278,
"children": {
"run_training.setup": {
"total": 0.04234659999997348,
"count": 1,
"self": 0.04234659999997348
},
"TrainerController.start_learning": {
"total": 980.9537660110001,
"count": 1,
"self": 0.7118365039901846,
"children": {
"TrainerController._reset_env": {
"total": 10.093847574999927,
"count": 1,
"self": 10.093847574999927
},
"TrainerController.advance": {
"total": 970.0519258690099,
"count": 31562,
"self": 0.7559808919854731,
"children": {
"env_step": {
"total": 609.220171189016,
"count": 31562,
"self": 555.4903335260225,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.36380215199665,
"count": 31562,
"self": 2.3488455790030685,
"children": {
"TorchPolicy.evaluate": {
"total": 51.01495657299358,
"count": 31301,
"self": 17.526318946007677,
"children": {
"TorchPolicy.sample_actions": {
"total": 33.488637626985906,
"count": 31301,
"self": 33.488637626985906
}
}
}
}
},
"workers": {
"total": 0.3660355109968805,
"count": 31562,
"self": 0.0,
"children": {
"worker_root": {
"total": 978.865999423011,
"count": 31562,
"is_parallel": true,
"self": 473.5258167499974,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007647147000056975,
"count": 1,
"is_parallel": true,
"self": 0.004001559000016641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003645588000040334,
"count": 8,
"is_parallel": true,
"self": 0.003645588000040334
}
}
},
"UnityEnvironment.step": {
"total": 0.04716679300008764,
"count": 1,
"is_parallel": true,
"self": 0.000538208000079976,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004840750000312255,
"count": 1,
"is_parallel": true,
"self": 0.0004840750000312255
},
"communicator.exchange": {
"total": 0.04452872499996374,
"count": 1,
"is_parallel": true,
"self": 0.04452872499996374
},
"steps_from_proto": {
"total": 0.0016157850000126928,
"count": 1,
"is_parallel": true,
"self": 0.00043266899979244045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011831160002202523,
"count": 8,
"is_parallel": true,
"self": 0.0011831160002202523
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 505.3401826730136,
"count": 31561,
"is_parallel": true,
"self": 14.157218159027138,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.600098747987545,
"count": 31561,
"is_parallel": true,
"self": 11.600098747987545
},
"communicator.exchange": {
"total": 432.9098563040012,
"count": 31561,
"is_parallel": true,
"self": 432.9098563040012
},
"steps_from_proto": {
"total": 46.67300946199771,
"count": 31561,
"is_parallel": true,
"self": 11.6029381770403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.07007128495741,
"count": 252488,
"is_parallel": true,
"self": 35.07007128495741
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 360.0757737880084,
"count": 31562,
"self": 1.212380208996251,
"children": {
"process_trajectory": {
"total": 82.00099711201199,
"count": 31562,
"self": 81.89295775001199,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10803936199999953,
"count": 1,
"self": 0.10803936199999953
}
}
},
"_update_policy": {
"total": 276.86239646700017,
"count": 211,
"self": 109.21426200999713,
"children": {
"TorchPPOOptimizer.update": {
"total": 167.64813445700304,
"count": 11391,
"self": 167.64813445700304
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.526000005469541e-06,
"count": 1,
"self": 1.526000005469541e-06
},
"TrainerController._save_models": {
"total": 0.09615453700007492,
"count": 1,
"self": 0.00159229700011565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09456223999995927,
"count": 1,
"self": 0.09456223999995927
}
}
}
}
}
}
}