diff --git "a/sf_log.txt" "b/sf_log.txt"
new file mode 100644
--- /dev/null
+++ "b/sf_log.txt"
@@ -0,0 +1,1706 @@
+[2024-08-06 07:27:23,021][00403] Saving configuration to /content/train_dir/default_experiment/config.json...
+[2024-08-06 07:27:23,025][00403] Rollout worker 0 uses device cpu
+[2024-08-06 07:27:23,026][00403] Rollout worker 1 uses device cpu
+[2024-08-06 07:27:23,028][00403] Rollout worker 2 uses device cpu
+[2024-08-06 07:27:23,030][00403] Rollout worker 3 uses device cpu
+[2024-08-06 07:27:23,032][00403] Rollout worker 4 uses device cpu
+[2024-08-06 07:27:23,037][00403] Rollout worker 5 uses device cpu
+[2024-08-06 07:27:23,037][00403] Rollout worker 6 uses device cpu
+[2024-08-06 07:27:23,039][00403] Rollout worker 7 uses device cpu
+[2024-08-06 07:27:23,197][00403] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-06 07:27:23,199][00403] InferenceWorker_p0-w0: min num requests: 2
+[2024-08-06 07:27:23,234][00403] Starting all processes...
+[2024-08-06 07:27:23,235][00403] Starting process learner_proc0
+[2024-08-06 07:27:24,516][00403] Starting all processes...
+[2024-08-06 07:27:24,530][00403] Starting process inference_proc0-0
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc0
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc1
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc2
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc3
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc4
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc5
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc6
+[2024-08-06 07:27:24,531][00403] Starting process rollout_proc7
+[2024-08-06 07:27:39,294][08747] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-06 07:27:39,302][08747] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+[2024-08-06 07:27:39,431][08747] Num visible devices: 1
+[2024-08-06 07:27:39,470][08747] Starting seed is not provided
+[2024-08-06 07:27:39,470][08747] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-06 07:27:39,470][08747] Initializing actor-critic model on device cuda:0
+[2024-08-06 07:27:39,471][08747] RunningMeanStd input shape: (3, 72, 128)
+[2024-08-06 07:27:39,474][08747] RunningMeanStd input shape: (1,)
+[2024-08-06 07:27:39,598][08771] Worker 7 uses CPU cores [1]
+[2024-08-06 07:27:39,643][08747] ConvEncoder: input_channels=3
+[2024-08-06 07:27:39,949][08760] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-06 07:27:39,954][08760] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+[2024-08-06 07:27:39,970][08772] Worker 6 uses CPU cores [0]
+[2024-08-06 07:27:40,067][08761] Worker 1 uses CPU cores [1]
+[2024-08-06 07:27:40,078][08762] Worker 0 uses CPU cores [0]
+[2024-08-06 07:27:40,077][08768] Worker 4 uses CPU cores [0]
+[2024-08-06 07:27:40,082][08760] Num visible devices: 1
+[2024-08-06 07:27:40,109][08770] Worker 5 uses CPU cores [1]
+[2024-08-06 07:27:40,186][08763] Worker 2 uses CPU cores [0]
+[2024-08-06 07:27:40,191][08764] Worker 3 uses CPU cores [1]
+[2024-08-06 07:27:40,300][08747] Conv encoder output size: 512
+[2024-08-06 07:27:40,300][08747] Policy head output size: 512
+[2024-08-06 07:27:40,374][08747] Created Actor Critic model with architecture:
+[2024-08-06 07:27:40,374][08747] ActorCriticSharedWeights(
+  (obs_normalizer): ObservationNormalizer(
+    (running_mean_std): RunningMeanStdDictInPlace(
+      (running_mean_std): ModuleDict(
+        (obs): RunningMeanStdInPlace()
+      )
+    )
+  )
+  (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+  (encoder): VizdoomEncoder(
+    (basic_encoder): ConvEncoder(
+      (enc): RecursiveScriptModule(
+        original_name=ConvEncoderImpl
+        (conv_head): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Conv2d)
+          (1): RecursiveScriptModule(original_name=ELU)
+          (2): RecursiveScriptModule(original_name=Conv2d)
+          (3): RecursiveScriptModule(original_name=ELU)
+          (4): RecursiveScriptModule(original_name=Conv2d)
+          (5): RecursiveScriptModule(original_name=ELU)
+        )
+        (mlp_layers): RecursiveScriptModule(
+          original_name=Sequential
+          (0): RecursiveScriptModule(original_name=Linear)
+          (1): RecursiveScriptModule(original_name=ELU)
+        )
+      )
+    )
+  )
+  (core): ModelCoreRNN(
+    (core): GRU(512, 512)
+  )
+  (decoder): MlpDecoder(
+    (mlp): Identity()
+  )
+  (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+  (action_parameterization): ActionParameterizationDefault(
+    (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+  )
+)
+[2024-08-06 07:27:40,823][08747] Using optimizer <class 'torch.optim.adam.Adam'>
+[2024-08-06 07:27:41,883][08747] No checkpoints found
+[2024-08-06 07:27:41,883][08747] Did not load from checkpoint, starting from scratch!
+[2024-08-06 07:27:41,883][08747] Initialized policy 0 weights for model version 0
+[2024-08-06 07:27:41,886][08747] LearnerWorker_p0 finished initialization!
+[2024-08-06 07:27:41,888][08747] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+[2024-08-06 07:27:42,128][08760] RunningMeanStd input shape: (3, 72, 128)
+[2024-08-06 07:27:42,129][08760] RunningMeanStd input shape: (1,)
+[2024-08-06 07:27:42,141][08760] ConvEncoder: input_channels=3
+[2024-08-06 07:27:42,245][08760] Conv encoder output size: 512
+[2024-08-06 07:27:42,245][08760] Policy head output size: 512
+[2024-08-06 07:27:42,299][00403] Inference worker 0-0 is ready!
+[2024-08-06 07:27:42,301][00403] All inference workers are ready! Signal rollout workers to start!
+[2024-08-06 07:27:42,566][08768] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,573][08762] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,591][08761] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,596][08763] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,604][08764] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,602][08772] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,608][08771] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,610][08770] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:27:42,952][08762] VizDoom game.init() threw an exception ViZDoomUnexpectedExitException('Controlled ViZDoom instance exited unexpectedly.'). Terminate process...
+[2024-08-06 07:27:42,954][08764] VizDoom game.init() threw an exception ViZDoomUnexpectedExitException('Controlled ViZDoom instance exited unexpectedly.'). Terminate process...
+[2024-08-06 07:27:42,952][08771] VizDoom game.init() threw an exception ViZDoomUnexpectedExitException('Controlled ViZDoom instance exited unexpectedly.'). Terminate process...
+[2024-08-06 07:27:42,956][08772] VizDoom game.init() threw an exception ViZDoomUnexpectedExitException('Controlled ViZDoom instance exited unexpectedly.'). Terminate process...
+[2024-08-06 07:27:42,960][08772] EvtLoop [rollout_proc6_evt_loop, process=rollout_proc6] unhandled exception in slot='init' connected to emitter=Emitter(object_id='Sampler', signal_name='_inference_workers_initialized'), args=()
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 228, in _game_init
+    self.game.init()
+vizdoom.vizdoom.ViZDoomUnexpectedExitException: Controlled ViZDoom instance exited unexpectedly.
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 150, in init
+    env_runner.init(self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 418, in init
+    self._reset()
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 430, in _reset
+    observations, info = e.reset(seed=seed)  # new way of doing seeding since Gym 0.26.0
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 125, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 110, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 515, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 323, in reset
+    self._ensure_initialized()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 274, in _ensure_initialized
+    self.initialize()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 269, in initialize
+    self._game_init()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 244, in _game_init
+    raise EnvCriticalError()
+sample_factory.envs.env_utils.EnvCriticalError
+[2024-08-06 07:27:42,963][08772] Unhandled exception in evt loop rollout_proc6_evt_loop
+[2024-08-06 07:27:42,954][08762] EvtLoop [rollout_proc0_evt_loop, process=rollout_proc0] unhandled exception in slot='init' connected to emitter=Emitter(object_id='Sampler', signal_name='_inference_workers_initialized'), args=()
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 228, in _game_init
+    self.game.init()
+vizdoom.vizdoom.ViZDoomUnexpectedExitException: Controlled ViZDoom instance exited unexpectedly.
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 150, in init
+    env_runner.init(self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 418, in init
+    self._reset()
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 430, in _reset
+    observations, info = e.reset(seed=seed)  # new way of doing seeding since Gym 0.26.0
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 125, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 110, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 515, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 323, in reset
+    self._ensure_initialized()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 274, in _ensure_initialized
+    self.initialize()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 269, in initialize
+    self._game_init()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 244, in _game_init
+    raise EnvCriticalError()
+sample_factory.envs.env_utils.EnvCriticalError
+[2024-08-06 07:27:42,964][08762] Unhandled exception in evt loop rollout_proc0_evt_loop
+[2024-08-06 07:27:42,956][08761] VizDoom game.init() threw an exception ViZDoomUnexpectedExitException('Controlled ViZDoom instance exited unexpectedly.'). Terminate process...
+[2024-08-06 07:27:42,958][08771] EvtLoop [rollout_proc7_evt_loop, process=rollout_proc7] unhandled exception in slot='init' connected to emitter=Emitter(object_id='Sampler', signal_name='_inference_workers_initialized'), args=()
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 228, in _game_init
+    self.game.init()
+vizdoom.vizdoom.ViZDoomUnexpectedExitException: Controlled ViZDoom instance exited unexpectedly.
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 150, in init
+    env_runner.init(self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 418, in init
+    self._reset()
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 430, in _reset
+    observations, info = e.reset(seed=seed)  # new way of doing seeding since Gym 0.26.0
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 125, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 110, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 515, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 323, in reset
+    self._ensure_initialized()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 274, in _ensure_initialized
+    self.initialize()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 269, in initialize
+    self._game_init()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 244, in _game_init
+    raise EnvCriticalError()
+sample_factory.envs.env_utils.EnvCriticalError
+[2024-08-06 07:27:42,978][08771] Unhandled exception in evt loop rollout_proc7_evt_loop
+[2024-08-06 07:27:42,966][08764] EvtLoop [rollout_proc3_evt_loop, process=rollout_proc3] unhandled exception in slot='init' connected to emitter=Emitter(object_id='Sampler', signal_name='_inference_workers_initialized'), args=()
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 228, in _game_init
+    self.game.init()
+vizdoom.vizdoom.ViZDoomUnexpectedExitException: Controlled ViZDoom instance exited unexpectedly.
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 150, in init
+    env_runner.init(self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 418, in init
+    self._reset()
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 430, in _reset
+    observations, info = e.reset(seed=seed)  # new way of doing seeding since Gym 0.26.0
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 125, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 110, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 515, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 323, in reset
+    self._ensure_initialized()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 274, in _ensure_initialized
+    self.initialize()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 269, in initialize
+    self._game_init()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 244, in _game_init
+    raise EnvCriticalError()
+sample_factory.envs.env_utils.EnvCriticalError
+[2024-08-06 07:27:42,985][08764] Unhandled exception in evt loop rollout_proc3_evt_loop
+[2024-08-06 07:27:42,972][08761] EvtLoop [rollout_proc1_evt_loop, process=rollout_proc1] unhandled exception in slot='init' connected to emitter=Emitter(object_id='Sampler', signal_name='_inference_workers_initialized'), args=()
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 228, in _game_init
+    self.game.init()
+vizdoom.vizdoom.ViZDoomUnexpectedExitException: Controlled ViZDoom instance exited unexpectedly.
+
+During handling of the above exception, another exception occurred:
+
+Traceback (most recent call last):
+  File "/usr/local/lib/python3.10/dist-packages/signal_slot/signal_slot.py", line 355, in _process_signal
+    slot_callable(*args)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/rollout_worker.py", line 150, in init
+    env_runner.init(self.timing)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 418, in init
+    self._reset()
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/sampling/non_batched_sampling.py", line 430, in _reset
+    observations, info = e.reset(seed=seed)  # new way of doing seeding since Gym 0.26.0
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 125, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/algo/utils/make_env.py", line 110, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/scenario_wrappers/gathering_reward_shaping.py", line 30, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 515, in reset
+    obs, info = self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sample_factory/envs/env_wrappers.py", line 82, in reset
+    obs, info = self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/gymnasium/core.py", line 467, in reset
+    return self.env.reset(seed=seed, options=options)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/wrappers/multiplayer_stats.py", line 51, in reset
+    return self.env.reset(**kwargs)
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 323, in reset
+    self._ensure_initialized()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 274, in _ensure_initialized
+    self.initialize()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 269, in initialize
+    self._game_init()
+  File "/usr/local/lib/python3.10/dist-packages/sf_examples/vizdoom/doom/doom_gym.py", line 244, in _game_init
+    raise EnvCriticalError()
+sample_factory.envs.env_utils.EnvCriticalError
+[2024-08-06 07:27:42,990][08761] Unhandled exception in evt loop rollout_proc1_evt_loop
+[2024-08-06 07:27:43,189][00403] Heartbeat connected on Batcher_0
+[2024-08-06 07:27:43,193][00403] Heartbeat connected on LearnerWorker_p0
+[2024-08-06 07:27:43,252][00403] Heartbeat connected on InferenceWorker_p0-w0
+[2024-08-06 07:27:43,968][00403] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-08-06 07:27:45,024][08763] Decorrelating experience for 0 frames...
+[2024-08-06 07:27:45,025][08768] Decorrelating experience for 0 frames...
+[2024-08-06 07:27:45,774][08770] Decorrelating experience for 0 frames...
+[2024-08-06 07:27:45,902][08768] Decorrelating experience for 32 frames...
+[2024-08-06 07:27:46,294][08770] Decorrelating experience for 32 frames...
+[2024-08-06 07:27:46,445][08763] Decorrelating experience for 32 frames...
+[2024-08-06 07:27:46,677][08768] Decorrelating experience for 64 frames...
+[2024-08-06 07:27:47,032][08763] Decorrelating experience for 64 frames...
+[2024-08-06 07:27:47,135][08770] Decorrelating experience for 64 frames...
+[2024-08-06 07:27:47,559][08770] Decorrelating experience for 96 frames...
+[2024-08-06 07:27:47,652][00403] Heartbeat connected on RolloutWorker_w5
+[2024-08-06 07:27:47,726][08763] Decorrelating experience for 96 frames...
+[2024-08-06 07:27:47,841][00403] Heartbeat connected on RolloutWorker_w2
+[2024-08-06 07:27:47,883][08768] Decorrelating experience for 96 frames...
+[2024-08-06 07:27:47,956][00403] Heartbeat connected on RolloutWorker_w4
+[2024-08-06 07:27:48,968][00403] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 3.2. Samples: 16. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+[2024-08-06 07:27:48,970][00403] Avg episode reward: [(0, '1.067')]
+[2024-08-06 07:27:51,194][08747] Signal inference workers to stop experience collection...
+[2024-08-06 07:27:51,213][08760] InferenceWorker_p0-w0: stopping experience collection
+[2024-08-06 07:27:53,880][08747] Signal inference workers to resume experience collection...
+[2024-08-06 07:27:53,880][08760] InferenceWorker_p0-w0: resuming experience collection
+[2024-08-06 07:27:53,972][00403] Fps is (10 sec: 409.5, 60 sec: 409.5, 300 sec: 409.5). Total num frames: 4096. Throughput: 0: 223.1. Samples: 2232. Policy #0 lag: (min: 0.0, avg: 0.0, max: 0.0)
+[2024-08-06 07:27:53,983][00403] Avg episode reward: [(0, '3.351')]
+[2024-08-06 07:27:58,968][00403] Fps is (10 sec: 2048.0, 60 sec: 1365.3, 300 sec: 1365.3). Total num frames: 20480. Throughput: 0: 299.6. Samples: 4494. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+[2024-08-06 07:27:58,973][00403] Avg episode reward: [(0, '3.870')]
+[2024-08-06 07:28:03,030][08760] Updated weights for policy 0, policy_version 10 (0.0016)
+[2024-08-06 07:28:03,968][00403] Fps is (10 sec: 3687.7, 60 sec: 2048.0, 300 sec: 2048.0). Total num frames: 40960. Throughput: 0: 536.5. Samples: 10730. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:03,973][00403] Avg episode reward: [(0, '4.206')]
+[2024-08-06 07:28:08,972][00403] Fps is (10 sec: 3685.1, 60 sec: 2293.4, 300 sec: 2293.4). Total num frames: 57344. Throughput: 0: 540.6. Samples: 13516. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:08,975][00403] Avg episode reward: [(0, '4.201')]
+[2024-08-06 07:28:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 73728. Throughput: 0: 593.5. Samples: 17806. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:28:13,972][00403] Avg episode reward: [(0, '4.405')]
+[2024-08-06 07:28:15,330][08760] Updated weights for policy 0, policy_version 20 (0.0017)
+[2024-08-06 07:28:18,968][00403] Fps is (10 sec: 3687.7, 60 sec: 2691.7, 300 sec: 2691.7). Total num frames: 94208. Throughput: 0: 678.9. Samples: 23760. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:18,971][00403] Avg episode reward: [(0, '4.442')]
+[2024-08-06 07:28:23,969][00403] Fps is (10 sec: 3686.2, 60 sec: 2764.8, 300 sec: 2764.8). Total num frames: 110592. Throughput: 0: 662.3. Samples: 26494. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:23,975][00403] Avg episode reward: [(0, '4.406')]
+[2024-08-06 07:28:23,978][08747] Saving new best policy, reward=4.406!
+[2024-08-06 07:28:27,074][08760] Updated weights for policy 0, policy_version 30 (0.0013)
+[2024-08-06 07:28:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 2821.7, 300 sec: 2821.7). Total num frames: 126976. Throughput: 0: 691.7. Samples: 31126. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:28,974][00403] Avg episode reward: [(0, '4.375')]
+[2024-08-06 07:28:33,968][00403] Fps is (10 sec: 3686.6, 60 sec: 2949.1, 300 sec: 2949.1). Total num frames: 147456. Throughput: 0: 827.2. Samples: 37242. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:28:33,971][00403] Avg episode reward: [(0, '4.536')]
+[2024-08-06 07:28:33,977][08747] Saving new best policy, reward=4.536!
+[2024-08-06 07:28:38,849][08760] Updated weights for policy 0, policy_version 40 (0.0024)
+[2024-08-06 07:28:38,969][00403] Fps is (10 sec: 3686.1, 60 sec: 2978.9, 300 sec: 2978.9). Total num frames: 163840. Throughput: 0: 830.9. Samples: 39622. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:28:38,972][00403] Avg episode reward: [(0, '4.509')]
+[2024-08-06 07:28:43,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3072.0, 300 sec: 3072.0). Total num frames: 184320. Throughput: 0: 890.9. Samples: 44584. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:43,970][00403] Avg episode reward: [(0, '4.585')]
+[2024-08-06 07:28:43,975][08747] Saving new best policy, reward=4.585!
+[2024-08-06 07:28:48,968][00403] Fps is (10 sec: 3686.7, 60 sec: 3345.1, 300 sec: 3087.8). Total num frames: 200704. Throughput: 0: 885.5. Samples: 50578. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:48,971][00403] Avg episode reward: [(0, '4.544')]
+[2024-08-06 07:28:49,069][08760] Updated weights for policy 0, policy_version 50 (0.0014)
+[2024-08-06 07:28:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3550.1, 300 sec: 3101.3). Total num frames: 217088. Throughput: 0: 874.3. Samples: 52856. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:28:53,973][00403] Avg episode reward: [(0, '4.507')]
+[2024-08-06 07:28:58,972][00403] Fps is (10 sec: 3685.1, 60 sec: 3617.9, 300 sec: 3167.4). Total num frames: 237568. Throughput: 0: 894.0. Samples: 58038. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:28:58,977][00403] Avg episode reward: [(0, '4.422')]
+[2024-08-06 07:29:00,852][08760] Updated weights for policy 0, policy_version 60 (0.0016)
+[2024-08-06 07:29:03,969][00403] Fps is (10 sec: 4095.9, 60 sec: 3618.1, 300 sec: 3225.6). Total num frames: 258048. Throughput: 0: 899.9. Samples: 64254. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:03,971][00403] Avg episode reward: [(0, '4.477')]
+[2024-08-06 07:29:08,968][00403] Fps is (10 sec: 3278.0, 60 sec: 3550.1, 300 sec: 3180.4). Total num frames: 270336. Throughput: 0: 885.7. Samples: 66352. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:08,971][00403] Avg episode reward: [(0, '4.659')]
+[2024-08-06 07:29:08,980][08747] Saving new best policy, reward=4.659!
+[2024-08-06 07:29:12,586][08760] Updated weights for policy 0, policy_version 70 (0.0017)
+[2024-08-06 07:29:13,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3231.3). Total num frames: 290816. Throughput: 0: 901.7. Samples: 71704. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:29:13,972][00403] Avg episode reward: [(0, '4.620')]
+[2024-08-06 07:29:18,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3276.8). Total num frames: 311296. Throughput: 0: 901.5. Samples: 77808. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:18,971][00403] Avg episode reward: [(0, '4.445')]
+[2024-08-06 07:29:18,981][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000076_311296.pth...
+[2024-08-06 07:29:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3235.8). Total num frames: 323584. Throughput: 0: 890.5. Samples: 79696. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:29:23,975][00403] Avg episode reward: [(0, '4.480')]
+[2024-08-06 07:29:24,242][08760] Updated weights for policy 0, policy_version 80 (0.0021)
+[2024-08-06 07:29:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3276.8). Total num frames: 344064. Throughput: 0: 903.2. Samples: 85230. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:29:28,970][00403] Avg episode reward: [(0, '4.428')]
+[2024-08-06 07:29:33,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3314.0). Total num frames: 364544. Throughput: 0: 905.2. Samples: 91314. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:33,971][00403] Avg episode reward: [(0, '4.467')]
+[2024-08-06 07:29:34,881][08760] Updated weights for policy 0, policy_version 90 (0.0021)
+[2024-08-06 07:29:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3312.4). Total num frames: 380928. Throughput: 0: 897.3. Samples: 93236. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:38,974][00403] Avg episode reward: [(0, '4.503')]
+[2024-08-06 07:29:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3345.1). Total num frames: 401408. Throughput: 0: 910.4. Samples: 99004. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:43,975][00403] Avg episode reward: [(0, '4.493')]
+[2024-08-06 07:29:45,881][08760] Updated weights for policy 0, policy_version 100 (0.0019)
+[2024-08-06 07:29:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3342.3). Total num frames: 417792. Throughput: 0: 899.2. Samples: 104720. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:48,970][00403] Avg episode reward: [(0, '4.426')]
+[2024-08-06 07:29:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3339.8). Total num frames: 434176. Throughput: 0: 896.3. Samples: 106684. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:53,971][00403] Avg episode reward: [(0, '4.425')]
+[2024-08-06 07:29:57,383][08760] Updated weights for policy 0, policy_version 110 (0.0014)
+[2024-08-06 07:29:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.4, 300 sec: 3367.8). Total num frames: 454656. Throughput: 0: 912.6. Samples: 112770. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:29:58,973][00403] Avg episode reward: [(0, '4.655')]
+[2024-08-06 07:30:03,969][00403] Fps is (10 sec: 3686.1, 60 sec: 3549.8, 300 sec: 3364.6). Total num frames: 471040. Throughput: 0: 900.9. Samples: 118348. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:03,971][00403] Avg episode reward: [(0, '4.560')]
+[2024-08-06 07:30:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3361.5). Total num frames: 487424. Throughput: 0: 901.3. Samples: 120256. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:08,972][00403] Avg episode reward: [(0, '4.509')]
+[2024-08-06 07:30:09,115][08760] Updated weights for policy 0, policy_version 120 (0.0016)
+[2024-08-06 07:30:13,968][00403] Fps is (10 sec: 4096.3, 60 sec: 3686.4, 300 sec: 3413.3). Total num frames: 512000. Throughput: 0: 915.6. Samples: 126430. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:13,970][00403] Avg episode reward: [(0, '4.425')]
+[2024-08-06 07:30:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3382.5). Total num frames: 524288. Throughput: 0: 900.2. Samples: 131822. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:18,971][00403] Avg episode reward: [(0, '4.546')]
+[2024-08-06 07:30:20,640][08760] Updated weights for policy 0, policy_version 130 (0.0016)
+[2024-08-06 07:30:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3404.8). Total num frames: 544768. Throughput: 0: 902.4. Samples: 133844. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:23,971][00403] Avg episode reward: [(0, '4.566')]
+[2024-08-06 07:30:28,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3425.7). Total num frames: 565248. Throughput: 0: 912.4. Samples: 140062. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:28,976][00403] Avg episode reward: [(0, '4.663')]
+[2024-08-06 07:30:28,987][08747] Saving new best policy, reward=4.663!
+[2024-08-06 07:30:30,694][08760] Updated weights for policy 0, policy_version 140 (0.0013)
+[2024-08-06 07:30:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3421.4). Total num frames: 581632. Throughput: 0: 901.0. Samples: 145266. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:33,973][00403] Avg episode reward: [(0, '4.609')]
+[2024-08-06 07:30:38,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3417.2). Total num frames: 598016. Throughput: 0: 906.8. Samples: 147490. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:38,973][00403] Avg episode reward: [(0, '4.511')]
+[2024-08-06 07:30:42,456][08760] Updated weights for policy 0, policy_version 150 (0.0021)
+[2024-08-06 07:30:43,970][00403] Fps is (10 sec: 3685.9, 60 sec: 3618.1, 300 sec: 3436.1). Total num frames: 618496. Throughput: 0: 909.2. Samples: 153686. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:30:43,972][00403] Avg episode reward: [(0, '4.424')]
+[2024-08-06 07:30:48,974][00403] Fps is (10 sec: 3684.4, 60 sec: 3617.8, 300 sec: 3431.7). Total num frames: 634880. Throughput: 0: 898.5. Samples: 158784. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:30:48,976][00403] Avg episode reward: [(0, '4.551')]
+[2024-08-06 07:30:53,970][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.0, 300 sec: 3427.7). Total num frames: 651264. Throughput: 0: 906.8. Samples: 161062. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:30:53,976][00403] Avg episode reward: [(0, '4.502')]
+[2024-08-06 07:30:54,097][08760] Updated weights for policy 0, policy_version 160 (0.0013)
+[2024-08-06 07:30:58,968][00403] Fps is (10 sec: 3688.4, 60 sec: 3618.1, 300 sec: 3444.8). Total num frames: 671744. Throughput: 0: 908.8. Samples: 167328. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:30:58,975][00403] Avg episode reward: [(0, '4.462')]
+[2024-08-06 07:31:03,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3618.2, 300 sec: 3440.6). Total num frames: 688128. Throughput: 0: 902.0. Samples: 172414. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:31:03,971][00403] Avg episode reward: [(0, '4.550')]
+[2024-08-06 07:31:05,744][08760] Updated weights for policy 0, policy_version 170 (0.0020)
+[2024-08-06 07:31:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3456.6). Total num frames: 708608. Throughput: 0: 912.0. Samples: 174886. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:08,973][00403] Avg episode reward: [(0, '4.517')]
+[2024-08-06 07:31:13,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3471.8). Total num frames: 729088. Throughput: 0: 912.1. Samples: 181108. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:13,973][00403] Avg episode reward: [(0, '4.433')]
+[2024-08-06 07:31:15,918][08760] Updated weights for policy 0, policy_version 180 (0.0013)
+[2024-08-06 07:31:18,970][00403] Fps is (10 sec: 3276.3, 60 sec: 3618.0, 300 sec: 3448.2). Total num frames: 741376. Throughput: 0: 905.0. Samples: 185992. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:18,972][00403] Avg episode reward: [(0, '4.500')]
+[2024-08-06 07:31:18,988][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000181_741376.pth...
+[2024-08-06 07:31:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3463.0). Total num frames: 761856. Throughput: 0: 911.0. Samples: 188484. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:23,970][00403] Avg episode reward: [(0, '4.603')]
+[2024-08-06 07:31:27,286][08760] Updated weights for policy 0, policy_version 190 (0.0019)
+[2024-08-06 07:31:28,969][00403] Fps is (10 sec: 4096.6, 60 sec: 3618.1, 300 sec: 3477.0). Total num frames: 782336. Throughput: 0: 910.2. Samples: 194644. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:31:28,970][00403] Avg episode reward: [(0, '4.693')]
+[2024-08-06 07:31:28,978][08747] Saving new best policy, reward=4.693!
+[2024-08-06 07:31:33,970][00403] Fps is (10 sec: 3276.3, 60 sec: 3549.8, 300 sec: 3454.9). Total num frames: 794624. Throughput: 0: 903.4. Samples: 199432. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:31:33,973][00403] Avg episode reward: [(0, '4.541')]
+[2024-08-06 07:31:38,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3468.5). Total num frames: 815104. Throughput: 0: 911.9. Samples: 202094. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:31:38,971][00403] Avg episode reward: [(0, '4.438')]
+[2024-08-06 07:31:39,161][08760] Updated weights for policy 0, policy_version 200 (0.0013)
+[2024-08-06 07:31:43,968][00403] Fps is (10 sec: 4096.6, 60 sec: 3618.2, 300 sec: 3481.6). Total num frames: 835584. Throughput: 0: 911.4. Samples: 208340. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:43,970][00403] Avg episode reward: [(0, '4.713')]
+[2024-08-06 07:31:43,997][08747] Saving new best policy, reward=4.713!
+[2024-08-06 07:31:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.5, 300 sec: 3477.4). Total num frames: 851968. Throughput: 0: 900.6. Samples: 212940. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:31:48,971][00403] Avg episode reward: [(0, '4.694')]
+[2024-08-06 07:31:50,818][08760] Updated weights for policy 0, policy_version 210 (0.0022)
+[2024-08-06 07:31:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3489.8). Total num frames: 872448. Throughput: 0: 909.7. Samples: 215822. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:53,973][00403] Avg episode reward: [(0, '4.797')]
+[2024-08-06 07:31:53,979][08747] Saving new best policy, reward=4.797!
+[2024-08-06 07:31:58,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3501.7). Total num frames: 892928. Throughput: 0: 906.8. Samples: 221912. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:31:58,974][00403] Avg episode reward: [(0, '5.081')]
+[2024-08-06 07:31:58,984][08747] Saving new best policy, reward=5.081!
+[2024-08-06 07:32:02,013][08760] Updated weights for policy 0, policy_version 220 (0.0013)
+[2024-08-06 07:32:03,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3481.6). Total num frames: 905216. Throughput: 0: 896.2. Samples: 226318. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:32:03,972][00403] Avg episode reward: [(0, '5.189')]
+[2024-08-06 07:32:03,976][08747] Saving new best policy, reward=5.189!
+[2024-08-06 07:32:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3493.2). Total num frames: 925696. Throughput: 0: 905.7. Samples: 229242. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:32:08,972][00403] Avg episode reward: [(0, '5.179')]
+[2024-08-06 07:32:12,484][08760] Updated weights for policy 0, policy_version 230 (0.0013)
+[2024-08-06 07:32:13,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3504.4). Total num frames: 946176. Throughput: 0: 908.3. Samples: 235516. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:32:13,974][00403] Avg episode reward: [(0, '5.502')]
+[2024-08-06 07:32:13,976][08747] Saving new best policy, reward=5.502!
+[2024-08-06 07:32:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3485.3). Total num frames: 958464. Throughput: 0: 897.0. Samples: 239796. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:32:18,971][00403] Avg episode reward: [(0, '5.371')]
+[2024-08-06 07:32:23,968][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3496.2). Total num frames: 978944. Throughput: 0: 906.5. Samples: 242886. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:32:23,971][00403] Avg episode reward: [(0, '4.960')]
+[2024-08-06 07:32:24,168][08760] Updated weights for policy 0, policy_version 240 (0.0019)
+[2024-08-06 07:32:28,969][00403] Fps is (10 sec: 4095.7, 60 sec: 3618.1, 300 sec: 3506.7). Total num frames: 999424. Throughput: 0: 906.1. Samples: 249116. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:32:28,972][00403] Avg episode reward: [(0, '4.970')]
+[2024-08-06 07:32:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3502.8). Total num frames: 1015808. Throughput: 0: 903.8. Samples: 253612. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:32:33,973][00403] Avg episode reward: [(0, '5.214')]
+[2024-08-06 07:32:35,829][08760] Updated weights for policy 0, policy_version 250 (0.0017)
+[2024-08-06 07:32:38,970][00403] Fps is (10 sec: 3685.9, 60 sec: 3686.3, 300 sec: 3512.8). Total num frames: 1036288. Throughput: 0: 908.0. Samples: 256684. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:32:38,972][00403] Avg episode reward: [(0, '5.445')]
+[2024-08-06 07:32:43,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 1052672. Throughput: 0: 910.5. Samples: 262884. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:32:43,974][00403] Avg episode reward: [(0, '5.547')]
+[2024-08-06 07:32:43,976][08747] Saving new best policy, reward=5.547!
+[2024-08-06 07:32:47,551][08760] Updated weights for policy 0, policy_version 260 (0.0019)
+[2024-08-06 07:32:48,968][00403] Fps is (10 sec: 3277.5, 60 sec: 3618.1, 300 sec: 3610.1). Total num frames: 1069056. Throughput: 0: 908.7. Samples: 267210. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:32:48,970][00403] Avg episode reward: [(0, '5.624')]
+[2024-08-06 07:32:48,983][08747] Saving new best policy, reward=5.624!
+[2024-08-06 07:32:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 1089536. Throughput: 0: 911.3. Samples: 270252. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:32:53,970][00403] Avg episode reward: [(0, '5.762')]
+[2024-08-06 07:32:53,976][08747] Saving new best policy, reward=5.762!
+[2024-08-06 07:32:57,898][08760] Updated weights for policy 0, policy_version 270 (0.0014)
+[2024-08-06 07:32:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 1105920. Throughput: 0: 906.5. Samples: 276310. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:32:58,975][00403] Avg episode reward: [(0, '5.413')]
+[2024-08-06 07:33:03,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3610.1). Total num frames: 1122304. Throughput: 0: 912.8. Samples: 280872. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:33:03,970][00403] Avg episode reward: [(0, '5.561')]
+[2024-08-06 07:33:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 1142784. Throughput: 0: 912.2. Samples: 283936. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:33:08,973][00403] Avg episode reward: [(0, '5.584')]
+[2024-08-06 07:33:09,220][08760] Updated weights for policy 0, policy_version 280 (0.0016)
+[2024-08-06 07:33:13,969][00403] Fps is (10 sec: 3686.2, 60 sec: 3549.8, 300 sec: 3610.0). Total num frames: 1159168. Throughput: 0: 905.5. Samples: 289864. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:33:13,979][00403] Avg episode reward: [(0, '5.800')]
+[2024-08-06 07:33:13,990][08747] Saving new best policy, reward=5.800!
+[2024-08-06 07:33:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 1179648. Throughput: 0: 907.9. Samples: 294468. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:33:18,970][00403] Avg episode reward: [(0, '5.788')]
+[2024-08-06 07:33:18,983][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000288_1179648.pth...
+[2024-08-06 07:33:19,079][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000076_311296.pth
+[2024-08-06 07:33:20,903][08760] Updated weights for policy 0, policy_version 290 (0.0013)
+[2024-08-06 07:33:23,968][00403] Fps is (10 sec: 3277.0, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 1191936. Throughput: 0: 906.5. Samples: 297476. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:33:23,972][00403] Avg episode reward: [(0, '6.265')]
+[2024-08-06 07:33:24,012][08747] Saving new best policy, reward=6.265!
+[2024-08-06 07:33:28,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3596.1). Total num frames: 1208320. Throughput: 0: 858.2. Samples: 301504. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:33:28,975][00403] Avg episode reward: [(0, '6.173')]
+[2024-08-06 07:33:33,970][00403] Fps is (10 sec: 3276.3, 60 sec: 3481.5, 300 sec: 3596.1). Total num frames: 1224704. Throughput: 0: 869.4. Samples: 306336. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:33:33,972][00403] Avg episode reward: [(0, '6.222')]
+[2024-08-06 07:33:34,257][08760] Updated weights for policy 0, policy_version 300 (0.0017)
+[2024-08-06 07:33:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.7, 300 sec: 3596.1). Total num frames: 1245184. Throughput: 0: 870.9. Samples: 309442. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:33:38,971][00403] Avg episode reward: [(0, '6.312')]
+[2024-08-06 07:33:38,984][08747] Saving new best policy, reward=6.312!
+[2024-08-06 07:33:43,968][00403] Fps is (10 sec: 3686.9, 60 sec: 3481.6, 300 sec: 3596.1). Total num frames: 1261568. Throughput: 0: 858.5. Samples: 314942. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:33:43,977][00403] Avg episode reward: [(0, '7.389')]
+[2024-08-06 07:33:43,979][08747] Saving new best policy, reward=7.389!
+[2024-08-06 07:33:46,228][08760] Updated weights for policy 0, policy_version 310 (0.0014)
+[2024-08-06 07:33:48,968][00403] Fps is (10 sec: 3276.7, 60 sec: 3481.6, 300 sec: 3596.1). Total num frames: 1277952. Throughput: 0: 864.9. Samples: 319792. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:33:48,975][00403] Avg episode reward: [(0, '8.328')]
+[2024-08-06 07:33:48,986][08747] Saving new best policy, reward=8.328!
+[2024-08-06 07:33:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3596.2). Total num frames: 1298432. Throughput: 0: 864.8. Samples: 322852. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:33:53,970][00403] Avg episode reward: [(0, '8.433')]
+[2024-08-06 07:33:53,975][08747] Saving new best policy, reward=8.433!
+[2024-08-06 07:33:56,683][08760] Updated weights for policy 0, policy_version 320 (0.0016)
+[2024-08-06 07:33:58,972][00403] Fps is (10 sec: 3685.2, 60 sec: 3481.4, 300 sec: 3582.2). Total num frames: 1314816. Throughput: 0: 851.4. Samples: 328178. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:33:58,974][00403] Avg episode reward: [(0, '8.191')]
+[2024-08-06 07:34:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 1335296. Throughput: 0: 866.7. Samples: 333470. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:03,971][00403] Avg episode reward: [(0, '7.203')]
+[2024-08-06 07:34:07,842][08760] Updated weights for policy 0, policy_version 330 (0.0016)
+[2024-08-06 07:34:08,968][00403] Fps is (10 sec: 4097.5, 60 sec: 3549.9, 300 sec: 3610.0). Total num frames: 1355776. Throughput: 0: 868.3. Samples: 336548. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:08,971][00403] Avg episode reward: [(0, '7.083')]
+[2024-08-06 07:34:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 1368064. Throughput: 0: 891.8. Samples: 341636. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:13,973][00403] Avg episode reward: [(0, '6.903')]
+[2024-08-06 07:34:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3610.0). Total num frames: 1388544. Throughput: 0: 906.3. Samples: 347118. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:34:18,970][00403] Avg episode reward: [(0, '6.325')]
+[2024-08-06 07:34:19,649][08760] Updated weights for policy 0, policy_version 340 (0.0014)
+[2024-08-06 07:34:23,970][00403] Fps is (10 sec: 4095.4, 60 sec: 3618.0, 300 sec: 3610.0). Total num frames: 1409024. Throughput: 0: 904.9. Samples: 350166. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:23,972][00403] Avg episode reward: [(0, '6.527')]
+[2024-08-06 07:34:28,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3549.8, 300 sec: 3582.3). Total num frames: 1421312. Throughput: 0: 888.7. Samples: 354936. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:28,975][00403] Avg episode reward: [(0, '6.798')]
+[2024-08-06 07:34:31,135][08760] Updated weights for policy 0, policy_version 350 (0.0019)
+[2024-08-06 07:34:33,968][00403] Fps is (10 sec: 3277.3, 60 sec: 3618.2, 300 sec: 3596.1). Total num frames: 1441792. Throughput: 0: 911.0. Samples: 360786. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:34:33,971][00403] Avg episode reward: [(0, '7.952')]
+[2024-08-06 07:34:38,970][00403] Fps is (10 sec: 4095.6, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1462272. Throughput: 0: 911.4. Samples: 363866. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:38,972][00403] Avg episode reward: [(0, '8.517')]
+[2024-08-06 07:34:38,984][08747] Saving new best policy, reward=8.517!
+[2024-08-06 07:34:42,794][08760] Updated weights for policy 0, policy_version 360 (0.0019)
+[2024-08-06 07:34:43,969][00403] Fps is (10 sec: 3686.3, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1478656. Throughput: 0: 895.6. Samples: 368478. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:34:43,975][00403] Avg episode reward: [(0, '8.934')]
+[2024-08-06 07:34:43,979][08747] Saving new best policy, reward=8.934!
+[2024-08-06 07:34:48,968][00403] Fps is (10 sec: 3686.9, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 1499136. Throughput: 0: 909.6. Samples: 374402. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:48,973][00403] Avg episode reward: [(0, '8.741')]
+[2024-08-06 07:34:52,824][08760] Updated weights for policy 0, policy_version 370 (0.0016)
+[2024-08-06 07:34:53,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1515520. Throughput: 0: 909.2. Samples: 377464. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:34:53,977][00403] Avg episode reward: [(0, '8.969')]
+[2024-08-06 07:34:53,980][08747] Saving new best policy, reward=8.969!
+[2024-08-06 07:34:58,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.3, 300 sec: 3596.2). Total num frames: 1531904. Throughput: 0: 894.3. Samples: 381880. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:34:58,974][00403] Avg episode reward: [(0, '8.988')]
+[2024-08-06 07:34:58,983][08747] Saving new best policy, reward=8.988!
+[2024-08-06 07:35:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 1552384. Throughput: 0: 909.3. Samples: 388036. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:35:03,975][00403] Avg episode reward: [(0, '9.529')]
+[2024-08-06 07:35:03,991][08747] Saving new best policy, reward=9.529!
+[2024-08-06 07:35:04,648][08760] Updated weights for policy 0, policy_version 380 (0.0016)
+[2024-08-06 07:35:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 1568768. Throughput: 0: 908.1. Samples: 391028. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:08,976][00403] Avg episode reward: [(0, '9.606')]
+[2024-08-06 07:35:08,985][08747] Saving new best policy, reward=9.606!
+[2024-08-06 07:35:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1585152. Throughput: 0: 898.5. Samples: 395368. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:35:13,974][00403] Avg episode reward: [(0, '10.285')]
+[2024-08-06 07:35:13,976][08747] Saving new best policy, reward=10.285!
+[2024-08-06 07:35:16,452][08760] Updated weights for policy 0, policy_version 390 (0.0013)
+[2024-08-06 07:35:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1605632. Throughput: 0: 904.5. Samples: 401490. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:18,974][00403] Avg episode reward: [(0, '9.504')]
+[2024-08-06 07:35:18,982][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000392_1605632.pth...
+[2024-08-06 07:35:19,075][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000181_741376.pth
+[2024-08-06 07:35:23,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 1622016. Throughput: 0: 903.7. Samples: 404532. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:23,978][00403] Avg episode reward: [(0, '10.035')]
+[2024-08-06 07:35:28,129][08760] Updated weights for policy 0, policy_version 400 (0.0016)
+[2024-08-06 07:35:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 1638400. Throughput: 0: 898.2. Samples: 408898. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:28,973][00403] Avg episode reward: [(0, '10.294')]
+[2024-08-06 07:35:28,981][08747] Saving new best policy, reward=10.294!
+[2024-08-06 07:35:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1658880. Throughput: 0: 903.2. Samples: 415046. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:33,976][00403] Avg episode reward: [(0, '10.229')]
+[2024-08-06 07:35:38,973][00403] Fps is (10 sec: 3684.7, 60 sec: 3549.7, 300 sec: 3582.2). Total num frames: 1675264. Throughput: 0: 901.0. Samples: 418012. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:35:38,975][00403] Avg episode reward: [(0, '9.872')]
+[2024-08-06 07:35:39,198][08760] Updated weights for policy 0, policy_version 410 (0.0016)
+[2024-08-06 07:35:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 1695744. Throughput: 0: 905.9. Samples: 422644. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:43,973][00403] Avg episode reward: [(0, '9.398')]
+[2024-08-06 07:35:48,969][00403] Fps is (10 sec: 4097.8, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 1716224. Throughput: 0: 905.8. Samples: 428796. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:48,971][00403] Avg episode reward: [(0, '9.920')]
+[2024-08-06 07:35:49,810][08760] Updated weights for policy 0, policy_version 420 (0.0013)
+[2024-08-06 07:35:53,970][00403] Fps is (10 sec: 3276.1, 60 sec: 3549.7, 300 sec: 3582.2). Total num frames: 1728512. Throughput: 0: 900.4. Samples: 431548. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:53,973][00403] Avg episode reward: [(0, '10.078')]
+[2024-08-06 07:35:58,970][00403] Fps is (10 sec: 3276.5, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1748992. Throughput: 0: 911.1. Samples: 436368. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:35:58,974][00403] Avg episode reward: [(0, '11.427')]
+[2024-08-06 07:35:58,982][08747] Saving new best policy, reward=11.427!
+[2024-08-06 07:36:01,388][08760] Updated weights for policy 0, policy_version 430 (0.0015) +[2024-08-06 07:36:03,968][00403] Fps is (10 sec: 4096.9, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1769472. Throughput: 0: 911.1. Samples: 442488. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:03,972][00403] Avg episode reward: [(0, '12.586')] +[2024-08-06 07:36:03,978][08747] Saving new best policy, reward=12.586! +[2024-08-06 07:36:08,970][00403] Fps is (10 sec: 3686.3, 60 sec: 3618.0, 300 sec: 3582.2). Total num frames: 1785856. Throughput: 0: 898.9. Samples: 444984. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:08,974][00403] Avg episode reward: [(0, '13.203')] +[2024-08-06 07:36:08,983][08747] Saving new best policy, reward=13.203! +[2024-08-06 07:36:13,149][08760] Updated weights for policy 0, policy_version 440 (0.0014) +[2024-08-06 07:36:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 1802240. Throughput: 0: 911.0. Samples: 449894. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:36:13,970][00403] Avg episode reward: [(0, '13.480')] +[2024-08-06 07:36:13,973][08747] Saving new best policy, reward=13.480! +[2024-08-06 07:36:18,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1822720. Throughput: 0: 910.0. Samples: 455998. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:18,971][00403] Avg episode reward: [(0, '13.666')] +[2024-08-06 07:36:18,984][08747] Saving new best policy, reward=13.666! +[2024-08-06 07:36:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 1835008. Throughput: 0: 896.0. Samples: 458326. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:23,975][00403] Avg episode reward: [(0, '14.469')] +[2024-08-06 07:36:24,009][08747] Saving new best policy, reward=14.469! +[2024-08-06 07:36:24,949][08760] Updated weights for policy 0, policy_version 450 (0.0015) +[2024-08-06 07:36:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 1855488. Throughput: 0: 906.0. Samples: 463414. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:36:28,970][00403] Avg episode reward: [(0, '14.272')] +[2024-08-06 07:36:33,968][00403] Fps is (10 sec: 4505.6, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 1880064. Throughput: 0: 908.0. Samples: 469654. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:33,973][00403] Avg episode reward: [(0, '14.775')] +[2024-08-06 07:36:33,981][08747] Saving new best policy, reward=14.775! +[2024-08-06 07:36:35,463][08760] Updated weights for policy 0, policy_version 460 (0.0013) +[2024-08-06 07:36:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.4, 300 sec: 3582.3). Total num frames: 1892352. Throughput: 0: 893.9. Samples: 471770. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:38,971][00403] Avg episode reward: [(0, '16.345')] +[2024-08-06 07:36:38,978][08747] Saving new best policy, reward=16.345! +[2024-08-06 07:36:43,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1912832. Throughput: 0: 904.9. Samples: 477088. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:43,975][00403] Avg episode reward: [(0, '16.263')] +[2024-08-06 07:36:46,538][08760] Updated weights for policy 0, policy_version 470 (0.0013) +[2024-08-06 07:36:48,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.1). 
Total num frames: 1933312. Throughput: 0: 906.6. Samples: 483284. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:48,971][00403] Avg episode reward: [(0, '15.695')] +[2024-08-06 07:36:53,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.3, 300 sec: 3568.4). Total num frames: 1945600. Throughput: 0: 894.7. Samples: 485246. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:36:53,971][00403] Avg episode reward: [(0, '16.009')] +[2024-08-06 07:36:58,278][08760] Updated weights for policy 0, policy_version 480 (0.0014) +[2024-08-06 07:36:58,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.2, 300 sec: 3596.1). Total num frames: 1966080. Throughput: 0: 905.9. Samples: 490662. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:36:58,970][00403] Avg episode reward: [(0, '16.963')] +[2024-08-06 07:36:58,979][08747] Saving new best policy, reward=16.963! +[2024-08-06 07:37:03,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 1986560. Throughput: 0: 905.5. Samples: 496744. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:37:03,971][00403] Avg episode reward: [(0, '16.638')] +[2024-08-06 07:37:08,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3550.0, 300 sec: 3568.4). Total num frames: 1998848. Throughput: 0: 896.0. Samples: 498646. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:37:08,971][00403] Avg episode reward: [(0, '17.421')] +[2024-08-06 07:37:08,986][08747] Saving new best policy, reward=17.421! +[2024-08-06 07:37:09,988][08760] Updated weights for policy 0, policy_version 490 (0.0014) +[2024-08-06 07:37:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 2023424. Throughput: 0: 909.8. Samples: 504356. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:37:13,977][00403] Avg episode reward: [(0, '17.409')] +[2024-08-06 07:37:18,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 2039808. Throughput: 0: 903.2. Samples: 510298. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:18,975][00403] Avg episode reward: [(0, '16.747')] +[2024-08-06 07:37:18,983][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000498_2039808.pth... +[2024-08-06 07:37:18,986][00403] Components not started: RolloutWorker_w0, RolloutWorker_w1, RolloutWorker_w3, RolloutWorker_w6, RolloutWorker_w7, wait_time=600.0 seconds +[2024-08-06 07:37:19,107][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000288_1179648.pth +[2024-08-06 07:37:21,440][08760] Updated weights for policy 0, policy_version 500 (0.0016) +[2024-08-06 07:37:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 2056192. Throughput: 0: 896.7. Samples: 512120. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:23,972][00403] Avg episode reward: [(0, '16.557')] +[2024-08-06 07:37:28,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3596.2). Total num frames: 2076672. Throughput: 0: 909.8. Samples: 518030. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:37:28,974][00403] Avg episode reward: [(0, '16.234')] +[2024-08-06 07:37:31,623][08760] Updated weights for policy 0, policy_version 510 (0.0015) +[2024-08-06 07:37:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2093056. Throughput: 0: 890.7. Samples: 523366. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:33,975][00403] Avg episode reward: [(0, '16.380')] +[2024-08-06 07:37:38,970][00403] Fps is (10 sec: 2866.6, 60 sec: 3549.7, 300 sec: 3568.4). Total num frames: 2105344. Throughput: 0: 884.9. Samples: 525068. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:38,975][00403] Avg episode reward: [(0, '16.397')] +[2024-08-06 07:37:43,971][00403] Fps is (10 sec: 3276.0, 60 sec: 3549.7, 300 sec: 3582.2). Total num frames: 2125824. Throughput: 0: 873.0. Samples: 529950. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:37:43,976][00403] Avg episode reward: [(0, '16.302')] +[2024-08-06 07:37:44,961][08760] Updated weights for policy 0, policy_version 520 (0.0019) +[2024-08-06 07:37:48,968][00403] Fps is (10 sec: 3687.2, 60 sec: 3481.6, 300 sec: 3568.4). Total num frames: 2142208. Throughput: 0: 875.3. Samples: 536134. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:48,971][00403] Avg episode reward: [(0, '16.122')] +[2024-08-06 07:37:53,968][00403] Fps is (10 sec: 3277.6, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 2158592. Throughput: 0: 883.1. Samples: 538386. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:37:53,970][00403] Avg episode reward: [(0, '16.408')] +[2024-08-06 07:37:56,635][08760] Updated weights for policy 0, policy_version 530 (0.0015) +[2024-08-06 07:37:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2179072. Throughput: 0: 873.1. Samples: 543646. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:37:58,976][00403] Avg episode reward: [(0, '15.897')] +[2024-08-06 07:38:03,968][00403] Fps is (10 sec: 4095.9, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2199552. Throughput: 0: 880.4. Samples: 549918. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:03,971][00403] Avg episode reward: [(0, '15.503')] +[2024-08-06 07:38:08,239][08760] Updated weights for policy 0, policy_version 540 (0.0016) +[2024-08-06 07:38:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 2211840. Throughput: 0: 885.4. Samples: 551962. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:08,976][00403] Avg episode reward: [(0, '16.149')] +[2024-08-06 07:38:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3568.4). Total num frames: 2232320. Throughput: 0: 874.8. Samples: 557398. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:13,979][00403] Avg episode reward: [(0, '16.046')] +[2024-08-06 07:38:17,985][08760] Updated weights for policy 0, policy_version 550 (0.0019) +[2024-08-06 07:38:18,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3596.1). Total num frames: 2252800. Throughput: 0: 891.3. Samples: 563474. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:38:18,977][00403] Avg episode reward: [(0, '15.713')] +[2024-08-06 07:38:23,971][00403] Fps is (10 sec: 3685.6, 60 sec: 3549.7, 300 sec: 3596.1). Total num frames: 2269184. Throughput: 0: 895.2. Samples: 565352. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:38:23,973][00403] Avg episode reward: [(0, '15.259')] +[2024-08-06 07:38:28,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3610.1). Total num frames: 2289664. Throughput: 0: 913.9. Samples: 571074. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:28,976][00403] Avg episode reward: [(0, '15.810')] +[2024-08-06 07:38:29,828][08760] Updated weights for policy 0, policy_version 560 (0.0023) +[2024-08-06 07:38:33,968][00403] Fps is (10 sec: 3687.3, 60 sec: 3549.9, 300 sec: 3596.1). Total num frames: 2306048. Throughput: 0: 906.9. Samples: 576946. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:33,974][00403] Avg episode reward: [(0, '16.007')] +[2024-08-06 07:38:38,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.3, 300 sec: 3596.1). Total num frames: 2322432. Throughput: 0: 900.1. Samples: 578890. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:38,974][00403] Avg episode reward: [(0, '15.877')] +[2024-08-06 07:38:41,464][08760] Updated weights for policy 0, policy_version 570 (0.0016) +[2024-08-06 07:38:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.3, 300 sec: 3610.0). Total num frames: 2342912. Throughput: 0: 914.6. Samples: 584802. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:38:43,974][00403] Avg episode reward: [(0, '17.306')] +[2024-08-06 07:38:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 2359296. Throughput: 0: 903.2. Samples: 590564. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:48,974][00403] Avg episode reward: [(0, '17.182')] +[2024-08-06 07:38:53,253][08760] Updated weights for policy 0, policy_version 580 (0.0012) +[2024-08-06 07:38:53,968][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 2375680. Throughput: 0: 899.6. Samples: 592442. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:38:53,971][00403] Avg episode reward: [(0, '17.721')] +[2024-08-06 07:38:53,978][08747] Saving new best policy, reward=17.721! +[2024-08-06 07:38:58,976][00403] Fps is (10 sec: 3683.5, 60 sec: 3617.7, 300 sec: 3596.1). Total num frames: 2396160. Throughput: 0: 909.6. Samples: 598338. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:38:58,988][00403] Avg episode reward: [(0, '18.887')] +[2024-08-06 07:38:59,005][08747] Saving new best policy, reward=18.887! +[2024-08-06 07:39:03,881][08760] Updated weights for policy 0, policy_version 590 (0.0013) +[2024-08-06 07:39:03,968][00403] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 2416640. Throughput: 0: 898.8. Samples: 603918. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:39:03,973][00403] Avg episode reward: [(0, '19.927')] +[2024-08-06 07:39:03,980][08747] Saving new best policy, reward=19.927! +[2024-08-06 07:39:08,968][00403] Fps is (10 sec: 3689.3, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 2433024. Throughput: 0: 897.1. Samples: 605720. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:39:08,974][00403] Avg episode reward: [(0, '19.966')] +[2024-08-06 07:39:08,985][08747] Saving new best policy, reward=19.966! +[2024-08-06 07:39:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 2453504. Throughput: 0: 906.5. Samples: 611866. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:39:13,971][00403] Avg episode reward: [(0, '20.160')] +[2024-08-06 07:39:13,977][08747] Saving new best policy, reward=20.160! +[2024-08-06 07:39:14,925][08760] Updated weights for policy 0, policy_version 600 (0.0015) +[2024-08-06 07:39:18,971][00403] Fps is (10 sec: 3685.5, 60 sec: 3618.0, 300 sec: 3596.1). Total num frames: 2469888. Throughput: 0: 898.3. 
Samples: 617372. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:39:18,977][00403] Avg episode reward: [(0, '18.525')] +[2024-08-06 07:39:18,986][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000603_2469888.pth... +[2024-08-06 07:39:19,100][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000392_1605632.pth +[2024-08-06 07:39:23,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.3, 300 sec: 3610.0). Total num frames: 2486272. Throughput: 0: 897.2. Samples: 619266. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:39:23,976][00403] Avg episode reward: [(0, '18.324')] +[2024-08-06 07:39:26,828][08760] Updated weights for policy 0, policy_version 610 (0.0014) +[2024-08-06 07:39:28,968][00403] Fps is (10 sec: 3687.3, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 2506752. Throughput: 0: 903.0. Samples: 625436. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:39:28,973][00403] Avg episode reward: [(0, '16.840')] +[2024-08-06 07:39:33,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 2523136. Throughput: 0: 894.7. Samples: 630824. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:39:33,977][00403] Avg episode reward: [(0, '17.446')] +[2024-08-06 07:39:38,417][08760] Updated weights for policy 0, policy_version 620 (0.0013) +[2024-08-06 07:39:38,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.2). Total num frames: 2539520. Throughput: 0: 900.4. Samples: 632960. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:39:38,975][00403] Avg episode reward: [(0, '17.997')] +[2024-08-06 07:39:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 2560000. Throughput: 0: 907.1. Samples: 639150. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:39:43,973][00403] Avg episode reward: [(0, '18.798')] +[2024-08-06 07:39:48,974][00403] Fps is (10 sec: 3684.3, 60 sec: 3617.8, 300 sec: 3596.1). Total num frames: 2576384. Throughput: 0: 894.9. Samples: 644192. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:39:48,976][00403] Avg episode reward: [(0, '18.890')] +[2024-08-06 07:39:50,442][08760] Updated weights for policy 0, policy_version 630 (0.0013) +[2024-08-06 07:39:53,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2588672. Throughput: 0: 891.4. Samples: 645832. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:39:53,972][00403] Avg episode reward: [(0, '18.627')] +[2024-08-06 07:39:58,968][00403] Fps is (10 sec: 3278.6, 60 sec: 3550.3, 300 sec: 3582.3). Total num frames: 2609152. Throughput: 0: 869.8. Samples: 651008. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:39:58,973][00403] Avg episode reward: [(0, '18.468')] +[2024-08-06 07:40:01,667][08760] Updated weights for policy 0, policy_version 640 (0.0013) +[2024-08-06 07:40:03,972][00403] Fps is (10 sec: 3685.1, 60 sec: 3481.4, 300 sec: 3582.2). Total num frames: 2625536. Throughput: 0: 879.5. Samples: 656950. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:03,975][00403] Avg episode reward: [(0, '17.984')] +[2024-08-06 07:40:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 2641920. Throughput: 0: 880.0. Samples: 658864. 
Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:40:08,970][00403] Avg episode reward: [(0, '18.692')] +[2024-08-06 07:40:13,295][08760] Updated weights for policy 0, policy_version 650 (0.0014) +[2024-08-06 07:40:13,968][00403] Fps is (10 sec: 3687.7, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 2662400. Throughput: 0: 873.6. Samples: 664748. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:13,971][00403] Avg episode reward: [(0, '19.257')] +[2024-08-06 07:40:18,971][00403] Fps is (10 sec: 4094.9, 60 sec: 3549.9, 300 sec: 3596.1). Total num frames: 2682880. Throughput: 0: 882.8. Samples: 670554. Policy #0 lag: (min: 0.0, avg: 0.0, max: 1.0) +[2024-08-06 07:40:18,975][00403] Avg episode reward: [(0, '20.044')] +[2024-08-06 07:40:23,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3596.1). Total num frames: 2699264. Throughput: 0: 878.6. Samples: 672496. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:23,972][00403] Avg episode reward: [(0, '20.695')] +[2024-08-06 07:40:23,977][08747] Saving new best policy, reward=20.695! +[2024-08-06 07:40:24,968][08760] Updated weights for policy 0, policy_version 660 (0.0019) +[2024-08-06 07:40:28,968][00403] Fps is (10 sec: 3277.6, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 2715648. Throughput: 0: 872.9. Samples: 678430. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:40:28,975][00403] Avg episode reward: [(0, '21.525')] +[2024-08-06 07:40:29,052][08747] Saving new best policy, reward=21.525! +[2024-08-06 07:40:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3596.2). Total num frames: 2736128. Throughput: 0: 887.0. Samples: 684100. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:33,973][00403] Avg episode reward: [(0, '22.805')] +[2024-08-06 07:40:33,978][08747] Saving new best policy, reward=22.805! +[2024-08-06 07:40:36,650][08760] Updated weights for policy 0, policy_version 670 (0.0026) +[2024-08-06 07:40:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2752512. Throughput: 0: 892.6. Samples: 685998. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:38,975][00403] Avg episode reward: [(0, '21.844')] +[2024-08-06 07:40:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2772992. Throughput: 0: 913.9. Samples: 692134. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:43,970][00403] Avg episode reward: [(0, '21.626')] +[2024-08-06 07:40:46,623][08760] Updated weights for policy 0, policy_version 680 (0.0020) +[2024-08-06 07:40:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3550.2, 300 sec: 3596.2). Total num frames: 2789376. Throughput: 0: 904.4. Samples: 697646. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:48,976][00403] Avg episode reward: [(0, '22.444')] +[2024-08-06 07:40:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2805760. Throughput: 0: 905.1. Samples: 699594. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:53,975][00403] Avg episode reward: [(0, '22.177')] +[2024-08-06 07:40:58,328][08760] Updated weights for policy 0, policy_version 690 (0.0014) +[2024-08-06 07:40:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2826240. Throughput: 0: 911.2. Samples: 705750. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:40:58,974][00403] Avg episode reward: [(0, '21.546')] +[2024-08-06 07:41:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.3, 300 sec: 3582.3). Total num frames: 2842624. Throughput: 0: 901.5. Samples: 711120. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:03,977][00403] Avg episode reward: [(0, '21.899')] +[2024-08-06 07:41:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 2863104. Throughput: 0: 905.9. Samples: 713262. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:08,974][00403] Avg episode reward: [(0, '23.330')] +[2024-08-06 07:41:08,984][08747] Saving new best policy, reward=23.330! +[2024-08-06 07:41:09,991][08760] Updated weights for policy 0, policy_version 700 (0.0017) +[2024-08-06 07:41:13,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 2883584. Throughput: 0: 910.2. Samples: 719390. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:13,973][00403] Avg episode reward: [(0, '23.612')] +[2024-08-06 07:41:13,977][08747] Saving new best policy, reward=23.612! +[2024-08-06 07:41:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3550.0, 300 sec: 3596.1). Total num frames: 2895872. Throughput: 0: 899.0. Samples: 724554. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:41:18,978][00403] Avg episode reward: [(0, '22.964')] +[2024-08-06 07:41:18,988][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000707_2895872.pth... +[2024-08-06 07:41:19,109][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000498_2039808.pth +[2024-08-06 07:41:21,744][08760] Updated weights for policy 0, policy_version 710 (0.0019) +[2024-08-06 07:41:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 2916352. Throughput: 0: 906.9. Samples: 726810. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:23,971][00403] Avg episode reward: [(0, '22.304')] +[2024-08-06 07:41:28,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 2936832. Throughput: 0: 907.2. Samples: 732960. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:41:28,976][00403] Avg episode reward: [(0, '22.579')] +[2024-08-06 07:41:32,625][08760] Updated weights for policy 0, policy_version 720 (0.0015) +[2024-08-06 07:41:33,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 2949120. Throughput: 0: 896.5. Samples: 737988. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:41:33,974][00403] Avg episode reward: [(0, '19.849')] +[2024-08-06 07:41:38,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2969600. Throughput: 0: 910.3. Samples: 740558. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:38,971][00403] Avg episode reward: [(0, '16.854')] +[2024-08-06 07:41:43,163][08760] Updated weights for policy 0, policy_version 730 (0.0014) +[2024-08-06 07:41:43,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 2990080. Throughput: 0: 912.4. Samples: 746808. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:43,971][00403] Avg episode reward: [(0, '16.913')] +[2024-08-06 07:41:48,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3006464. Throughput: 0: 899.6. Samples: 751602. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:48,974][00403] Avg episode reward: [(0, '15.342')] +[2024-08-06 07:41:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3596.2). Total num frames: 3026944. Throughput: 0: 912.2. Samples: 754310. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:41:53,975][00403] Avg episode reward: [(0, '15.258')] +[2024-08-06 07:41:54,837][08760] Updated weights for policy 0, policy_version 740 (0.0014) +[2024-08-06 07:41:58,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3596.1). Total num frames: 3047424. Throughput: 0: 913.2. Samples: 760484. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:41:58,973][00403] Avg episode reward: [(0, '16.532')] +[2024-08-06 07:42:03,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3059712. Throughput: 0: 900.5. Samples: 765076. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:42:03,970][00403] Avg episode reward: [(0, '17.251')] +[2024-08-06 07:42:06,443][08760] Updated weights for policy 0, policy_version 750 (0.0028) +[2024-08-06 07:42:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 3080192. Throughput: 0: 915.9. Samples: 768024. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:08,970][00403] Avg episode reward: [(0, '17.286')] +[2024-08-06 07:42:13,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3100672. Throughput: 0: 917.5. Samples: 774246. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:13,972][00403] Avg episode reward: [(0, '19.924')] +[2024-08-06 07:42:17,768][08760] Updated weights for policy 0, policy_version 760 (0.0022) +[2024-08-06 07:42:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 3112960. Throughput: 0: 904.8. Samples: 778704. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:42:18,974][00403] Avg episode reward: [(0, '20.434')] +[2024-08-06 07:42:23,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3596.2). Total num frames: 3137536. Throughput: 0: 914.9. Samples: 781728. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:23,970][00403] Avg episode reward: [(0, '20.972')] +[2024-08-06 07:42:27,967][08760] Updated weights for policy 0, policy_version 770 (0.0014) +[2024-08-06 07:42:28,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3153920. Throughput: 0: 912.9. Samples: 787888. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:28,971][00403] Avg episode reward: [(0, '21.295')] +[2024-08-06 07:42:33,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3610.1). Total num frames: 3170304. Throughput: 0: 906.1. Samples: 792376. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:42:33,971][00403] Avg episode reward: [(0, '20.832')] +[2024-08-06 07:42:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3610.1). Total num frames: 3190784. Throughput: 0: 914.4. Samples: 795456. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:42:38,970][00403] Avg episode reward: [(0, '21.253')] +[2024-08-06 07:42:39,658][08760] Updated weights for policy 0, policy_version 780 (0.0021) +[2024-08-06 07:42:43,969][00403] Fps is (10 sec: 4095.8, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3211264. Throughput: 0: 915.8. Samples: 801694. 
Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:43,971][00403] Avg episode reward: [(0, '21.287')] +[2024-08-06 07:42:48,971][00403] Fps is (10 sec: 3276.0, 60 sec: 3618.0, 300 sec: 3610.0). Total num frames: 3223552. Throughput: 0: 910.7. Samples: 806060. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:48,972][00403] Avg episode reward: [(0, '21.166')] +[2024-08-06 07:42:51,349][08760] Updated weights for policy 0, policy_version 790 (0.0013) +[2024-08-06 07:42:53,971][00403] Fps is (10 sec: 3276.1, 60 sec: 3618.0, 300 sec: 3610.0). Total num frames: 3244032. Throughput: 0: 914.3. Samples: 809170. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:42:53,979][00403] Avg episode reward: [(0, '23.221')] +[2024-08-06 07:42:58,968][00403] Fps is (10 sec: 4097.0, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 3264512. Throughput: 0: 913.7. Samples: 815364. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:42:58,975][00403] Avg episode reward: [(0, '22.878')] +[2024-08-06 07:43:02,873][08760] Updated weights for policy 0, policy_version 800 (0.0022) +[2024-08-06 07:43:03,968][00403] Fps is (10 sec: 3687.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3280896. Throughput: 0: 914.0. Samples: 819836. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:03,971][00403] Avg episode reward: [(0, '25.226')] +[2024-08-06 07:43:03,978][08747] Saving new best policy, reward=25.226! +[2024-08-06 07:43:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 3301376. Throughput: 0: 914.7. Samples: 822888. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:08,971][00403] Avg episode reward: [(0, '22.992')] +[2024-08-06 07:43:13,367][08760] Updated weights for policy 0, policy_version 810 (0.0013) +[2024-08-06 07:43:13,969][00403] Fps is (10 sec: 3686.2, 60 sec: 3618.1, 300 sec: 3610.0). Total num frames: 3317760. Throughput: 0: 911.8. Samples: 828920. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:13,974][00403] Avg episode reward: [(0, '22.061')] +[2024-08-06 07:43:18,971][00403] Fps is (10 sec: 3276.0, 60 sec: 3686.2, 300 sec: 3610.0). Total num frames: 3334144. Throughput: 0: 915.5. Samples: 833574. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:43:18,973][00403] Avg episode reward: [(0, '21.957')] +[2024-08-06 07:43:18,989][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000814_3334144.pth... +[2024-08-06 07:43:19,090][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000603_2469888.pth +[2024-08-06 07:43:23,968][00403] Fps is (10 sec: 3277.0, 60 sec: 3549.9, 300 sec: 3596.1). Total num frames: 3350528. Throughput: 0: 912.1. Samples: 836500. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:43:23,971][00403] Avg episode reward: [(0, '21.237')] +[2024-08-06 07:43:26,953][08760] Updated weights for policy 0, policy_version 820 (0.0013) +[2024-08-06 07:43:28,968][00403] Fps is (10 sec: 2867.9, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 3362816. Throughput: 0: 854.0. Samples: 840124. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:43:28,980][00403] Avg episode reward: [(0, '22.626')] +[2024-08-06 07:43:33,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 3379200. Throughput: 0: 861.8. Samples: 844840. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:43:33,971][00403] Avg episode reward: [(0, '22.240')] +[2024-08-06 07:43:38,517][08760] Updated weights for policy 0, policy_version 830 (0.0014) +[2024-08-06 07:43:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 3399680. Throughput: 0: 861.1. Samples: 847918. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:38,970][00403] Avg episode reward: [(0, '23.095')] +[2024-08-06 07:43:43,968][00403] Fps is (10 sec: 3686.3, 60 sec: 3413.4, 300 sec: 3582.3). Total num frames: 3416064. Throughput: 0: 849.4. Samples: 853586. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:43:43,971][00403] Avg episode reward: [(0, '23.314')] +[2024-08-06 07:43:48,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.7, 300 sec: 3582.3). Total num frames: 3432448. Throughput: 0: 858.3. Samples: 858458. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:48,971][00403] Avg episode reward: [(0, '24.029')] +[2024-08-06 07:43:50,053][08760] Updated weights for policy 0, policy_version 840 (0.0015) +[2024-08-06 07:43:53,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 3452928. Throughput: 0: 858.6. Samples: 861528. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:53,977][00403] Avg episode reward: [(0, '23.806')] +[2024-08-06 07:43:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 3469312. Throughput: 0: 845.0. Samples: 866946. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:43:58,971][00403] Avg episode reward: [(0, '23.270')] +[2024-08-06 07:44:02,012][08760] Updated weights for policy 0, policy_version 850 (0.0015) +[2024-08-06 07:44:03,968][00403] Fps is (10 sec: 3277.4, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 3485696. Throughput: 0: 850.6. Samples: 871848. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:03,972][00403] Avg episode reward: [(0, '22.360')] +[2024-08-06 07:44:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3568.4). Total num frames: 3506176. Throughput: 0: 852.5. Samples: 874862. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:44:08,972][00403] Avg episode reward: [(0, '23.044')] +[2024-08-06 07:44:13,131][08760] Updated weights for policy 0, policy_version 860 (0.0018) +[2024-08-06 07:44:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.4, 300 sec: 3568.4). Total num frames: 3522560. Throughput: 0: 891.6. Samples: 880248. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:13,973][00403] Avg episode reward: [(0, '23.004')] +[2024-08-06 07:44:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.8, 300 sec: 3582.3). Total num frames: 3543040. Throughput: 0: 901.9. Samples: 885424. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:18,971][00403] Avg episode reward: [(0, '22.308')] +[2024-08-06 07:44:23,887][08760] Updated weights for policy 0, policy_version 870 (0.0017) +[2024-08-06 07:44:23,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 3563520. Throughput: 0: 902.1. Samples: 888514. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:44:23,971][00403] Avg episode reward: [(0, '22.900')] +[2024-08-06 07:44:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 3575808. Throughput: 0: 892.0. Samples: 893724. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:44:28,973][00403] Avg episode reward: [(0, '22.911')] +[2024-08-06 07:44:33,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 3596288. Throughput: 0: 901.8. Samples: 899040. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:44:33,975][00403] Avg episode reward: [(0, '21.613')] +[2024-08-06 07:44:35,575][08760] Updated weights for policy 0, policy_version 880 (0.0013) +[2024-08-06 07:44:38,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 3616768. Throughput: 0: 902.0. Samples: 902116. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:44:38,978][00403] Avg episode reward: [(0, '19.944')] +[2024-08-06 07:44:43,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3568.4). Total num frames: 3629056. Throughput: 0: 894.6. Samples: 907204. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:43,971][00403] Avg episode reward: [(0, '19.633')] +[2024-08-06 07:44:47,293][08760] Updated weights for policy 0, policy_version 890 (0.0014) +[2024-08-06 07:44:48,969][00403] Fps is (10 sec: 3276.5, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3649536. Throughput: 0: 905.4. Samples: 912590. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:44:48,976][00403] Avg episode reward: [(0, '19.246')] +[2024-08-06 07:44:53,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3596.1). Total num frames: 3670016. Throughput: 0: 907.1. Samples: 915682. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:53,971][00403] Avg episode reward: [(0, '19.491')] +[2024-08-06 07:44:58,968][00403] Fps is (10 sec: 3277.1, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 3682304. Throughput: 0: 896.9. Samples: 920610. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:44:58,971][00403] Avg episode reward: [(0, '19.430')] +[2024-08-06 07:44:59,113][08760] Updated weights for policy 0, policy_version 900 (0.0013) +[2024-08-06 07:45:03,969][00403] Fps is (10 sec: 3276.5, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3702784. Throughput: 0: 905.1. Samples: 926154. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:45:03,972][00403] Avg episode reward: [(0, '20.591')] +[2024-08-06 07:45:08,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 3723264. Throughput: 0: 903.2. Samples: 929156. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:08,971][00403] Avg episode reward: [(0, '20.908')] +[2024-08-06 07:45:09,491][08760] Updated weights for policy 0, policy_version 910 (0.0013) +[2024-08-06 07:45:13,968][00403] Fps is (10 sec: 3686.7, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 3739648. Throughput: 0: 890.9. Samples: 933816. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:45:13,971][00403] Avg episode reward: [(0, '21.505')] +[2024-08-06 07:45:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 3756032. Throughput: 0: 901.2. Samples: 939594. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:45:18,970][00403] Avg episode reward: [(0, '22.075')] +[2024-08-06 07:45:18,982][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000918_3760128.pth... 
+[2024-08-06 07:45:19,082][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000707_2895872.pth +[2024-08-06 07:45:20,995][08760] Updated weights for policy 0, policy_version 920 (0.0017) +[2024-08-06 07:45:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3582.3). Total num frames: 3772416. Throughput: 0: 899.0. Samples: 942570. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:23,985][00403] Avg episode reward: [(0, '22.075')] +[2024-08-06 07:45:28,969][00403] Fps is (10 sec: 2867.0, 60 sec: 3481.6, 300 sec: 3554.5). Total num frames: 3784704. Throughput: 0: 864.8. Samples: 946120. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:45:28,971][00403] Avg episode reward: [(0, '22.820')] +[2024-08-06 07:45:33,969][00403] Fps is (10 sec: 3276.6, 60 sec: 3481.6, 300 sec: 3568.4). Total num frames: 3805184. Throughput: 0: 856.3. Samples: 951124. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:45:33,977][00403] Avg episode reward: [(0, '23.458')] +[2024-08-06 07:45:34,623][08760] Updated weights for policy 0, policy_version 930 (0.0020) +[2024-08-06 07:45:38,971][00403] Fps is (10 sec: 4095.3, 60 sec: 3481.5, 300 sec: 3568.4). Total num frames: 3825664. Throughput: 0: 854.9. Samples: 954156. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:45:38,973][00403] Avg episode reward: [(0, '23.294')] +[2024-08-06 07:45:43,969][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3554.5). Total num frames: 3837952. Throughput: 0: 858.4. Samples: 959238. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:43,974][00403] Avg episode reward: [(0, '22.832')] +[2024-08-06 07:45:48,390][08760] Updated weights for policy 0, policy_version 940 (0.0025) +[2024-08-06 07:45:48,968][00403] Fps is (10 sec: 2458.1, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 3850240. Throughput: 0: 813.9. Samples: 962778. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:48,974][00403] Avg episode reward: [(0, '21.728')] +[2024-08-06 07:45:53,968][00403] Fps is (10 sec: 3277.1, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 3870720. Throughput: 0: 814.4. Samples: 965804. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:53,971][00403] Avg episode reward: [(0, '21.753')] +[2024-08-06 07:45:58,971][00403] Fps is (10 sec: 3685.4, 60 sec: 3413.2, 300 sec: 3540.6). Total num frames: 3887104. Throughput: 0: 838.2. Samples: 971536. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:45:58,973][00403] Avg episode reward: [(0, '22.171')] +[2024-08-06 07:45:59,576][08760] Updated weights for policy 0, policy_version 950 (0.0013) +[2024-08-06 07:46:03,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3345.1, 300 sec: 3526.7). Total num frames: 3903488. Throughput: 0: 815.5. Samples: 976290. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:46:03,971][00403] Avg episode reward: [(0, '22.624')] +[2024-08-06 07:46:08,970][00403] Fps is (10 sec: 3686.8, 60 sec: 3345.0, 300 sec: 3526.7). Total num frames: 3923968. Throughput: 0: 816.5. Samples: 979314. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:46:08,973][00403] Avg episode reward: [(0, '23.292')] +[2024-08-06 07:46:10,176][08760] Updated weights for policy 0, policy_version 960 (0.0017) +[2024-08-06 07:46:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3345.1, 300 sec: 3540.6). Total num frames: 3940352. Throughput: 0: 862.1. Samples: 984916. 
Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:46:13,972][00403] Avg episode reward: [(0, '24.130')] +[2024-08-06 07:46:18,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 3960832. Throughput: 0: 857.4. Samples: 989708. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:18,972][00403] Avg episode reward: [(0, '25.089')] +[2024-08-06 07:46:22,093][08760] Updated weights for policy 0, policy_version 970 (0.0016) +[2024-08-06 07:46:23,971][00403] Fps is (10 sec: 3685.4, 60 sec: 3413.2, 300 sec: 3526.7). Total num frames: 3977216. Throughput: 0: 857.9. Samples: 992764. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:46:23,975][00403] Avg episode reward: [(0, '25.391')] +[2024-08-06 07:46:23,977][08747] Saving new best policy, reward=25.391! +[2024-08-06 07:46:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 3993600. Throughput: 0: 861.3. Samples: 997994. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:28,973][00403] Avg episode reward: [(0, '24.422')] +[2024-08-06 07:46:33,956][08760] Updated weights for policy 0, policy_version 980 (0.0019) +[2024-08-06 07:46:33,968][00403] Fps is (10 sec: 3687.4, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 4014080. Throughput: 0: 894.7. Samples: 1003038. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:33,976][00403] Avg episode reward: [(0, '25.458')] +[2024-08-06 07:46:33,981][08747] Saving new best policy, reward=25.458! +[2024-08-06 07:46:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.5, 300 sec: 3526.7). Total num frames: 4030464. Throughput: 0: 893.5. Samples: 1006012. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:38,976][00403] Avg episode reward: [(0, '26.123')] +[2024-08-06 07:46:39,056][08747] Saving new best policy, reward=26.123! +[2024-08-06 07:46:43,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3526.7). Total num frames: 4046848. Throughput: 0: 882.2. Samples: 1011232. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:46:43,974][00403] Avg episode reward: [(0, '26.317')] +[2024-08-06 07:46:43,981][08747] Saving new best policy, reward=26.317! +[2024-08-06 07:46:45,901][08760] Updated weights for policy 0, policy_version 990 (0.0014) +[2024-08-06 07:46:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 4067328. Throughput: 0: 889.6. Samples: 1016322. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:48,971][00403] Avg episode reward: [(0, '26.288')] +[2024-08-06 07:46:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4083712. Throughput: 0: 890.8. Samples: 1019398. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:46:53,972][00403] Avg episode reward: [(0, '26.723')] +[2024-08-06 07:46:54,039][08747] Saving new best policy, reward=26.723! +[2024-08-06 07:46:57,075][08760] Updated weights for policy 0, policy_version 1000 (0.0016) +[2024-08-06 07:46:58,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3550.0, 300 sec: 3526.7). Total num frames: 4100096. Throughput: 0: 874.0. Samples: 1024244. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:46:58,971][00403] Avg episode reward: [(0, '27.658')] +[2024-08-06 07:46:58,989][08747] Saving new best policy, reward=27.658! +[2024-08-06 07:47:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3526.7). Total num frames: 4120576. Throughput: 0: 888.6. Samples: 1029696. 
Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:47:03,973][00403] Avg episode reward: [(0, '28.980')] +[2024-08-06 07:47:03,975][08747] Saving new best policy, reward=28.980! +[2024-08-06 07:47:08,045][08760] Updated weights for policy 0, policy_version 1010 (0.0017) +[2024-08-06 07:47:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3550.0, 300 sec: 3512.8). Total num frames: 4136960. Throughput: 0: 887.4. Samples: 1032694. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:47:08,971][00403] Avg episode reward: [(0, '28.973')] +[2024-08-06 07:47:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 4153344. Throughput: 0: 876.9. Samples: 1037454. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:47:13,972][00403] Avg episode reward: [(0, '28.685')] +[2024-08-06 07:47:18,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 4169728. Throughput: 0: 886.5. Samples: 1042932. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:47:18,971][00403] Avg episode reward: [(0, '28.579')] +[2024-08-06 07:47:18,981][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001019_4173824.pth... +[2024-08-06 07:47:18,993][00403] Components not started: RolloutWorker_w0, RolloutWorker_w1, RolloutWorker_w3, RolloutWorker_w6, RolloutWorker_w7, wait_time=1200.0 seconds +[2024-08-06 07:47:19,087][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000814_3334144.pth +[2024-08-06 07:47:20,113][08760] Updated weights for policy 0, policy_version 1020 (0.0019) +[2024-08-06 07:47:23,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3550.0, 300 sec: 3512.8). Total num frames: 4190208. Throughput: 0: 884.7. Samples: 1045822. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:47:23,974][00403] Avg episode reward: [(0, '28.986')] +[2024-08-06 07:47:23,977][08747] Saving new best policy, reward=28.986! +[2024-08-06 07:47:28,970][00403] Fps is (10 sec: 3276.4, 60 sec: 3481.5, 300 sec: 3498.9). Total num frames: 4202496. Throughput: 0: 865.1. Samples: 1050164. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:47:28,972][00403] Avg episode reward: [(0, '27.726')] +[2024-08-06 07:47:32,178][08760] Updated weights for policy 0, policy_version 1030 (0.0019) +[2024-08-06 07:47:33,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 4222976. Throughput: 0: 880.0. Samples: 1055920. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:47:33,970][00403] Avg episode reward: [(0, '27.762')] +[2024-08-06 07:47:38,968][00403] Fps is (10 sec: 4096.6, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 4243456. Throughput: 0: 879.9. Samples: 1058992. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0) +[2024-08-06 07:47:38,971][00403] Avg episode reward: [(0, '26.647')] +[2024-08-06 07:47:43,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 4255744. Throughput: 0: 867.4. Samples: 1063276. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:47:43,971][00403] Avg episode reward: [(0, '27.091')] +[2024-08-06 07:47:44,338][08760] Updated weights for policy 0, policy_version 1040 (0.0018) +[2024-08-06 07:47:48,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 4276224. Throughput: 0: 877.4. Samples: 1069180. 
Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:47:48,976][00403] Avg episode reward: [(0, '25.772')] +[2024-08-06 07:47:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4292608. Throughput: 0: 876.6. Samples: 1072142. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:47:53,974][00403] Avg episode reward: [(0, '26.314')] +[2024-08-06 07:47:55,802][08760] Updated weights for policy 0, policy_version 1050 (0.0016) +[2024-08-06 07:47:58,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4308992. Throughput: 0: 865.1. Samples: 1076382. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:47:58,977][00403] Avg episode reward: [(0, '26.651')] +[2024-08-06 07:48:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4329472. Throughput: 0: 874.8. Samples: 1082300. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:48:03,970][00403] Avg episode reward: [(0, '26.515')] +[2024-08-06 07:48:06,636][08760] Updated weights for policy 0, policy_version 1060 (0.0014) +[2024-08-06 07:48:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4345856. Throughput: 0: 877.3. Samples: 1085300. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:48:08,976][00403] Avg episode reward: [(0, '26.702')] +[2024-08-06 07:48:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4362240. Throughput: 0: 873.2. Samples: 1089458. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:48:13,978][00403] Avg episode reward: [(0, '28.069')] +[2024-08-06 07:48:18,792][08760] Updated weights for policy 0, policy_version 1070 (0.0019) +[2024-08-06 07:48:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 4382720. Throughput: 0: 877.0. Samples: 1095384. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:48:18,971][00403] Avg episode reward: [(0, '26.583')] +[2024-08-06 07:48:23,969][00403] Fps is (10 sec: 3686.3, 60 sec: 3481.6, 300 sec: 3512.8). Total num frames: 4399104. Throughput: 0: 874.3. Samples: 1098338. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:48:23,975][00403] Avg episode reward: [(0, '25.079')] +[2024-08-06 07:48:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3550.0, 300 sec: 3512.8). Total num frames: 4415488. Throughput: 0: 874.4. Samples: 1102626. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:48:28,970][00403] Avg episode reward: [(0, '24.363')] +[2024-08-06 07:48:30,775][08760] Updated weights for policy 0, policy_version 1080 (0.0014) +[2024-08-06 07:48:33,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4435968. Throughput: 0: 879.9. Samples: 1108774. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0) +[2024-08-06 07:48:33,970][00403] Avg episode reward: [(0, '23.482')] +[2024-08-06 07:48:38,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3481.5, 300 sec: 3512.8). Total num frames: 4452352. Throughput: 0: 882.4. Samples: 1111850. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0) +[2024-08-06 07:48:38,973][00403] Avg episode reward: [(0, '21.772')] +[2024-08-06 07:48:42,517][08760] Updated weights for policy 0, policy_version 1090 (0.0018) +[2024-08-06 07:48:43,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4468736. Throughput: 0: 885.0. Samples: 1116206. 
Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:48:43,975][00403] Avg episode reward: [(0, '20.724')]
+[2024-08-06 07:48:48,970][00403] Fps is (10 sec: 3686.3, 60 sec: 3549.7, 300 sec: 3512.8). Total num frames: 4489216. Throughput: 0: 890.7. Samples: 1122384. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:48:48,977][00403] Avg episode reward: [(0, '22.333')]
+[2024-08-06 07:48:52,664][08760] Updated weights for policy 0, policy_version 1100 (0.0014)
+[2024-08-06 07:48:53,969][00403] Fps is (10 sec: 3686.2, 60 sec: 3549.8, 300 sec: 3512.8). Total num frames: 4505600. Throughput: 0: 893.2. Samples: 1125494. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:48:53,972][00403] Avg episode reward: [(0, '23.146')]
+[2024-08-06 07:48:58,968][00403] Fps is (10 sec: 3277.5, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4521984. Throughput: 0: 898.8. Samples: 1129906. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:48:58,971][00403] Avg episode reward: [(0, '24.661')]
+[2024-08-06 07:49:03,968][00403] Fps is (10 sec: 3686.6, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4542464. Throughput: 0: 904.8. Samples: 1136102. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:03,975][00403] Avg episode reward: [(0, '25.617')]
+[2024-08-06 07:49:04,037][08760] Updated weights for policy 0, policy_version 1110 (0.0015)
+[2024-08-06 07:49:08,970][00403] Fps is (10 sec: 4095.4, 60 sec: 3618.0, 300 sec: 3526.7). Total num frames: 4562944. Throughput: 0: 907.4. Samples: 1139170. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:08,979][00403] Avg episode reward: [(0, '25.463')]
+[2024-08-06 07:49:13,969][00403] Fps is (10 sec: 3686.3, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 4579328. Throughput: 0: 908.2. Samples: 1143494. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:13,972][00403] Avg episode reward: [(0, '27.707')]
+[2024-08-06 07:49:15,772][08760] Updated weights for policy 0, policy_version 1120 (0.0022)
+[2024-08-06 07:49:18,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3618.1, 300 sec: 3512.8). Total num frames: 4599808. Throughput: 0: 908.3. Samples: 1149646. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:18,971][00403] Avg episode reward: [(0, '28.275')]
+[2024-08-06 07:49:18,979][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001123_4599808.pth...
+[2024-08-06 07:49:19,083][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000918_3760128.pth
+[2024-08-06 07:49:23,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4612096. Throughput: 0: 904.8. Samples: 1152564. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:23,971][00403] Avg episode reward: [(0, '28.279')]
+[2024-08-06 07:49:28,968][00403] Fps is (10 sec: 2457.6, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4624384. Throughput: 0: 883.3. Samples: 1155954. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:49:28,971][00403] Avg episode reward: [(0, '27.978')]
+[2024-08-06 07:49:29,302][08760] Updated weights for policy 0, policy_version 1130 (0.0014)
+[2024-08-06 07:49:33,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3485.1). Total num frames: 4644864. Throughput: 0: 867.8. Samples: 1161434. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:33,975][00403] Avg episode reward: [(0, '28.514')]
+[2024-08-06 07:49:38,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3550.0, 300 sec: 3512.8). Total num frames: 4665344. Throughput: 0: 867.6. Samples: 1164536. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:49:38,975][00403] Avg episode reward: [(0, '28.387')]
+[2024-08-06 07:49:40,192][08760] Updated weights for policy 0, policy_version 1140 (0.0021)
+[2024-08-06 07:49:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 4681728. Throughput: 0: 869.2. Samples: 1169020. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:49:43,976][00403] Avg episode reward: [(0, '27.575')]
+[2024-08-06 07:49:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3550.0, 300 sec: 3499.0). Total num frames: 4702208. Throughput: 0: 869.7. Samples: 1175240. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:48,971][00403] Avg episode reward: [(0, '27.070')]
+[2024-08-06 07:49:50,838][08760] Updated weights for policy 0, policy_version 1150 (0.0015)
+[2024-08-06 07:49:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3512.8). Total num frames: 4718592. Throughput: 0: 870.4. Samples: 1178338. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:53,971][00403] Avg episode reward: [(0, '25.593')]
+[2024-08-06 07:49:58,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 4734976. Throughput: 0: 870.3. Samples: 1182656. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:49:58,977][00403] Avg episode reward: [(0, '25.510')]
+[2024-08-06 07:50:02,420][08760] Updated weights for policy 0, policy_version 1160 (0.0012)
+[2024-08-06 07:50:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3499.0). Total num frames: 4755456. Throughput: 0: 873.2. Samples: 1188938. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:50:03,976][00403] Avg episode reward: [(0, '25.555')]
+[2024-08-06 07:50:08,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3481.6, 300 sec: 3498.9). Total num frames: 4771840. Throughput: 0: 876.7. Samples: 1192018. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:50:08,977][00403] Avg episode reward: [(0, '26.097')]
+[2024-08-06 07:50:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3481.6, 300 sec: 3499.0). Total num frames: 4788224. Throughput: 0: 900.5. Samples: 1196476. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:13,976][00403] Avg episode reward: [(0, '27.083')]
+[2024-08-06 07:50:14,127][08760] Updated weights for policy 0, policy_version 1170 (0.0015)
+[2024-08-06 07:50:18,968][00403] Fps is (10 sec: 4096.6, 60 sec: 3549.9, 300 sec: 3526.7). Total num frames: 4812800. Throughput: 0: 917.8. Samples: 1202736. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:18,976][00403] Avg episode reward: [(0, '27.967')]
+[2024-08-06 07:50:23,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.1, 300 sec: 3540.6). Total num frames: 4829184. Throughput: 0: 918.6. Samples: 1205872. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:23,975][00403] Avg episode reward: [(0, '27.468')]
+[2024-08-06 07:50:25,512][08760] Updated weights for policy 0, policy_version 1180 (0.0019)
+[2024-08-06 07:50:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3526.7). Total num frames: 4845568. Throughput: 0: 916.2. Samples: 1210250. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:28,971][00403] Avg episode reward: [(0, '28.129')]
+[2024-08-06 07:50:33,969][00403] Fps is (10 sec: 3686.3, 60 sec: 3686.4, 300 sec: 3526.7). Total num frames: 4866048. Throughput: 0: 915.9. Samples: 1216456. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:33,982][00403] Avg episode reward: [(0, '27.143')]
+[2024-08-06 07:50:35,570][08760] Updated weights for policy 0, policy_version 1190 (0.0018)
+[2024-08-06 07:50:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3540.6). Total num frames: 4882432. Throughput: 0: 911.2. Samples: 1219340. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:38,974][00403] Avg episode reward: [(0, '27.366')]
+[2024-08-06 07:50:43,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 4898816. Throughput: 0: 917.5. Samples: 1223944. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:50:43,970][00403] Avg episode reward: [(0, '25.582')]
+[2024-08-06 07:50:47,132][08760] Updated weights for policy 0, policy_version 1200 (0.0014)
+[2024-08-06 07:50:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 4919296. Throughput: 0: 916.2. Samples: 1230168. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:50:48,971][00403] Avg episode reward: [(0, '25.685')]
+[2024-08-06 07:50:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 4935680. Throughput: 0: 910.4. Samples: 1232986. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:50:53,971][00403] Avg episode reward: [(0, '24.899')]
+[2024-08-06 07:50:58,850][08760] Updated weights for policy 0, policy_version 1210 (0.0014)
+[2024-08-06 07:50:58,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3686.3, 300 sec: 3568.4). Total num frames: 4956160. Throughput: 0: 916.1. Samples: 1237702. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:50:58,977][00403] Avg episode reward: [(0, '24.443')]
+[2024-08-06 07:51:03,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 4976640. Throughput: 0: 916.3. Samples: 1243970. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:03,975][00403] Avg episode reward: [(0, '23.892')]
+[2024-08-06 07:51:08,968][00403] Fps is (10 sec: 3277.3, 60 sec: 3618.2, 300 sec: 3554.5). Total num frames: 4988928. Throughput: 0: 903.2. Samples: 1246514. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:51:08,970][00403] Avg episode reward: [(0, '23.602')]
+[2024-08-06 07:51:10,468][08760] Updated weights for policy 0, policy_version 1220 (0.0021)
+[2024-08-06 07:51:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 5009408. Throughput: 0: 917.4. Samples: 1251534. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:13,975][00403] Avg episode reward: [(0, '23.631')]
+[2024-08-06 07:51:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5025792. Throughput: 0: 903.0. Samples: 1257090. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:18,973][00403] Avg episode reward: [(0, '23.251')]
+[2024-08-06 07:51:18,987][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001227_5025792.pth...
+[2024-08-06 07:51:19,100][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001019_4173824.pth
+[2024-08-06 07:51:22,824][08760] Updated weights for policy 0, policy_version 1230 (0.0013)
+[2024-08-06 07:51:23,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 5038080. Throughput: 0: 877.3. Samples: 1258820. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:51:23,975][00403] Avg episode reward: [(0, '23.801')]
+[2024-08-06 07:51:28,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 5058560. Throughput: 0: 874.1. Samples: 1263280. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:51:28,974][00403] Avg episode reward: [(0, '24.911')]
+[2024-08-06 07:51:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3481.6, 300 sec: 3540.6). Total num frames: 5074944. Throughput: 0: 855.6. Samples: 1268668. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:33,975][00403] Avg episode reward: [(0, '25.933')]
+[2024-08-06 07:51:35,357][08760] Updated weights for policy 0, policy_version 1240 (0.0016)
+[2024-08-06 07:51:38,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 5087232. Throughput: 0: 833.9. Samples: 1270510. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:38,974][00403] Avg episode reward: [(0, '26.615')]
+[2024-08-06 07:51:43,968][00403] Fps is (10 sec: 2867.2, 60 sec: 3413.3, 300 sec: 3512.8). Total num frames: 5103616. Throughput: 0: 832.4. Samples: 1275160. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:43,975][00403] Avg episode reward: [(0, '26.358')]
+[2024-08-06 07:51:47,239][08760] Updated weights for policy 0, policy_version 1250 (0.0015)
+[2024-08-06 07:51:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 5124096. Throughput: 0: 830.8. Samples: 1281356. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:48,971][00403] Avg episode reward: [(0, '26.589')]
+[2024-08-06 07:51:53,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 5140480. Throughput: 0: 833.4. Samples: 1284018. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:53,973][00403] Avg episode reward: [(0, '27.596')]
+[2024-08-06 07:51:58,885][08760] Updated weights for policy 0, policy_version 1260 (0.0014)
+[2024-08-06 07:51:58,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3413.4, 300 sec: 3526.7). Total num frames: 5160960. Throughput: 0: 830.5. Samples: 1288906. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:51:58,971][00403] Avg episode reward: [(0, '28.010')]
+[2024-08-06 07:52:03,971][00403] Fps is (10 sec: 4095.0, 60 sec: 3413.2, 300 sec: 3540.6). Total num frames: 5181440. Throughput: 0: 845.0. Samples: 1295116. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:52:03,972][00403] Avg episode reward: [(0, '27.385')]
+[2024-08-06 07:52:08,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 3526.7). Total num frames: 5193728. Throughput: 0: 860.3. Samples: 1297532. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:08,971][00403] Avg episode reward: [(0, '27.171')]
+[2024-08-06 07:52:10,361][08760] Updated weights for policy 0, policy_version 1270 (0.0013)
+[2024-08-06 07:52:13,969][00403] Fps is (10 sec: 3277.5, 60 sec: 3413.3, 300 sec: 3540.6). Total num frames: 5214208. Throughput: 0: 875.1. Samples: 1302658. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:13,971][00403] Avg episode reward: [(0, '27.920')]
+[2024-08-06 07:52:18,970][00403] Fps is (10 sec: 4095.3, 60 sec: 3481.5, 300 sec: 3540.6). Total num frames: 5234688. Throughput: 0: 894.7. Samples: 1308930. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:52:18,972][00403] Avg episode reward: [(0, '28.413')]
+[2024-08-06 07:52:21,000][08760] Updated weights for policy 0, policy_version 1280 (0.0014)
+[2024-08-06 07:52:23,968][00403] Fps is (10 sec: 3686.5, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5251072. Throughput: 0: 902.1. Samples: 1311106. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:52:23,974][00403] Avg episode reward: [(0, '27.808')]
+[2024-08-06 07:52:28,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3549.9, 300 sec: 3554.5). Total num frames: 5271552. Throughput: 0: 917.2. Samples: 1316436. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:28,970][00403] Avg episode reward: [(0, '27.669')]
+[2024-08-06 07:52:31,898][08760] Updated weights for policy 0, policy_version 1290 (0.0014)
+[2024-08-06 07:52:33,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3540.6). Total num frames: 5287936. Throughput: 0: 917.4. Samples: 1322638. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:33,971][00403] Avg episode reward: [(0, '28.294')]
+[2024-08-06 07:52:38,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 5304320. Throughput: 0: 902.6. Samples: 1324636. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:52:38,971][00403] Avg episode reward: [(0, '28.595')]
+[2024-08-06 07:52:43,463][08760] Updated weights for policy 0, policy_version 1300 (0.0016)
+[2024-08-06 07:52:43,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3554.5). Total num frames: 5324800. Throughput: 0: 918.8. Samples: 1330252. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:52:43,973][00403] Avg episode reward: [(0, '27.773')]
+[2024-08-06 07:52:48,971][00403] Fps is (10 sec: 4094.8, 60 sec: 3686.2, 300 sec: 3568.3). Total num frames: 5345280. Throughput: 0: 915.5. Samples: 1336312. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:48,974][00403] Avg episode reward: [(0, '28.587')]
+[2024-08-06 07:52:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 5357568. Throughput: 0: 905.2. Samples: 1338268. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:53,972][00403] Avg episode reward: [(0, '27.877')]
+[2024-08-06 07:52:55,075][08760] Updated weights for policy 0, policy_version 1310 (0.0014)
+[2024-08-06 07:52:58,968][00403] Fps is (10 sec: 3277.8, 60 sec: 3618.1, 300 sec: 3554.5). Total num frames: 5378048. Throughput: 0: 915.4. Samples: 1343850. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:52:58,970][00403] Avg episode reward: [(0, '28.350')]
+[2024-08-06 07:53:03,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.3, 300 sec: 3568.4). Total num frames: 5398528. Throughput: 0: 908.0. Samples: 1349788. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:03,976][00403] Avg episode reward: [(0, '27.173')]
+[2024-08-06 07:53:06,556][08760] Updated weights for policy 0, policy_version 1320 (0.0015)
+[2024-08-06 07:53:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 5414912. Throughput: 0: 902.3. Samples: 1351708. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:08,978][00403] Avg episode reward: [(0, '26.504')]
+[2024-08-06 07:53:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3568.4). Total num frames: 5435392. Throughput: 0: 916.6. Samples: 1357684. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:13,971][00403] Avg episode reward: [(0, '27.401')]
+[2024-08-06 07:53:16,622][08760] Updated weights for policy 0, policy_version 1330 (0.0019)
+[2024-08-06 07:53:18,971][00403] Fps is (10 sec: 3685.4, 60 sec: 3618.1, 300 sec: 3568.3). Total num frames: 5451776. Throughput: 0: 904.2. Samples: 1363328. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:18,974][00403] Avg episode reward: [(0, '26.063')]
+[2024-08-06 07:53:18,984][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001331_5451776.pth...
+[2024-08-06 07:53:19,099][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001123_4599808.pth
+[2024-08-06 07:53:23,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 5468160. Throughput: 0: 900.7. Samples: 1365166. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:23,970][00403] Avg episode reward: [(0, '26.364')]
+[2024-08-06 07:53:28,258][08760] Updated weights for policy 0, policy_version 1340 (0.0015)
+[2024-08-06 07:53:28,968][00403] Fps is (10 sec: 3687.4, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 5488640. Throughput: 0: 912.5. Samples: 1371314. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:28,971][00403] Avg episode reward: [(0, '25.072')]
+[2024-08-06 07:53:33,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3618.0, 300 sec: 3568.4). Total num frames: 5505024. Throughput: 0: 900.7. Samples: 1376840. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:33,972][00403] Avg episode reward: [(0, '26.167')]
+[2024-08-06 07:53:38,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 5525504. Throughput: 0: 904.0. Samples: 1378946. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:38,974][00403] Avg episode reward: [(0, '25.462')]
+[2024-08-06 07:53:39,809][08760] Updated weights for policy 0, policy_version 1350 (0.0019)
+[2024-08-06 07:53:43,968][00403] Fps is (10 sec: 4096.7, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 5545984. Throughput: 0: 917.4. Samples: 1385132. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:43,976][00403] Avg episode reward: [(0, '26.035')]
+[2024-08-06 07:53:48,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.3, 300 sec: 3582.3). Total num frames: 5562368. Throughput: 0: 902.8. Samples: 1390414. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:48,972][00403] Avg episode reward: [(0, '25.857')]
+[2024-08-06 07:53:51,455][08760] Updated weights for policy 0, policy_version 1360 (0.0014)
+[2024-08-06 07:53:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3582.3). Total num frames: 5578752. Throughput: 0: 910.4. Samples: 1392678. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:53:53,971][00403] Avg episode reward: [(0, '26.810')]
+[2024-08-06 07:53:58,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3686.3, 300 sec: 3582.2). Total num frames: 5599232. Throughput: 0: 912.7. Samples: 1398758. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:53:58,974][00403] Avg episode reward: [(0, '27.585')]
+[2024-08-06 07:54:02,139][08760] Updated weights for policy 0, policy_version 1370 (0.0017)
+[2024-08-06 07:54:03,969][00403] Fps is (10 sec: 3686.2, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 5615616. Throughput: 0: 900.4. Samples: 1403842. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:03,976][00403] Avg episode reward: [(0, '28.861')]
+[2024-08-06 07:54:08,968][00403] Fps is (10 sec: 3277.3, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 5632000. Throughput: 0: 913.4. Samples: 1406268. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:54:08,973][00403] Avg episode reward: [(0, '28.329')]
+[2024-08-06 07:54:13,160][08760] Updated weights for policy 0, policy_version 1380 (0.0014)
+[2024-08-06 07:54:13,968][00403] Fps is (10 sec: 3686.6, 60 sec: 3618.1, 300 sec: 3568.4). Total num frames: 5652480. Throughput: 0: 915.6. Samples: 1412516. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:54:13,976][00403] Avg episode reward: [(0, '29.621')]
+[2024-08-06 07:54:13,978][08747] Saving new best policy, reward=29.621!
+[2024-08-06 07:54:18,970][00403] Fps is (10 sec: 3685.6, 60 sec: 3618.2, 300 sec: 3582.2). Total num frames: 5668864. Throughput: 0: 900.7. Samples: 1417370. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:18,972][00403] Avg episode reward: [(0, '28.260')]
+[2024-08-06 07:54:23,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 5689344. Throughput: 0: 912.6. Samples: 1420012. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:54:23,974][00403] Avg episode reward: [(0, '29.069')]
+[2024-08-06 07:54:24,839][08760] Updated weights for policy 0, policy_version 1390 (0.0016)
+[2024-08-06 07:54:28,970][00403] Fps is (10 sec: 4096.1, 60 sec: 3686.3, 300 sec: 3610.0). Total num frames: 5709824. Throughput: 0: 910.4. Samples: 1426102. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:28,973][00403] Avg episode reward: [(0, '28.348')]
+[2024-08-06 07:54:33,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.2, 300 sec: 3582.3). Total num frames: 5722112. Throughput: 0: 895.7. Samples: 1430722. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:33,971][00403] Avg episode reward: [(0, '27.477')]
+[2024-08-06 07:54:36,532][08760] Updated weights for policy 0, policy_version 1400 (0.0013)
+[2024-08-06 07:54:38,968][00403] Fps is (10 sec: 3277.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5742592. Throughput: 0: 909.8. Samples: 1433618. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:38,971][00403] Avg episode reward: [(0, '27.912')]
+[2024-08-06 07:54:43,968][00403] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5763072. Throughput: 0: 912.5. Samples: 1439820. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:54:43,970][00403] Avg episode reward: [(0, '28.161')]
+[2024-08-06 07:54:48,114][08760] Updated weights for policy 0, policy_version 1410 (0.0018)
+[2024-08-06 07:54:48,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3549.9, 300 sec: 3582.3). Total num frames: 5775360. Throughput: 0: 897.4. Samples: 1444226. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:54:48,975][00403] Avg episode reward: [(0, '27.042')]
+[2024-08-06 07:54:53,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5795840. Throughput: 0: 911.9. Samples: 1447304. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:53,971][00403] Avg episode reward: [(0, '25.800')]
+[2024-08-06 07:54:58,118][08760] Updated weights for policy 0, policy_version 1420 (0.0016)
+[2024-08-06 07:54:58,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3596.1). Total num frames: 5816320. Throughput: 0: 909.5. Samples: 1453444. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:54:58,974][00403] Avg episode reward: [(0, '25.645')]
+[2024-08-06 07:55:03,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3596.2). Total num frames: 5832704. Throughput: 0: 901.6. Samples: 1457938. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:55:03,975][00403] Avg episode reward: [(0, '25.827')]
+[2024-08-06 07:55:08,969][00403] Fps is (10 sec: 3686.3, 60 sec: 3686.4, 300 sec: 3610.0). Total num frames: 5853184. Throughput: 0: 911.9. Samples: 1461048. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:08,975][00403] Avg episode reward: [(0, '25.751')]
+[2024-08-06 07:55:09,714][08760] Updated weights for policy 0, policy_version 1430 (0.0014)
+[2024-08-06 07:55:13,969][00403] Fps is (10 sec: 3686.2, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 5869568. Throughput: 0: 913.5. Samples: 1467210. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:13,972][00403] Avg episode reward: [(0, '24.909')]
+[2024-08-06 07:55:18,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.3, 300 sec: 3582.3). Total num frames: 5885952. Throughput: 0: 910.1. Samples: 1471676. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:18,971][00403] Avg episode reward: [(0, '25.626')]
+[2024-08-06 07:55:18,979][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001437_5885952.pth...
+[2024-08-06 07:55:19,075][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001227_5025792.pth
+[2024-08-06 07:55:21,334][08760] Updated weights for policy 0, policy_version 1440 (0.0025)
+[2024-08-06 07:55:23,968][00403] Fps is (10 sec: 3686.6, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5906432. Throughput: 0: 913.1. Samples: 1474708. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:55:23,971][00403] Avg episode reward: [(0, '26.967')]
+[2024-08-06 07:55:28,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3618.2, 300 sec: 3596.2). Total num frames: 5926912. Throughput: 0: 910.2. Samples: 1480780. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:55:28,971][00403] Avg episode reward: [(0, '27.429')]
+[2024-08-06 07:55:33,058][08760] Updated weights for policy 0, policy_version 1450 (0.0013)
+[2024-08-06 07:55:33,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 5939200. Throughput: 0: 911.9. Samples: 1485262. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:55:33,971][00403] Avg episode reward: [(0, '27.237')]
+[2024-08-06 07:55:38,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5959680. Throughput: 0: 912.0. Samples: 1488346. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:38,975][00403] Avg episode reward: [(0, '26.826')]
+[2024-08-06 07:55:43,660][08760] Updated weights for policy 0, policy_version 1460 (0.0017)
+[2024-08-06 07:55:43,968][00403] Fps is (10 sec: 4096.1, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 5980160. Throughput: 0: 908.7. Samples: 1494336. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:43,972][00403] Avg episode reward: [(0, '27.017')]
+[2024-08-06 07:55:48,970][00403] Fps is (10 sec: 3685.8, 60 sec: 3686.3, 300 sec: 3596.1). Total num frames: 5996544. Throughput: 0: 912.8. Samples: 1499016. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:55:48,976][00403] Avg episode reward: [(0, '26.277')]
+[2024-08-06 07:55:53,968][00403] Fps is (10 sec: 3686.3, 60 sec: 3686.4, 300 sec: 3596.2). Total num frames: 6017024. Throughput: 0: 912.3. Samples: 1502100. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:53,971][00403] Avg episode reward: [(0, '27.933')]
+[2024-08-06 07:55:54,636][08760] Updated weights for policy 0, policy_version 1470 (0.0016)
+[2024-08-06 07:55:58,968][00403] Fps is (10 sec: 3687.0, 60 sec: 3618.1, 300 sec: 3582.3). Total num frames: 6033408. Throughput: 0: 903.5. Samples: 1507866. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:55:58,972][00403] Avg episode reward: [(0, '27.945')]
+[2024-08-06 07:56:03,968][00403] Fps is (10 sec: 3276.9, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 6049792. Throughput: 0: 911.5. Samples: 1512694. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:56:03,970][00403] Avg episode reward: [(0, '27.276')]
+[2024-08-06 07:56:06,266][08760] Updated weights for policy 0, policy_version 1480 (0.0018)
+[2024-08-06 07:56:08,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.1, 300 sec: 3596.1). Total num frames: 6070272. Throughput: 0: 912.4. Samples: 1515768. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:08,970][00403] Avg episode reward: [(0, '26.815')]
+[2024-08-06 07:56:13,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3618.2, 300 sec: 3596.2). Total num frames: 6086656. Throughput: 0: 906.1. Samples: 1521556. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:13,976][00403] Avg episode reward: [(0, '27.648')]
+[2024-08-06 07:56:17,672][08760] Updated weights for policy 0, policy_version 1490 (0.0020)
+[2024-08-06 07:56:18,968][00403] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 6107136. Throughput: 0: 917.6. Samples: 1526552. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:56:18,971][00403] Avg episode reward: [(0, '27.498')]
+[2024-08-06 07:56:23,968][00403] Fps is (10 sec: 4096.0, 60 sec: 3686.4, 300 sec: 3623.9). Total num frames: 6127616. Throughput: 0: 918.3. Samples: 1529670. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:23,971][00403] Avg episode reward: [(0, '26.258')]
+[2024-08-06 07:56:28,905][08760] Updated weights for policy 0, policy_version 1500 (0.0016)
+[2024-08-06 07:56:28,974][00403] Fps is (10 sec: 3684.4, 60 sec: 3617.8, 300 sec: 3623.9). Total num frames: 6144000. Throughput: 0: 906.2. Samples: 1535120. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:28,976][00403] Avg episode reward: [(0, '25.453')]
+[2024-08-06 07:56:33,969][00403] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3637.8). Total num frames: 6160384. Throughput: 0: 917.8. Samples: 1540314. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+[2024-08-06 07:56:33,971][00403] Avg episode reward: [(0, '25.979')]
+[2024-08-06 07:56:38,970][00403] Fps is (10 sec: 3687.9, 60 sec: 3686.3, 300 sec: 3651.7). Total num frames: 6180864. Throughput: 0: 916.9. Samples: 1543360. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:38,972][00403] Avg episode reward: [(0, '27.944')]
+[2024-08-06 07:56:39,325][08760] Updated weights for policy 0, policy_version 1510 (0.0014)
+[2024-08-06 07:56:43,970][00403] Fps is (10 sec: 3685.9, 60 sec: 3618.0, 300 sec: 3637.8). Total num frames: 6197248. Throughput: 0: 907.9. Samples: 1548724. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:43,979][00403] Avg episode reward: [(0, '28.080')]
+[2024-08-06 07:56:48,968][00403] Fps is (10 sec: 3277.3, 60 sec: 3618.2, 300 sec: 3637.8). Total num frames: 6213632. Throughput: 0: 917.6. Samples: 1553988. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:56:48,975][00403] Avg episode reward: [(0, '27.751')]
+[2024-08-06 07:56:50,978][08760] Updated weights for policy 0, policy_version 1520 (0.0016)
+[2024-08-06 07:56:53,968][00403] Fps is (10 sec: 4096.6, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 6238208. Throughput: 0: 917.3. Samples: 1557046. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:53,975][00403] Avg episode reward: [(0, '27.375')]
+[2024-08-06 07:56:58,975][00403] Fps is (10 sec: 3684.1, 60 sec: 3617.8, 300 sec: 3623.9). Total num frames: 6250496. Throughput: 0: 902.5. Samples: 1562174. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:56:58,982][00403] Avg episode reward: [(0, '27.599')]
+[2024-08-06 07:57:02,519][08760] Updated weights for policy 0, policy_version 1530 (0.0013)
+[2024-08-06 07:57:03,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 6270976. Throughput: 0: 914.2. Samples: 1567690. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:57:03,971][00403] Avg episode reward: [(0, '27.252')]
+[2024-08-06 07:57:08,969][00403] Fps is (10 sec: 4098.4, 60 sec: 3686.4, 300 sec: 3651.7). Total num frames: 6291456. Throughput: 0: 914.2. Samples: 1570810. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:57:08,972][00403] Avg episode reward: [(0, '26.248')]
+[2024-08-06 07:57:13,968][00403] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3623.9). Total num frames: 6303744. Throughput: 0: 901.7. Samples: 1575692. Policy #0 lag: (min: 0.0, avg: 0.1, max: 1.0)
+[2024-08-06 07:57:13,976][00403] Avg episode reward: [(0, '26.241')]
+[2024-08-06 07:57:14,231][08760] Updated weights for policy 0, policy_version 1540 (0.0016)
+[2024-08-06 07:57:18,970][00403] Fps is (10 sec: 3276.4, 60 sec: 3618.0, 300 sec: 3637.8). Total num frames: 6324224. Throughput: 0: 912.5. Samples: 1581376. Policy #0 lag: (min: 0.0, avg: 0.2, max: 1.0)
+[2024-08-06 07:57:18,977][00403] Avg episode reward: [(0, '26.738')]
+[2024-08-06 07:57:18,987][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001544_6324224.pth...
+[2024-08-06 07:57:18,994][00403] Components not started: RolloutWorker_w0, RolloutWorker_w1, RolloutWorker_w3, RolloutWorker_w6, RolloutWorker_w7, wait_time=1800.0 seconds
+[2024-08-06 07:57:18,997][00403] Components take too long to start: RolloutWorker_w0, RolloutWorker_w1, RolloutWorker_w3, RolloutWorker_w6, RolloutWorker_w7. Aborting the experiment!
+
+
+
+[2024-08-06 07:57:19,000][08747] Stopping Batcher_0...
+[2024-08-06 07:57:19,000][08747] Loop batcher_evt_loop terminating...
+[2024-08-06 07:57:19,001][00403] Component Batcher_0 stopped!
+[2024-08-06 07:57:19,003][00403] Component RolloutWorker_w0 process died already! Don't wait for it.
+[2024-08-06 07:57:19,006][00403] Component RolloutWorker_w1 process died already! Don't wait for it.
+[2024-08-06 07:57:19,009][00403] Component RolloutWorker_w3 process died already! Don't wait for it.
+[2024-08-06 07:57:19,013][00403] Component RolloutWorker_w6 process died already! Don't wait for it.
+[2024-08-06 07:57:19,018][00403] Component RolloutWorker_w7 process died already! Don't wait for it.
+[2024-08-06 07:57:19,023][00403] Waiting for ['LearnerWorker_p0', 'InferenceWorker_p0-w0', 'RolloutWorker_w2', 'RolloutWorker_w4', 'RolloutWorker_w5'] to stop...
+[2024-08-06 07:57:19,052][08770] Stopping RolloutWorker_w5...
+[2024-08-06 07:57:19,053][00403] Component RolloutWorker_w5 stopped!
+[2024-08-06 07:57:19,054][00403] Waiting for ['LearnerWorker_p0', 'InferenceWorker_p0-w0', 'RolloutWorker_w2', 'RolloutWorker_w4'] to stop...
+[2024-08-06 07:57:19,053][08770] Loop rollout_proc5_evt_loop terminating...
+[2024-08-06 07:57:19,079][08760] Weights refcount: 2 0
+[2024-08-06 07:57:19,085][08760] Stopping InferenceWorker_p0-w0...
+[2024-08-06 07:57:19,085][08760] Loop inference_proc0-0_evt_loop terminating...
+[2024-08-06 07:57:19,088][00403] Component InferenceWorker_p0-w0 stopped!
+[2024-08-06 07:57:19,095][00403] Waiting for ['LearnerWorker_p0', 'RolloutWorker_w2', 'RolloutWorker_w4'] to stop...
+[2024-08-06 07:57:19,111][00403] Component RolloutWorker_w4 stopped!
+[2024-08-06 07:57:19,114][00403] Waiting for ['LearnerWorker_p0', 'RolloutWorker_w2'] to stop...
+[2024-08-06 07:57:19,118][08768] Stopping RolloutWorker_w4...
+[2024-08-06 07:57:19,122][00403] Component RolloutWorker_w2 stopped!
+[2024-08-06 07:57:19,127][00403] Waiting for ['LearnerWorker_p0'] to stop...
+[2024-08-06 07:57:19,119][08768] Loop rollout_proc4_evt_loop terminating...
+[2024-08-06 07:57:19,130][08763] Stopping RolloutWorker_w2...
+[2024-08-06 07:57:19,132][08763] Loop rollout_proc2_evt_loop terminating...
+[2024-08-06 07:57:19,154][08747] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001331_5451776.pth
+[2024-08-06 07:57:19,171][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001544_6324224.pth...
+[2024-08-06 07:57:19,363][08747] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001544_6324224.pth...
+[2024-08-06 07:57:19,528][00403] Component LearnerWorker_p0 stopped!
+[2024-08-06 07:57:19,532][00403] Waiting for process learner_proc0 to stop...
+[2024-08-06 07:57:19,536][08747] Stopping LearnerWorker_p0...
+[2024-08-06 07:57:19,537][08747] Loop learner_proc0_evt_loop terminating...
+[2024-08-06 07:57:20,604][00403] Waiting for process inference_proc0-0 to join...
+[2024-08-06 07:57:20,611][00403] Waiting for process rollout_proc0 to join...
+[2024-08-06 07:57:20,616][00403] Waiting for process rollout_proc1 to join...
+[2024-08-06 07:57:20,617][00403] Waiting for process rollout_proc2 to join...
+[2024-08-06 07:57:21,050][00403] Waiting for process rollout_proc3 to join...
+[2024-08-06 07:57:21,051][00403] Waiting for process rollout_proc4 to join...
+[2024-08-06 07:57:21,056][00403] Waiting for process rollout_proc5 to join...
+[2024-08-06 07:57:21,061][00403] Waiting for process rollout_proc6 to join...
+[2024-08-06 07:57:21,063][00403] Waiting for process rollout_proc7 to join...
+[2024-08-06 07:57:21,067][00403] Batcher 0 profile tree view:
+batching: 33.5877, releasing_batches: 0.0419
+[2024-08-06 07:57:21,068][00403] InferenceWorker_p0-w0 profile tree view:
+wait_policy: 0.0037
+  wait_policy_total: 661.5802
+update_model: 15.9337
+  weight_update: 0.0017
+one_step: 0.0023
+  handle_policy_step: 1025.2033
+    deserialize: 25.2536, stack: 6.2538, obs_to_device_normalize: 225.5553, forward: 552.5756, send_messages: 34.2414
+    prepare_outputs: 129.6229
+      to_cpu: 78.0787
+[2024-08-06 07:57:21,069][00403] Learner 0 profile tree view:
+misc: 0.0086, prepare_batch: 19.6548
+train: 103.1083
+  epoch_init: 0.0094, minibatch_init: 0.0130, losses_postprocess: 0.9034, kl_divergence: 0.9084, after_optimizer: 51.6894
+  calculate_losses: 33.8661
+    losses_init: 0.0123, forward_head: 1.5583, bptt_initial: 23.3629, tail: 1.3947, advantages_returns: 0.3519, losses: 4.2693
+    bptt: 2.5256
+      bptt_forward_core: 2.3934
+  update: 14.9287
+    clip: 1.3742
+[2024-08-06 07:57:21,071][00403] Loop Runner_EvtLoop terminating...
+[2024-08-06 07:57:21,072][00403] Runner profile tree view:
+main_loop: 1797.8381
+[2024-08-06 07:57:21,073][00403] Collected {0: 6324224}, FPS: 3517.7
+[2024-08-06 07:57:50,731][00403] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-08-06 07:57:50,732][00403] Overriding arg 'num_workers' with value 1 passed from command line
+[2024-08-06 07:57:50,735][00403] Adding new argument 'no_render'=True that is not in the saved config file!
+[2024-08-06 07:57:50,738][00403] Adding new argument 'save_video'=True that is not in the saved config file!
+[2024-08-06 07:57:50,739][00403] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2024-08-06 07:57:50,741][00403] Adding new argument 'video_name'=None that is not in the saved config file!
+[2024-08-06 07:57:50,743][00403] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+[2024-08-06 07:57:50,745][00403] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2024-08-06 07:57:50,745][00403] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+[2024-08-06 07:57:50,746][00403] Adding new argument 'hf_repository'=None that is not in the saved config file!
+[2024-08-06 07:57:50,747][00403] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2024-08-06 07:57:50,748][00403] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2024-08-06 07:57:50,749][00403] Adding new argument 'train_script'=None that is not in the saved config file!
+[2024-08-06 07:57:50,750][00403] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2024-08-06 07:57:50,752][00403] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2024-08-06 07:57:50,786][00403] Doom resolution: 160x120, resize resolution: (128, 72)
+[2024-08-06 07:57:50,790][00403] RunningMeanStd input shape: (3, 72, 128)
+[2024-08-06 07:57:50,793][00403] RunningMeanStd input shape: (1,)
+[2024-08-06 07:57:50,809][00403] ConvEncoder: input_channels=3
+[2024-08-06 07:57:50,910][00403] Conv encoder output size: 512
+[2024-08-06 07:57:50,911][00403] Policy head output size: 512
+[2024-08-06 07:57:51,190][00403] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001544_6324224.pth...
+[2024-08-06 07:57:51,986][00403] Num frames 100...
+[2024-08-06 07:57:52,118][00403] Num frames 200...
+[2024-08-06 07:57:52,244][00403] Num frames 300...
+[2024-08-06 07:57:52,376][00403] Num frames 400...
+[2024-08-06 07:57:52,524][00403] Num frames 500...
+[2024-08-06 07:57:52,645][00403] Num frames 600...
+[2024-08-06 07:57:52,767][00403] Num frames 700...
+[2024-08-06 07:57:52,887][00403] Num frames 800...
+[2024-08-06 07:57:53,004][00403] Num frames 900...
+[2024-08-06 07:57:53,129][00403] Num frames 1000...
+[2024-08-06 07:57:53,193][00403] Avg episode rewards: #0: 24.030, true rewards: #0: 10.030
+[2024-08-06 07:57:53,195][00403] Avg episode reward: 24.030, avg true_objective: 10.030
+[2024-08-06 07:57:53,322][00403] Num frames 1100...
+[2024-08-06 07:57:53,448][00403] Num frames 1200...
+[2024-08-06 07:57:53,568][00403] Num frames 1300...
+[2024-08-06 07:57:53,693][00403] Num frames 1400...
+[2024-08-06 07:57:53,813][00403] Num frames 1500...
+[2024-08-06 07:57:53,937][00403] Num frames 1600...
+[2024-08-06 07:57:54,063][00403] Num frames 1700...
+[2024-08-06 07:57:54,191][00403] Num frames 1800...
+[2024-08-06 07:57:54,318][00403] Num frames 1900...
+[2024-08-06 07:57:54,441][00403] Num frames 2000...
+[2024-08-06 07:57:54,559][00403] Num frames 2100...
+[2024-08-06 07:57:54,680][00403] Num frames 2200...
+[2024-08-06 07:57:54,800][00403] Num frames 2300...
+[2024-08-06 07:57:54,918][00403] Num frames 2400...
+[2024-08-06 07:57:55,038][00403] Num frames 2500...
+[2024-08-06 07:57:55,160][00403] Num frames 2600...
+[2024-08-06 07:57:55,288][00403] Num frames 2700...
+[2024-08-06 07:57:55,464][00403] Avg episode rewards: #0: 33.975, true rewards: #0: 13.975
+[2024-08-06 07:57:55,466][00403] Avg episode reward: 33.975, avg true_objective: 13.975
+[2024-08-06 07:57:55,476][00403] Num frames 2800...
+[2024-08-06 07:57:55,594][00403] Num frames 2900...
+[2024-08-06 07:57:55,710][00403] Num frames 3000...
+[2024-08-06 07:57:55,832][00403] Num frames 3100...
+[2024-08-06 07:57:55,951][00403] Num frames 3200...
+[2024-08-06 07:57:56,068][00403] Num frames 3300...
+[2024-08-06 07:57:56,193][00403] Num frames 3400...
+[2024-08-06 07:57:56,328][00403] Num frames 3500...
+[2024-08-06 07:57:56,498][00403] Avg episode rewards: #0: 27.983, true rewards: #0: 11.983
+[2024-08-06 07:57:56,500][00403] Avg episode reward: 27.983, avg true_objective: 11.983
+[2024-08-06 07:57:56,508][00403] Num frames 3600...
+[2024-08-06 07:57:56,634][00403] Num frames 3700...
+[2024-08-06 07:57:56,754][00403] Num frames 3800...
+[2024-08-06 07:57:56,876][00403] Num frames 3900...
+[2024-08-06 07:57:56,996][00403] Num frames 4000...
+[2024-08-06 07:57:57,119][00403] Num frames 4100...
+[2024-08-06 07:57:57,255][00403] Num frames 4200...
+[2024-08-06 07:57:57,384][00403] Num frames 4300...
+[2024-08-06 07:57:57,505][00403] Num frames 4400...
+[2024-08-06 07:57:57,594][00403] Avg episode rewards: #0: 25.317, true rewards: #0: 11.067
+[2024-08-06 07:57:57,596][00403] Avg episode reward: 25.317, avg true_objective: 11.067
+[2024-08-06 07:57:57,685][00403] Num frames 4500...
+[2024-08-06 07:57:57,809][00403] Num frames 4600...
+[2024-08-06 07:57:57,981][00403] Num frames 4700...
+[2024-08-06 07:57:58,162][00403] Num frames 4800...
+[2024-08-06 07:57:58,358][00403] Num frames 4900...
+[2024-08-06 07:57:58,520][00403] Num frames 5000...
+[2024-08-06 07:57:58,681][00403] Num frames 5100...
+[2024-08-06 07:57:58,840][00403] Num frames 5200...
+[2024-08-06 07:57:59,000][00403] Num frames 5300...
+[2024-08-06 07:57:59,169][00403] Num frames 5400...
+[2024-08-06 07:57:59,356][00403] Num frames 5500...
+[2024-08-06 07:57:59,527][00403] Num frames 5600...
+[2024-08-06 07:57:59,707][00403] Num frames 5700...
+[2024-08-06 07:57:59,886][00403] Num frames 5800...
+[2024-08-06 07:57:59,951][00403] Avg episode rewards: #0: 26.406, true rewards: #0: 11.606
+[2024-08-06 07:57:59,953][00403] Avg episode reward: 26.406, avg true_objective: 11.606
+[2024-08-06 07:58:00,125][00403] Num frames 5900...
+[2024-08-06 07:58:00,280][00403] Num frames 6000...
+[2024-08-06 07:58:00,416][00403] Num frames 6100...
+[2024-08-06 07:58:00,541][00403] Num frames 6200...
+[2024-08-06 07:58:00,663][00403] Num frames 6300...
+[2024-08-06 07:58:00,783][00403] Num frames 6400...
+[2024-08-06 07:58:00,907][00403] Num frames 6500...
+[2024-08-06 07:58:01,029][00403] Num frames 6600...
+[2024-08-06 07:58:01,151][00403] Num frames 6700...
+[2024-08-06 07:58:01,273][00403] Num frames 6800...
+[2024-08-06 07:58:01,368][00403] Avg episode rewards: #0: 25.378, true rewards: #0: 11.378
+[2024-08-06 07:58:01,370][00403] Avg episode reward: 25.378, avg true_objective: 11.378
+[2024-08-06 07:58:01,464][00403] Num frames 6900...
+[2024-08-06 07:58:01,587][00403] Num frames 7000...
+[2024-08-06 07:58:01,706][00403] Num frames 7100...
+[2024-08-06 07:58:01,826][00403] Num frames 7200...
+[2024-08-06 07:58:01,974][00403] Avg episode rewards: #0: 22.536, true rewards: #0: 10.393
+[2024-08-06 07:58:01,975][00403] Avg episode reward: 22.536, avg true_objective: 10.393
+[2024-08-06 07:58:02,009][00403] Num frames 7300...
+[2024-08-06 07:58:02,140][00403] Num frames 7400...
+[2024-08-06 07:58:02,263][00403] Num frames 7500...
+[2024-08-06 07:58:02,401][00403] Num frames 7600...
+[2024-08-06 07:58:02,523][00403] Num frames 7700...
+[2024-08-06 07:58:02,647][00403] Num frames 7800...
+[2024-08-06 07:58:02,771][00403] Num frames 7900...
+[2024-08-06 07:58:02,890][00403] Num frames 8000...
+[2024-08-06 07:58:03,009][00403] Num frames 8100...
+[2024-08-06 07:58:03,136][00403] Num frames 8200...
+[2024-08-06 07:58:03,261][00403] Num frames 8300...
+[2024-08-06 07:58:03,390][00403] Num frames 8400...
+[2024-08-06 07:58:03,564][00403] Avg episode rewards: #0: 23.614, true rewards: #0: 10.614
+[2024-08-06 07:58:03,566][00403] Avg episode reward: 23.614, avg true_objective: 10.614
+[2024-08-06 07:58:03,580][00403] Num frames 8500...
+[2024-08-06 07:58:03,702][00403] Num frames 8600...
+[2024-08-06 07:58:03,824][00403] Num frames 8700...
+[2024-08-06 07:58:03,942][00403] Num frames 8800...
+[2024-08-06 07:58:04,062][00403] Num frames 8900...
+[2024-08-06 07:58:04,187][00403] Num frames 9000...
+[2024-08-06 07:58:04,314][00403] Num frames 9100...
+[2024-08-06 07:58:04,440][00403] Num frames 9200...
+[2024-08-06 07:58:04,571][00403] Num frames 9300...
+[2024-08-06 07:58:04,693][00403] Num frames 9400...
+[2024-08-06 07:58:04,815][00403] Num frames 9500...
+[2024-08-06 07:58:04,940][00403] Num frames 9600...
+[2024-08-06 07:58:05,065][00403] Num frames 9700...
+[2024-08-06 07:58:05,187][00403] Num frames 9800...
+[2024-08-06 07:58:05,316][00403] Num frames 9900...
+[2024-08-06 07:58:05,435][00403] Num frames 10000...
+[2024-08-06 07:58:05,565][00403] Num frames 10100...
+[2024-08-06 07:58:05,686][00403] Num frames 10200...
+[2024-08-06 07:58:05,808][00403] Num frames 10300...
+[2024-08-06 07:58:05,933][00403] Num frames 10400...
+[2024-08-06 07:58:06,056][00403] Num frames 10500...
+[2024-08-06 07:58:06,224][00403] Avg episode rewards: #0: 27.101, true rewards: #0: 11.768
+[2024-08-06 07:58:06,226][00403] Avg episode reward: 27.101, avg true_objective: 11.768
+[2024-08-06 07:58:06,239][00403] Num frames 10600...
+[2024-08-06 07:58:06,369][00403] Num frames 10700...
+[2024-08-06 07:58:06,492][00403] Num frames 10800...
+[2024-08-06 07:58:06,621][00403] Num frames 10900...
+[2024-08-06 07:58:06,746][00403] Num frames 11000...
+[2024-08-06 07:58:06,867][00403] Num frames 11100...
+[2024-08-06 07:58:06,993][00403] Num frames 11200...
+[2024-08-06 07:58:07,115][00403] Num frames 11300...
+[2024-08-06 07:58:07,234][00403] Num frames 11400...
+[2024-08-06 07:58:07,368][00403] Num frames 11500...
+[2024-08-06 07:58:07,474][00403] Avg episode rewards: #0: 26.636, true rewards: #0: 11.536
+[2024-08-06 07:58:07,476][00403] Avg episode reward: 26.636, avg true_objective: 11.536
+[2024-08-06 07:59:12,586][00403] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+[2024-08-06 08:01:39,115][00403] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+[2024-08-06 08:01:39,117][00403] Overriding arg 'num_workers' with value 1 passed from command line
+[2024-08-06 08:01:39,120][00403] Adding new argument 'no_render'=True that is not in the saved config file!
+[2024-08-06 08:01:39,122][00403] Adding new argument 'save_video'=True that is not in the saved config file!
+[2024-08-06 08:01:39,124][00403] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+[2024-08-06 08:01:39,126][00403] Adding new argument 'video_name'=None that is not in the saved config file!
+[2024-08-06 08:01:39,127][00403] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+[2024-08-06 08:01:39,129][00403] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+[2024-08-06 08:01:39,130][00403] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+[2024-08-06 08:01:39,131][00403] Adding new argument 'hf_repository'='Charles0831/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+[2024-08-06 08:01:39,132][00403] Adding new argument 'policy_index'=0 that is not in the saved config file!
+[2024-08-06 08:01:39,133][00403] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+[2024-08-06 08:01:39,134][00403] Adding new argument 'train_script'=None that is not in the saved config file!
+[2024-08-06 08:01:39,135][00403] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+[2024-08-06 08:01:39,136][00403] Using frameskip 1 and render_action_repeat=4 for evaluation
+[2024-08-06 08:01:39,165][00403] RunningMeanStd input shape: (3, 72, 128)
+[2024-08-06 08:01:39,167][00403] RunningMeanStd input shape: (1,)
+[2024-08-06 08:01:39,179][00403] ConvEncoder: input_channels=3
+[2024-08-06 08:01:39,226][00403] Conv encoder output size: 512
+[2024-08-06 08:01:39,228][00403] Policy head output size: 512
+[2024-08-06 08:01:39,245][00403] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000001544_6324224.pth...
+[2024-08-06 08:01:39,677][00403] Num frames 100...
+[2024-08-06 08:01:39,808][00403] Num frames 200...
+[2024-08-06 08:01:39,993][00403] Num frames 300...
+[2024-08-06 08:01:40,111][00403] Num frames 400...
+[2024-08-06 08:01:40,239][00403] Num frames 500...
+[2024-08-06 08:01:40,310][00403] Avg episode rewards: #0: 7.120, true rewards: #0: 5.120
+[2024-08-06 08:01:40,313][00403] Avg episode reward: 7.120, avg true_objective: 5.120
+[2024-08-06 08:01:40,421][00403] Num frames 600...
+[2024-08-06 08:01:40,538][00403] Num frames 700...
+[2024-08-06 08:01:40,659][00403] Num frames 800...
+[2024-08-06 08:01:40,781][00403] Num frames 900...
+[2024-08-06 08:01:40,897][00403] Num frames 1000...
+[2024-08-06 08:01:41,016][00403] Num frames 1100...
+[2024-08-06 08:01:41,134][00403] Num frames 1200...
+[2024-08-06 08:01:41,261][00403] Num frames 1300...
+[2024-08-06 08:01:41,390][00403] Num frames 1400...
+[2024-08-06 08:01:41,510][00403] Num frames 1500...
+[2024-08-06 08:01:41,642][00403] Num frames 1600...
+[2024-08-06 08:01:41,765][00403] Num frames 1700...
+[2024-08-06 08:01:41,887][00403] Num frames 1800...
+[2024-08-06 08:01:42,009][00403] Num frames 1900...
+[2024-08-06 08:01:42,127][00403] Num frames 2000...
+[2024-08-06 08:01:42,248][00403] Num frames 2100...
+[2024-08-06 08:01:42,382][00403] Num frames 2200...
+[2024-08-06 08:01:42,503][00403] Num frames 2300...
+[2024-08-06 08:01:42,659][00403] Avg episode rewards: #0: 24.420, true rewards: #0: 11.920
+[2024-08-06 08:01:42,661][00403] Avg episode reward: 24.420, avg true_objective: 11.920
+[2024-08-06 08:01:42,683][00403] Num frames 2400...
+[2024-08-06 08:01:42,809][00403] Num frames 2500...
+[2024-08-06 08:01:42,928][00403] Num frames 2600...
+[2024-08-06 08:01:43,047][00403] Num frames 2700...
+[2024-08-06 08:01:43,167][00403] Num frames 2800...
+[2024-08-06 08:01:43,300][00403] Num frames 2900...
+[2024-08-06 08:01:43,435][00403] Num frames 3000...
+[2024-08-06 08:01:43,555][00403] Num frames 3100...
+[2024-08-06 08:01:43,674][00403] Num frames 3200...
+[2024-08-06 08:01:43,791][00403] Num frames 3300...
+[2024-08-06 08:01:43,910][00403] Num frames 3400...
+[2024-08-06 08:01:44,035][00403] Num frames 3500...
+[2024-08-06 08:01:44,194][00403] Num frames 3600...
+[2024-08-06 08:01:44,369][00403] Num frames 3700...
+[2024-08-06 08:01:44,531][00403] Num frames 3800...
+[2024-08-06 08:01:44,706][00403] Num frames 3900...
+[2024-08-06 08:01:44,873][00403] Num frames 4000...
+[2024-08-06 08:01:45,039][00403] Num frames 4100...
+[2024-08-06 08:01:45,203][00403] Num frames 4200...
+[2024-08-06 08:01:45,387][00403] Num frames 4300...
+[2024-08-06 08:01:45,564][00403] Num frames 4400...
+[2024-08-06 08:01:45,771][00403] Avg episode rewards: #0: 36.613, true rewards: #0: 14.947
+[2024-08-06 08:01:45,773][00403] Avg episode reward: 36.613, avg true_objective: 14.947
+[2024-08-06 08:01:45,805][00403] Num frames 4500...
+[2024-08-06 08:01:45,977][00403] Num frames 4600...
+[2024-08-06 08:01:46,149][00403] Num frames 4700...
+[2024-08-06 08:01:46,333][00403] Num frames 4800...
+[2024-08-06 08:01:46,515][00403] Num frames 4900...
+[2024-08-06 08:01:46,653][00403] Num frames 5000...
+[2024-08-06 08:01:46,776][00403] Num frames 5100...
+[2024-08-06 08:01:46,894][00403] Num frames 5200...
+[2024-08-06 08:01:47,018][00403] Num frames 5300...
+[2024-08-06 08:01:47,142][00403] Num frames 5400...
+[2024-08-06 08:01:47,213][00403] Avg episode rewards: #0: 32.030, true rewards: #0: 13.530
+[2024-08-06 08:01:47,214][00403] Avg episode reward: 32.030, avg true_objective: 13.530
+[2024-08-06 08:01:47,383][00403] Num frames 5500...
+[2024-08-06 08:01:47,590][00403] Num frames 5600...
+[2024-08-06 08:01:47,713][00403] Num frames 5700...
+[2024-08-06 08:01:47,835][00403] Num frames 5800...
+[2024-08-06 08:01:47,954][00403] Num frames 5900...
+[2024-08-06 08:01:48,078][00403] Num frames 6000...
+[2024-08-06 08:01:48,322][00403] Num frames 6100...
+[2024-08-06 08:01:48,439][00403] Num frames 6200...
+[2024-08-06 08:01:48,567][00403] Num frames 6300...
+[2024-08-06 08:01:48,690][00403] Num frames 6400...
+[2024-08-06 08:01:48,810][00403] Num frames 6500...
+[2024-08-06 08:01:48,927][00403] Num frames 6600...
+[2024-08-06 08:01:49,053][00403] Num frames 6700...
+[2024-08-06 08:01:49,173][00403] Num frames 6800...
+[2024-08-06 08:01:49,255][00403] Avg episode rewards: #0: 32.644, true rewards: #0: 13.644
+[2024-08-06 08:01:49,258][00403] Avg episode reward: 32.644, avg true_objective: 13.644
+[2024-08-06 08:01:49,361][00403] Num frames 6900...
+[2024-08-06 08:01:49,482][00403] Num frames 7000...
+[2024-08-06 08:01:49,612][00403] Num frames 7100...
+[2024-08-06 08:01:49,733][00403] Num frames 7200...
+[2024-08-06 08:01:49,850][00403] Num frames 7300...
+[2024-08-06 08:01:49,967][00403] Num frames 7400...
+[2024-08-06 08:01:50,084][00403] Num frames 7500...
+[2024-08-06 08:01:50,203][00403] Num frames 7600...
+[2024-08-06 08:01:50,335][00403] Num frames 7700...
+[2024-08-06 08:01:50,413][00403] Avg episode rewards: #0: 29.863, true rewards: #0: 12.863
+[2024-08-06 08:01:50,415][00403] Avg episode reward: 29.863, avg true_objective: 12.863
+[2024-08-06 08:01:50,513][00403] Num frames 7800...
+[2024-08-06 08:01:50,646][00403] Num frames 7900...
+[2024-08-06 08:01:50,764][00403] Num frames 8000...
+[2024-08-06 08:01:50,887][00403] Num frames 8100...
+[2024-08-06 08:01:50,946][00403] Avg episode rewards: #0: 26.288, true rewards: #0: 11.574
+[2024-08-06 08:01:50,947][00403] Avg episode reward: 26.288, avg true_objective: 11.574
+[2024-08-06 08:01:51,065][00403] Num frames 8200...
+[2024-08-06 08:01:51,185][00403] Num frames 8300...
+[2024-08-06 08:01:51,312][00403] Num frames 8400...
+[2024-08-06 08:01:51,437][00403] Num frames 8500...
+[2024-08-06 08:01:51,571][00403] Num frames 8600...
+[2024-08-06 08:01:51,694][00403] Num frames 8700...
+[2024-08-06 08:01:51,815][00403] Num frames 8800...
+[2024-08-06 08:01:51,937][00403] Num frames 8900...
+[2024-08-06 08:01:52,062][00403] Num frames 9000...
+[2024-08-06 08:01:52,185][00403] Num frames 9100...
+[2024-08-06 08:01:52,311][00403] Num frames 9200...
+[2024-08-06 08:01:52,432][00403] Num frames 9300...
+[2024-08-06 08:01:52,555][00403] Num frames 9400...
+[2024-08-06 08:01:52,688][00403] Num frames 9500...
+[2024-08-06 08:01:52,812][00403] Num frames 9600...
+[2024-08-06 08:01:52,936][00403] Num frames 9700...
+[2024-08-06 08:01:53,080][00403] Avg episode rewards: #0: 28.338, true rewards: #0: 12.214
+[2024-08-06 08:01:53,082][00403] Avg episode reward: 28.338, avg true_objective: 12.214
+[2024-08-06 08:01:53,119][00403] Num frames 9800...
+[2024-08-06 08:01:53,244][00403] Num frames 9900...
+[2024-08-06 08:01:53,374][00403] Num frames 10000...
+[2024-08-06 08:01:53,494][00403] Num frames 10100...
+[2024-08-06 08:01:53,625][00403] Num frames 10200...
+[2024-08-06 08:01:53,747][00403] Num frames 10300...
+[2024-08-06 08:01:53,867][00403] Num frames 10400...
+[2024-08-06 08:01:53,990][00403] Num frames 10500...
+[2024-08-06 08:01:54,123][00403] Num frames 10600...
+[2024-08-06 08:01:54,246][00403] Num frames 10700...
+[2024-08-06 08:01:54,375][00403] Num frames 10800...
+[2024-08-06 08:01:54,502][00403] Num frames 10900...
+[2024-08-06 08:01:54,629][00403] Num frames 11000...
+[2024-08-06 08:01:54,754][00403] Num frames 11100...
+[2024-08-06 08:01:54,876][00403] Num frames 11200...
+[2024-08-06 08:01:54,998][00403] Num frames 11300...
+[2024-08-06 08:01:55,122][00403] Num frames 11400...
+[2024-08-06 08:01:55,245][00403] Num frames 11500...
+[2024-08-06 08:01:55,374][00403] Num frames 11600...
+[2024-08-06 08:01:55,491][00403] Num frames 11700...
+[2024-08-06 08:01:55,608][00403] Num frames 11800...
+[2024-08-06 08:01:55,758][00403] Avg episode rewards: #0: 31.412, true rewards: #0: 13.190
+[2024-08-06 08:01:55,760][00403] Avg episode reward: 31.412, avg true_objective: 13.190
+[2024-08-06 08:01:55,797][00403] Num frames 11900...
+[2024-08-06 08:01:55,915][00403] Num frames 12000...
+[2024-08-06 08:01:56,035][00403] Num frames 12100...
+[2024-08-06 08:01:56,158][00403] Num frames 12200...
+[2024-08-06 08:01:56,279][00403] Num frames 12300...
+[2024-08-06 08:01:56,407][00403] Num frames 12400...
+[2024-08-06 08:01:56,534][00403] Num frames 12500...
+[2024-08-06 08:01:56,738][00403] Num frames 12600...
+[2024-08-06 08:01:56,906][00403] Num frames 12700...
+[2024-08-06 08:01:57,079][00403] Num frames 12800...
+[2024-08-06 08:01:57,239][00403] Num frames 12900...
+[2024-08-06 08:01:57,405][00403] Num frames 13000...
+[2024-08-06 08:01:57,568][00403] Num frames 13100...
+[2024-08-06 08:01:57,714][00403] Avg episode rewards: #0: 31.551, true rewards: #0: 13.151
+[2024-08-06 08:01:57,717][00403] Avg episode reward: 31.551, avg true_objective: 13.151
+[2024-08-06 08:03:11,303][00403] Replay video saved to /content/train_dir/default_experiment/replay.mp4!