jerryvc committed
Commit 33eb506
Parent: ba2cef8

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
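The added rule stores `replay.mp4` as a Git LFS pointer rather than a regular blob. As a hedged sketch (standard `git-lfs` CLI, not shown in this commit), an equivalent rule can be produced with:
```
# Appends "replay.mp4 filter=lfs diff=lfs merge=lfs -text" to .gitattributes
git lfs track "replay.mp4"
```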
.summary/0/events.out.tfevents.1729444491.90806ce10b21 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e316ff30c6e83fb553913b8413230fd63efbf326f96376452bc6c7ff55487a2
+ size 915741
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ library_name: sample-factory
+ tags:
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - sample-factory
+ model-index:
+ - name: APPO
+   results:
+   - task:
+       type: reinforcement-learning
+       name: reinforcement-learning
+     dataset:
+       name: doom_health_gathering_supreme
+       type: doom_health_gathering_supreme
+     metrics:
+     - type: mean_reward
+       value: 4.48 +/- 0.66
+       name: mean_reward
+       verified: false
+ ---
+
+ An **APPO** model trained on the **doom_health_gathering_supreme** environment.
+
+ This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
+ Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
+
+
+ ## Downloading the model
+
+ After installing Sample-Factory, download the model with:
+ ```
+ python -m sample_factory.huggingface.load_from_hub -r jerryvc/rl_course_vizdoom_health_gathering_supreme
+ ```
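The repository id above comes from this model card; the `-d` flag for picking the destination directory is an assumption about the Sample-Factory CLI (check `--help` on your install). A minimal sketch that downloads into the `./train_dir` layout the scripts below expect:
```
# Download this checkpoint into ./train_dir/rl_course_vizdoom_health_gathering_supreme
python -m sample_factory.huggingface.load_from_hub -r jerryvc/rl_course_vizdoom_health_gathering_supreme -d ./train_dir
```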
+
+
+ ## Using the model
+
+ To run the model after download, use the `enjoy` script corresponding to this environment:
+ ```
+ python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
+ ```
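For the VizDoom environments bundled with Sample-Factory, the enjoy module is typically `sf_examples.vizdoom.enjoy_vizdoom`; that module path and the `--no_render`/`--max_num_episodes` flags are assumptions about a stock install, so substitute whatever matches your setup. A minimal sketch:
```
# Run 10 evaluation episodes headlessly with the downloaded checkpoint
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --no_render --max_num_episodes=10
```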
+
+
+ You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
+ See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.
+
+ ## Training with this model
+
+ To continue training with this model, use the `train` script corresponding to this environment:
+ ```
+ python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
+ ```
+
+ Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously concluded.
+
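As a concrete sketch of that adjustment (module path assumed as above; the step counts come from this run's config and checkpoint names, which stop near 4,000,000 env steps): to train for roughly two million additional steps, pass a target above the resumed count, e.g.
```
# Resume from the saved checkpoint and continue until 6M total env steps
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=6000000
```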
checkpoint_p0/best_000000885_3624960_reward_5.461.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8dbaba111d7271188901b91ca8061ed95d22cd78133bdb6b15f8d58aa0e2e20c
+ size 34929051
checkpoint_p0/checkpoint_000000955_3911680.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fcdeac66b49f75be99ed85b5c669b82557c4df5cf56e247cbfae93b8847d8ad
+ size 34929541
checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ed53f888ced599dd929a852a22f480d0ae3bc3b27211de662a5f8675dc7a21d
+ size 34929541
config.json ADDED
@@ -0,0 +1,142 @@
+ {
+     "help": false,
+     "algo": "APPO",
+     "env": "doom_health_gathering_supreme",
+     "experiment": "default_experiment",
+     "train_dir": "/content/train_dir",
+     "restart_behavior": "resume",
+     "device": "gpu",
+     "seed": null,
+     "num_policies": 1,
+     "async_rl": true,
+     "serial_mode": false,
+     "batched_sampling": false,
+     "num_batches_to_accumulate": 2,
+     "worker_num_splits": 2,
+     "policy_workers_per_policy": 1,
+     "max_policy_lag": 1000,
+     "num_workers": 8,
+     "num_envs_per_worker": 4,
+     "batch_size": 1024,
+     "num_batches_per_epoch": 1,
+     "num_epochs": 1,
+     "rollout": 32,
+     "recurrence": 32,
+     "shuffle_minibatches": false,
+     "gamma": 0.99,
+     "reward_scale": 1.0,
+     "reward_clip": 1000.0,
+     "value_bootstrap": false,
+     "normalize_returns": true,
+     "exploration_loss_coeff": 0.001,
+     "value_loss_coeff": 0.5,
+     "kl_loss_coeff": 0.0,
+     "exploration_loss": "symmetric_kl",
+     "gae_lambda": 0.95,
+     "ppo_clip_ratio": 0.1,
+     "ppo_clip_value": 0.2,
+     "with_vtrace": false,
+     "vtrace_rho": 1.0,
+     "vtrace_c": 1.0,
+     "optimizer": "adam",
+     "adam_eps": 1e-06,
+     "adam_beta1": 0.9,
+     "adam_beta2": 0.999,
+     "max_grad_norm": 4.0,
+     "learning_rate": 0.0001,
+     "lr_schedule": "constant",
+     "lr_schedule_kl_threshold": 0.008,
+     "lr_adaptive_min": 1e-06,
+     "lr_adaptive_max": 0.01,
+     "obs_subtract_mean": 0.0,
+     "obs_scale": 255.0,
+     "normalize_input": true,
+     "normalize_input_keys": null,
+     "decorrelate_experience_max_seconds": 0,
+     "decorrelate_envs_on_one_worker": true,
+     "actor_worker_gpus": [],
+     "set_workers_cpu_affinity": true,
+     "force_envs_single_thread": false,
+     "default_niceness": 0,
+     "log_to_file": true,
+     "experiment_summaries_interval": 10,
+     "flush_summaries_interval": 30,
+     "stats_avg": 100,
+     "summaries_use_frameskip": true,
+     "heartbeat_interval": 20,
+     "heartbeat_reporting_interval": 600,
+     "train_for_env_steps": 4000000,
+     "train_for_seconds": 10000000000,
+     "save_every_sec": 120,
+     "keep_checkpoints": 2,
+     "load_checkpoint_kind": "latest",
+     "save_milestones_sec": -1,
+     "save_best_every_sec": 5,
+     "save_best_metric": "reward",
+     "save_best_after": 100000,
+     "benchmark": false,
+     "encoder_mlp_layers": [
+         512,
+         512
+     ],
+     "encoder_conv_architecture": "convnet_simple",
+     "encoder_conv_mlp_layers": [
+         512
+     ],
+     "use_rnn": true,
+     "rnn_size": 512,
+     "rnn_type": "gru",
+     "rnn_num_layers": 1,
+     "decoder_mlp_layers": [],
+     "nonlinearity": "elu",
+     "policy_initialization": "orthogonal",
+     "policy_init_gain": 1.0,
+     "actor_critic_share_weights": true,
+     "adaptive_stddev": true,
+     "continuous_tanh_scale": 0.0,
+     "initial_stddev": 1.0,
+     "use_env_info_cache": false,
+     "env_gpu_actions": false,
+     "env_gpu_observations": true,
+     "env_frameskip": 4,
+     "env_framestack": 1,
+     "pixel_format": "CHW",
+     "use_record_episode_statistics": false,
+     "with_wandb": false,
+     "wandb_user": null,
+     "wandb_project": "sample_factory",
+     "wandb_group": null,
+     "wandb_job_type": "SF",
+     "wandb_tags": [],
+     "with_pbt": false,
+     "pbt_mix_policies_in_one_env": true,
+     "pbt_period_env_steps": 5000000,
+     "pbt_start_mutation": 20000000,
+     "pbt_replace_fraction": 0.3,
+     "pbt_mutation_rate": 0.15,
+     "pbt_replace_reward_gap": 0.1,
+     "pbt_replace_reward_gap_absolute": 1e-06,
+     "pbt_optimize_gamma": false,
+     "pbt_target_objective": "true_objective",
+     "pbt_perturb_min": 1.1,
+     "pbt_perturb_max": 1.5,
+     "num_agents": -1,
+     "num_humans": 0,
+     "num_bots": -1,
+     "start_bot_difficulty": null,
+     "timelimit": null,
+     "res_w": 128,
+     "res_h": 72,
+     "wide_aspect_ratio": false,
+     "eval_env_frameskip": 1,
+     "fps": 35,
+     "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
+     "cli_args": {
+         "env": "doom_health_gathering_supreme",
+         "num_workers": 8,
+         "num_envs_per_worker": 4,
+         "train_for_env_steps": 4000000
+     },
+     "git_hash": "unknown",
+     "git_repo_name": "not a git repository"
+ }
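The `command_line` and `cli_args` fields record the exact CLI overrides used for this run; every other value fell back to a default. A hedged sketch of reproducing the run (`sf_examples.vizdoom.train_vizdoom` is an assumption about the stock VizDoom training module; substitute your own if it differs):
```
# Re-run training with the same CLI overrides recorded in config.json
python -m sf_examples.vizdoom.train_vizdoom --env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000
```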
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24c9b0c85b1482d69ed5a7e956d27b7d01b7dd7d68cde3d10baa413ed582ddb1
+ size 5707871
sf_log.txt ADDED
@@ -0,0 +1,959 @@
+ [2024-10-20 17:15:03,060][00556] Saving configuration to /content/train_dir/default_experiment/config.json...
+ [2024-10-20 17:15:03,062][00556] Rollout worker 0 uses device cpu
+ [2024-10-20 17:15:03,064][00556] Rollout worker 1 uses device cpu
+ [2024-10-20 17:15:03,065][00556] Rollout worker 2 uses device cpu
+ [2024-10-20 17:15:03,066][00556] Rollout worker 3 uses device cpu
+ [2024-10-20 17:15:03,068][00556] Rollout worker 4 uses device cpu
+ [2024-10-20 17:15:03,069][00556] Rollout worker 5 uses device cpu
+ [2024-10-20 17:15:03,070][00556] Rollout worker 6 uses device cpu
+ [2024-10-20 17:15:03,071][00556] Rollout worker 7 uses device cpu
+ [2024-10-20 17:15:03,247][00556] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2024-10-20 17:15:03,249][00556] InferenceWorker_p0-w0: min num requests: 2
+ [2024-10-20 17:15:03,282][00556] Starting all processes...
+ [2024-10-20 17:15:03,283][00556] Starting process learner_proc0
+ [2024-10-20 17:15:05,374][00556] Starting all processes...
+ [2024-10-20 17:15:05,383][00556] Starting process inference_proc0-0
+ [2024-10-20 17:15:05,384][00556] Starting process rollout_proc0
+ [2024-10-20 17:15:05,385][00556] Starting process rollout_proc1
+ [2024-10-20 17:15:05,385][00556] Starting process rollout_proc2
+ [2024-10-20 17:15:05,385][00556] Starting process rollout_proc3
+ [2024-10-20 17:15:05,386][00556] Starting process rollout_proc4
+ [2024-10-20 17:15:05,386][00556] Starting process rollout_proc5
+ [2024-10-20 17:15:05,389][00556] Starting process rollout_proc6
+ [2024-10-20 17:15:05,389][00556] Starting process rollout_proc7
+ [2024-10-20 17:15:20,894][02541] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2024-10-20 17:15:20,894][02541] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
+ [2024-10-20 17:15:20,919][02558] Worker 2 uses CPU cores [0]
+ [2024-10-20 17:15:20,964][02541] Num visible devices: 1
+ [2024-10-20 17:15:21,000][02541] Starting seed is not provided
+ [2024-10-20 17:15:21,001][02541] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2024-10-20 17:15:21,002][02541] Initializing actor-critic model on device cuda:0
+ [2024-10-20 17:15:21,003][02541] RunningMeanStd input shape: (3, 72, 128)
+ [2024-10-20 17:15:21,005][02541] RunningMeanStd input shape: (1,)
+ [2024-10-20 17:15:21,074][02541] ConvEncoder: input_channels=3
+ [2024-10-20 17:15:21,102][02556] Worker 1 uses CPU cores [1]
+ [2024-10-20 17:15:21,193][02557] Worker 3 uses CPU cores [1]
+ [2024-10-20 17:15:21,201][02555] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2024-10-20 17:15:21,202][02555] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
+ [2024-10-20 17:15:21,235][02559] Worker 6 uses CPU cores [0]
+ [2024-10-20 17:15:21,266][02555] Num visible devices: 1
+ [2024-10-20 17:15:21,274][02561] Worker 5 uses CPU cores [1]
+ [2024-10-20 17:15:21,300][02562] Worker 7 uses CPU cores [1]
+ [2024-10-20 17:15:21,325][02554] Worker 0 uses CPU cores [0]
+ [2024-10-20 17:15:21,397][02560] Worker 4 uses CPU cores [0]
+ [2024-10-20 17:15:21,467][02541] Conv encoder output size: 512
+ [2024-10-20 17:15:21,467][02541] Policy head output size: 512
+ [2024-10-20 17:15:21,526][02541] Created Actor Critic model with architecture:
+ [2024-10-20 17:15:21,527][02541] ActorCriticSharedWeights(
+   (obs_normalizer): ObservationNormalizer(
+     (running_mean_std): RunningMeanStdDictInPlace(
+       (running_mean_std): ModuleDict(
+         (obs): RunningMeanStdInPlace()
+       )
+     )
+   )
+   (returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
+   (encoder): VizdoomEncoder(
+     (basic_encoder): ConvEncoder(
+       (enc): RecursiveScriptModule(
+         original_name=ConvEncoderImpl
+         (conv_head): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Conv2d)
+           (1): RecursiveScriptModule(original_name=ELU)
+           (2): RecursiveScriptModule(original_name=Conv2d)
+           (3): RecursiveScriptModule(original_name=ELU)
+           (4): RecursiveScriptModule(original_name=Conv2d)
+           (5): RecursiveScriptModule(original_name=ELU)
+         )
+         (mlp_layers): RecursiveScriptModule(
+           original_name=Sequential
+           (0): RecursiveScriptModule(original_name=Linear)
+           (1): RecursiveScriptModule(original_name=ELU)
+         )
+       )
+     )
+   )
+   (core): ModelCoreRNN(
+     (core): GRU(512, 512)
+   )
+   (decoder): MlpDecoder(
+     (mlp): Identity()
+   )
+   (critic_linear): Linear(in_features=512, out_features=1, bias=True)
+   (action_parameterization): ActionParameterizationDefault(
+     (distribution_linear): Linear(in_features=512, out_features=5, bias=True)
+   )
+ )
+ [2024-10-20 17:15:21,822][02541] Using optimizer <class 'torch.optim.adam.Adam'>
+ [2024-10-20 17:15:22,888][02541] No checkpoints found
+ [2024-10-20 17:15:22,889][02541] Did not load from checkpoint, starting from scratch!
+ [2024-10-20 17:15:22,889][02541] Initialized policy 0 weights for model version 0
+ [2024-10-20 17:15:22,893][02541] LearnerWorker_p0 finished initialization!
+ [2024-10-20 17:15:22,894][02541] Using GPUs [0] for process 0 (actually maps to GPUs [0])
+ [2024-10-20 17:15:22,986][02555] RunningMeanStd input shape: (3, 72, 128)
+ [2024-10-20 17:15:22,987][02555] RunningMeanStd input shape: (1,)
+ [2024-10-20 17:15:22,999][02555] ConvEncoder: input_channels=3
+ [2024-10-20 17:15:23,101][02555] Conv encoder output size: 512
+ [2024-10-20 17:15:23,101][02555] Policy head output size: 512
+ [2024-10-20 17:15:23,153][00556] Inference worker 0-0 is ready!
+ [2024-10-20 17:15:23,155][00556] All inference workers are ready! Signal rollout workers to start!
+ [2024-10-20 17:15:23,239][00556] Heartbeat connected on Batcher_0
+ [2024-10-20 17:15:23,242][00556] Heartbeat connected on LearnerWorker_p0
+ [2024-10-20 17:15:23,284][00556] Heartbeat connected on InferenceWorker_p0-w0
+ [2024-10-20 17:15:23,361][02557] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,360][02556] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,359][02562] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,357][02561] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,392][02554] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,396][02560] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,394][02559] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:23,404][02558] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:15:24,535][02557] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:24,536][02562] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:25,211][02554] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:25,213][02560] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:25,214][02558] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:25,210][02559] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:25,600][02562] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:26,500][02554] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:26,503][02559] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:26,505][02558] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:26,994][00556] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2024-10-20 17:15:27,432][02556] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:27,788][02562] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:28,057][02561] Decorrelating experience for 0 frames...
+ [2024-10-20 17:15:28,204][02560] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:28,653][02559] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:28,729][02556] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:29,221][02562] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:29,436][00556] Heartbeat connected on RolloutWorker_w7
+ [2024-10-20 17:15:29,779][02558] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:30,096][02561] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:30,265][02560] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:30,607][02556] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:31,101][02554] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:31,208][02558] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:31,312][02557] Decorrelating experience for 32 frames...
+ [2024-10-20 17:15:31,466][00556] Heartbeat connected on RolloutWorker_w2
+ [2024-10-20 17:15:31,618][02561] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:31,994][00556] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 0.0. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2024-10-20 17:15:33,360][02560] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:33,798][02554] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:33,801][00556] Heartbeat connected on RolloutWorker_w4
+ [2024-10-20 17:15:34,196][02556] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:34,206][00556] Heartbeat connected on RolloutWorker_w0
+ [2024-10-20 17:15:34,243][02557] Decorrelating experience for 64 frames...
+ [2024-10-20 17:15:34,418][02561] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:34,548][00556] Heartbeat connected on RolloutWorker_w1
+ [2024-10-20 17:15:34,787][00556] Heartbeat connected on RolloutWorker_w5
+ [2024-10-20 17:15:36,541][02541] Signal inference workers to stop experience collection...
+ [2024-10-20 17:15:36,555][02555] InferenceWorker_p0-w0: stopping experience collection
+ [2024-10-20 17:15:36,596][02559] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:36,717][02557] Decorrelating experience for 96 frames...
+ [2024-10-20 17:15:36,772][00556] Heartbeat connected on RolloutWorker_w6
+ [2024-10-20 17:15:36,803][00556] Heartbeat connected on RolloutWorker_w3
+ [2024-10-20 17:15:36,994][00556] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 151.6. Samples: 1516. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
+ [2024-10-20 17:15:36,996][00556] Avg episode reward: [(0, '2.669')]
+ [2024-10-20 17:15:39,764][02541] Signal inference workers to resume experience collection...
+ [2024-10-20 17:15:39,765][02555] InferenceWorker_p0-w0: resuming experience collection
+ [2024-10-20 17:15:41,994][00556] Fps is (10 sec: 1228.8, 60 sec: 819.2, 300 sec: 819.2). Total num frames: 12288. Throughput: 0: 207.7. Samples: 3116. Policy #0 lag: (min: 0.0, avg: 1.2, max: 2.0)
+ [2024-10-20 17:15:42,000][00556] Avg episode reward: [(0, '2.906')]
+ [2024-10-20 17:15:46,994][00556] Fps is (10 sec: 2457.6, 60 sec: 1228.8, 300 sec: 1228.8). Total num frames: 24576. Throughput: 0: 262.0. Samples: 5240. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:15:46,997][00556] Avg episode reward: [(0, '3.593')]
+ [2024-10-20 17:15:49,960][02555] Updated weights for policy 0, policy_version 10 (0.0029)
+ [2024-10-20 17:15:51,994][00556] Fps is (10 sec: 3686.4, 60 sec: 1966.1, 300 sec: 1966.1). Total num frames: 49152. Throughput: 0: 441.9. Samples: 11048. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:15:51,999][00556] Avg episode reward: [(0, '4.364')]
+ [2024-10-20 17:15:56,998][00556] Fps is (10 sec: 4504.1, 60 sec: 2320.8, 300 sec: 2320.8). Total num frames: 69632. Throughput: 0: 584.8. Samples: 17546. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:15:57,000][00556] Avg episode reward: [(0, '4.628')]
+ [2024-10-20 17:16:01,994][00556] Fps is (10 sec: 2867.2, 60 sec: 2223.5, 300 sec: 2223.5). Total num frames: 77824. Throughput: 0: 532.2. Samples: 18626. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:16:01,997][00556] Avg episode reward: [(0, '4.649')]
+ [2024-10-20 17:16:02,757][02555] Updated weights for policy 0, policy_version 20 (0.0040)
+ [2024-10-20 17:16:06,994][00556] Fps is (10 sec: 2868.2, 60 sec: 2457.6, 300 sec: 2457.6). Total num frames: 98304. Throughput: 0: 577.2. Samples: 23088. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:16:06,997][00556] Avg episode reward: [(0, '4.491')]
+ [2024-10-20 17:16:11,996][00556] Fps is (10 sec: 3685.7, 60 sec: 2548.5, 300 sec: 2548.5). Total num frames: 114688. Throughput: 0: 648.3. Samples: 29176. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:16:12,001][00556] Avg episode reward: [(0, '4.403')]
+ [2024-10-20 17:16:12,062][02541] Saving new best policy, reward=4.403!
+ [2024-10-20 17:16:13,033][02555] Updated weights for policy 0, policy_version 30 (0.0020)
+ [2024-10-20 17:16:16,994][00556] Fps is (10 sec: 3276.8, 60 sec: 2621.4, 300 sec: 2621.4). Total num frames: 131072. Throughput: 0: 716.0. Samples: 32220. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:16:16,998][00556] Avg episode reward: [(0, '4.388')]
+ [2024-10-20 17:16:21,994][00556] Fps is (10 sec: 3277.4, 60 sec: 2681.0, 300 sec: 2681.0). Total num frames: 147456. Throughput: 0: 774.0. Samples: 36344. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:16:21,997][00556] Avg episode reward: [(0, '4.313')]
+ [2024-10-20 17:16:24,824][02555] Updated weights for policy 0, policy_version 40 (0.0020)
+ [2024-10-20 17:16:26,994][00556] Fps is (10 sec: 4096.0, 60 sec: 2867.2, 300 sec: 2867.2). Total num frames: 172032. Throughput: 0: 883.7. Samples: 42882. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:16:26,996][00556] Avg episode reward: [(0, '4.438')]
+ [2024-10-20 17:16:27,005][02541] Saving new best policy, reward=4.438!
+ [2024-10-20 17:16:31,996][00556] Fps is (10 sec: 4504.9, 60 sec: 3208.4, 300 sec: 2961.6). Total num frames: 192512. Throughput: 0: 907.9. Samples: 46098. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:16:31,998][00556] Avg episode reward: [(0, '4.485')]
+ [2024-10-20 17:16:32,008][02541] Saving new best policy, reward=4.485!
+ [2024-10-20 17:16:36,603][02555] Updated weights for policy 0, policy_version 50 (0.0025)
+ [2024-10-20 17:16:36,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3413.3, 300 sec: 2925.7). Total num frames: 204800. Throughput: 0: 879.0. Samples: 50602. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+ [2024-10-20 17:16:37,001][00556] Avg episode reward: [(0, '4.468')]
+ [2024-10-20 17:16:41,994][00556] Fps is (10 sec: 3277.3, 60 sec: 3549.9, 300 sec: 3003.7). Total num frames: 225280. Throughput: 0: 867.1. Samples: 56562. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:16:41,996][00556] Avg episode reward: [(0, '4.318')]
+ [2024-10-20 17:16:45,872][02555] Updated weights for policy 0, policy_version 60 (0.0035)
+ [2024-10-20 17:16:46,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3754.7, 300 sec: 3123.2). Total num frames: 249856. Throughput: 0: 920.7. Samples: 60056. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:16:47,000][00556] Avg episode reward: [(0, '4.379')]
+ [2024-10-20 17:16:51,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3549.9, 300 sec: 3084.0). Total num frames: 262144. Throughput: 0: 948.0. Samples: 65748. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:16:52,002][00556] Avg episode reward: [(0, '4.477')]
+ [2024-10-20 17:16:52,014][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000064_262144.pth...
+ [2024-10-20 17:16:56,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3550.1, 300 sec: 3140.3). Total num frames: 282624. Throughput: 0: 923.1. Samples: 70712. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:16:57,004][00556] Avg episode reward: [(0, '4.471')]
+ [2024-10-20 17:16:57,509][02555] Updated weights for policy 0, policy_version 70 (0.0026)
+ [2024-10-20 17:17:01,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3822.9, 300 sec: 3233.7). Total num frames: 307200. Throughput: 0: 930.7. Samples: 74102. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:17:01,997][00556] Avg episode reward: [(0, '4.424')]
+ [2024-10-20 17:17:06,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3235.8). Total num frames: 323584. Throughput: 0: 984.7. Samples: 80654. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:17:07,000][00556] Avg episode reward: [(0, '4.367')]
+ [2024-10-20 17:17:07,842][02555] Updated weights for policy 0, policy_version 80 (0.0030)
+ [2024-10-20 17:17:11,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3237.8). Total num frames: 339968. Throughput: 0: 931.3. Samples: 84792. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:17:12,000][00556] Avg episode reward: [(0, '4.414')]
+ [2024-10-20 17:17:16,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3276.8). Total num frames: 360448. Throughput: 0: 936.8. Samples: 88252. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:17:16,998][00556] Avg episode reward: [(0, '4.510')]
+ [2024-10-20 17:17:17,001][02541] Saving new best policy, reward=4.510!
+ [2024-10-20 17:17:18,037][02555] Updated weights for policy 0, policy_version 90 (0.0034)
+ [2024-10-20 17:17:22,000][00556] Fps is (10 sec: 4503.1, 60 sec: 3959.1, 300 sec: 3347.9). Total num frames: 385024. Throughput: 0: 992.4. Samples: 95266. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:17:22,008][00556] Avg episode reward: [(0, '4.607')]
+ [2024-10-20 17:17:22,017][02541] Saving new best policy, reward=4.607!
+ [2024-10-20 17:17:26,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3310.9). Total num frames: 397312. Throughput: 0: 966.3. Samples: 100044. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:17:26,996][00556] Avg episode reward: [(0, '4.601')]
+ [2024-10-20 17:17:29,405][02555] Updated weights for policy 0, policy_version 100 (0.0035)
+ [2024-10-20 17:17:31,994][00556] Fps is (10 sec: 3278.6, 60 sec: 3754.8, 300 sec: 3342.3). Total num frames: 417792. Throughput: 0: 945.3. Samples: 102596. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:17:32,001][00556] Avg episode reward: [(0, '4.566')]
+ [2024-10-20 17:17:36,996][00556] Fps is (10 sec: 4504.9, 60 sec: 3959.4, 300 sec: 3402.8). Total num frames: 442368. Throughput: 0: 973.0. Samples: 109534. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:17:37,002][00556] Avg episode reward: [(0, '4.592')]
+ [2024-10-20 17:17:38,479][02555] Updated weights for policy 0, policy_version 110 (0.0022)
+ [2024-10-20 17:17:41,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3398.2). Total num frames: 458752. Throughput: 0: 985.1. Samples: 115042. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:17:41,998][00556] Avg episode reward: [(0, '4.664')]
+ [2024-10-20 17:17:42,006][02541] Saving new best policy, reward=4.664!
+ [2024-10-20 17:17:46,994][00556] Fps is (10 sec: 3277.3, 60 sec: 3754.7, 300 sec: 3393.8). Total num frames: 475136. Throughput: 0: 954.7. Samples: 117062. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:17:47,000][00556] Avg episode reward: [(0, '4.536')]
+ [2024-10-20 17:17:49,896][02555] Updated weights for policy 0, policy_version 120 (0.0031)
+ [2024-10-20 17:17:51,995][00556] Fps is (10 sec: 4095.8, 60 sec: 3959.4, 300 sec: 3446.3). Total num frames: 499712. Throughput: 0: 956.2. Samples: 123684. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:17:51,997][00556] Avg episode reward: [(0, '4.404')]
+ [2024-10-20 17:17:56,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3467.9). Total num frames: 520192. Throughput: 0: 1007.4. Samples: 130126. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:17:57,000][00556] Avg episode reward: [(0, '4.698')]
+ [2024-10-20 17:17:57,005][02541] Saving new best policy, reward=4.698!
+ [2024-10-20 17:18:01,623][02555] Updated weights for policy 0, policy_version 130 (0.0025)
+ [2024-10-20 17:18:01,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3754.7, 300 sec: 3435.4). Total num frames: 532480. Throughput: 0: 973.4. Samples: 132056. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:18:02,001][00556] Avg episode reward: [(0, '4.713')]
+ [2024-10-20 17:18:02,013][02541] Saving new best policy, reward=4.713!
+ [2024-10-20 17:18:06,994][00556] Fps is (10 sec: 3276.7, 60 sec: 3822.9, 300 sec: 3456.0). Total num frames: 552960. Throughput: 0: 941.6. Samples: 137634. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:07,000][00556] Avg episode reward: [(0, '4.475')]
+ [2024-10-20 17:18:10,782][02555] Updated weights for policy 0, policy_version 140 (0.0029)
+ [2024-10-20 17:18:11,994][00556] Fps is (10 sec: 4505.5, 60 sec: 3959.4, 300 sec: 3500.2). Total num frames: 577536. Throughput: 0: 985.5. Samples: 144394. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:12,000][00556] Avg episode reward: [(0, '4.464')]
+ [2024-10-20 17:18:16,994][00556] Fps is (10 sec: 3686.5, 60 sec: 3822.9, 300 sec: 3469.6). Total num frames: 589824. Throughput: 0: 982.7. Samples: 146816. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:18:17,000][00556] Avg episode reward: [(0, '4.402')]
+ [2024-10-20 17:18:21,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3755.0, 300 sec: 3487.4). Total num frames: 610304. Throughput: 0: 935.7. Samples: 151638. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:18:21,997][00556] Avg episode reward: [(0, '4.324')]
+ [2024-10-20 17:18:22,334][02555] Updated weights for policy 0, policy_version 150 (0.0037)
+ [2024-10-20 17:18:26,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3527.1). Total num frames: 634880. Throughput: 0: 969.6. Samples: 158672. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:27,002][00556] Avg episode reward: [(0, '4.426')]
+ [2024-10-20 17:18:31,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3520.3). Total num frames: 651264. Throughput: 0: 995.8. Samples: 161874. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:18:31,998][00556] Avg episode reward: [(0, '4.579')]
+ [2024-10-20 17:18:33,044][02555] Updated weights for policy 0, policy_version 160 (0.0025)
+ [2024-10-20 17:18:36,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3513.9). Total num frames: 667648. Throughput: 0: 940.7. Samples: 166016. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:18:36,997][00556] Avg episode reward: [(0, '4.686')]
+ [2024-10-20 17:18:41,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3528.9). Total num frames: 688128. Throughput: 0: 942.2. Samples: 172524. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:18:42,000][00556] Avg episode reward: [(0, '4.555')]
+ [2024-10-20 17:18:43,120][02555] Updated weights for policy 0, policy_version 170 (0.0032)
+ [2024-10-20 17:18:46,997][00556] Fps is (10 sec: 4094.9, 60 sec: 3891.0, 300 sec: 3543.0). Total num frames: 708608. Throughput: 0: 975.8. Samples: 175968. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:47,003][00556] Avg episode reward: [(0, '4.542')]
+ [2024-10-20 17:18:51,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3536.5). Total num frames: 724992. Throughput: 0: 962.0. Samples: 180926. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:51,997][00556] Avg episode reward: [(0, '4.566')]
+ [2024-10-20 17:18:52,020][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000177_724992.pth...
+ [2024-10-20 17:18:54,722][02555] Updated weights for policy 0, policy_version 180 (0.0048)
+ [2024-10-20 17:18:56,994][00556] Fps is (10 sec: 3687.4, 60 sec: 3754.7, 300 sec: 3549.9). Total num frames: 745472. Throughput: 0: 938.0. Samples: 186602. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:18:57,001][00556] Avg episode reward: [(0, '4.457')]
+ [2024-10-20 17:19:01,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3581.6). Total num frames: 770048. Throughput: 0: 961.3. Samples: 190076. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:19:01,998][00556] Avg episode reward: [(0, '4.644')]
+ [2024-10-20 17:19:04,069][02555] Updated weights for policy 0, policy_version 190 (0.0013)
+ [2024-10-20 17:19:07,000][00556] Fps is (10 sec: 3684.4, 60 sec: 3822.6, 300 sec: 3556.0). Total num frames: 782336. Throughput: 0: 984.8. Samples: 195960. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:07,002][00556] Avg episode reward: [(0, '4.712')]
+ [2024-10-20 17:19:11,994][00556] Fps is (10 sec: 3276.7, 60 sec: 3754.7, 300 sec: 3568.1). Total num frames: 802816. Throughput: 0: 934.2. Samples: 200712. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:19:11,999][00556] Avg episode reward: [(0, '4.756')]
+ [2024-10-20 17:19:12,008][02541] Saving new best policy, reward=4.756!
+ [2024-10-20 17:19:15,359][02555] Updated weights for policy 0, policy_version 200 (0.0037)
+ [2024-10-20 17:19:16,994][00556] Fps is (10 sec: 4098.2, 60 sec: 3891.2, 300 sec: 3579.5). Total num frames: 823296. Throughput: 0: 938.7. Samples: 204116. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:19:17,000][00556] Avg episode reward: [(0, '4.541')]
+ [2024-10-20 17:19:21,997][00556] Fps is (10 sec: 4095.1, 60 sec: 3891.1, 300 sec: 3590.5). Total num frames: 843776. Throughput: 0: 994.2. Samples: 210756. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:19:21,999][00556] Avg episode reward: [(0, '4.340')]
+ [2024-10-20 17:19:26,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3566.9). Total num frames: 856064. Throughput: 0: 936.6. Samples: 214670. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:19:26,997][00556] Avg episode reward: [(0, '4.488')]
+ [2024-10-20 17:19:27,238][02555] Updated weights for policy 0, policy_version 210 (0.0017)
+ [2024-10-20 17:19:31,994][00556] Fps is (10 sec: 3687.3, 60 sec: 3822.9, 300 sec: 3594.4). Total num frames: 880640. Throughput: 0: 928.8. Samples: 217760. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:19:31,998][00556] Avg episode reward: [(0, '4.370')]
+ [2024-10-20 17:19:36,708][02555] Updated weights for policy 0, policy_version 220 (0.0030)
+ [2024-10-20 17:19:36,998][00556] Fps is (10 sec: 4504.1, 60 sec: 3891.0, 300 sec: 3604.4). Total num frames: 901120. Throughput: 0: 964.2. Samples: 224320. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:37,000][00556] Avg episode reward: [(0, '4.397')]
+ [2024-10-20 17:19:41,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3582.0). Total num frames: 913408. Throughput: 0: 939.1. Samples: 228862. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:41,997][00556] Avg episode reward: [(0, '4.374')]
+ [2024-10-20 17:19:46,994][00556] Fps is (10 sec: 3277.9, 60 sec: 3754.8, 300 sec: 3591.9). Total num frames: 933888. Throughput: 0: 910.8. Samples: 231062. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:47,001][00556] Avg episode reward: [(0, '4.654')]
+ [2024-10-20 17:19:48,771][02555] Updated weights for policy 0, policy_version 230 (0.0024)
+ [2024-10-20 17:19:51,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3601.4). Total num frames: 954368. Throughput: 0: 928.9. Samples: 237754. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:52,002][00556] Avg episode reward: [(0, '4.973')]
+ [2024-10-20 17:19:52,010][02541] Saving new best policy, reward=4.973!
+ [2024-10-20 17:19:56,995][00556] Fps is (10 sec: 3685.9, 60 sec: 3754.6, 300 sec: 3595.4). Total num frames: 970752. Throughput: 0: 947.9. Samples: 243368. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:19:57,000][00556] Avg episode reward: [(0, '4.925')]
+ [2024-10-20 17:20:00,486][02555] Updated weights for policy 0, policy_version 240 (0.0020)
+ [2024-10-20 17:20:01,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3618.1, 300 sec: 3589.6). Total num frames: 987136. Throughput: 0: 918.1. Samples: 245432. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:20:02,001][00556] Avg episode reward: [(0, '4.858')]
+ [2024-10-20 17:20:06,996][00556] Fps is (10 sec: 4095.9, 60 sec: 3823.2, 300 sec: 3613.2). Total num frames: 1011712. Throughput: 0: 906.3. Samples: 251538. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2024-10-20 17:20:06,998][00556] Avg episode reward: [(0, '4.596')]
+ [2024-10-20 17:20:09,878][02555] Updated weights for policy 0, policy_version 250 (0.0033)
+ [2024-10-20 17:20:11,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3754.7, 300 sec: 3607.4). Total num frames: 1028096. Throughput: 0: 962.8. Samples: 257998. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2024-10-20 17:20:12,001][00556] Avg episode reward: [(0, '4.561')]
+ [2024-10-20 17:20:16,994][00556] Fps is (10 sec: 3277.3, 60 sec: 3686.4, 300 sec: 3601.7). Total num frames: 1044480. Throughput: 0: 937.6. Samples: 259954. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:20:17,000][00556] Avg episode reward: [(0, '4.634')]
+ [2024-10-20 17:20:21,595][02555] Updated weights for policy 0, policy_version 260 (0.0020)
+ [2024-10-20 17:20:21,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3686.5, 300 sec: 3610.0). Total num frames: 1064960. Throughput: 0: 908.8. Samples: 265214. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:20:22,000][00556] Avg episode reward: [(0, '4.502')]
+ [2024-10-20 17:20:26,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3679.5). Total num frames: 1085440. Throughput: 0: 957.8. Samples: 271964. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:20:26,996][00556] Avg episode reward: [(0, '4.740')]
+ [2024-10-20 17:20:31,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3735.0). Total num frames: 1101824. Throughput: 0: 968.5. Samples: 274646. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:20:32,003][00556] Avg episode reward: [(0, '5.000')]
+ [2024-10-20 17:20:32,017][02541] Saving new best policy, reward=5.000!
+ [2024-10-20 17:20:32,964][02555] Updated weights for policy 0, policy_version 270 (0.0026)
+ [2024-10-20 17:20:36,999][00556] Fps is (10 sec: 3275.3, 60 sec: 3618.1, 300 sec: 3748.8). Total num frames: 1118208. Throughput: 0: 918.0. Samples: 279070. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:20:37,001][00556] Avg episode reward: [(0, '4.945')]
+ [2024-10-20 17:20:41,994][00556] Fps is (10 sec: 4095.9, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 1142784. Throughput: 0: 941.9. Samples: 285752. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:20:41,998][00556] Avg episode reward: [(0, '4.762')]
+ [2024-10-20 17:20:42,566][02555] Updated weights for policy 0, policy_version 280 (0.0024)
+ [2024-10-20 17:20:46,998][00556] Fps is (10 sec: 4505.7, 60 sec: 3822.7, 300 sec: 3776.6). Total num frames: 1163264. Throughput: 0: 970.7. Samples: 289116. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:20:47,002][00556] Avg episode reward: [(0, '4.688')]
+ [2024-10-20 17:20:51,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3686.4, 300 sec: 3748.9). Total num frames: 1175552. Throughput: 0: 932.0. Samples: 293478. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:20:52,000][00556] Avg episode reward: [(0, '4.577')]
+ [2024-10-20 17:20:52,010][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000287_1175552.pth...
+ [2024-10-20 17:20:52,131][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000064_262144.pth
+ [2024-10-20 17:20:54,261][02555] Updated weights for policy 0, policy_version 290 (0.0038)
+ [2024-10-20 17:20:56,994][00556] Fps is (10 sec: 3687.9, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 1200128. Throughput: 0: 923.6. Samples: 299562. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:20:57,006][00556] Avg episode reward: [(0, '4.978')]
+ [2024-10-20 17:21:01,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 1220608. Throughput: 0: 955.8. Samples: 302966. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:21:02,002][00556] Avg episode reward: [(0, '4.826')]
+ [2024-10-20 17:21:04,910][02555] Updated weights for policy 0, policy_version 300 (0.0017)
+ [2024-10-20 17:21:06,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3686.5, 300 sec: 3790.6). Total num frames: 1232896. Throughput: 0: 950.8. Samples: 308002. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:21:06,997][00556] Avg episode reward: [(0, '4.719')]
+ [2024-10-20 17:21:11,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3804.4). Total num frames: 1253376. Throughput: 0: 917.7. Samples: 313262. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:21:11,999][00556] Avg episode reward: [(0, '4.776')]
+ [2024-10-20 17:21:15,433][02555] Updated weights for policy 0, policy_version 310 (0.0019)
+ [2024-10-20 17:21:16,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 1273856. Throughput: 0: 932.1. Samples: 316592. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:21:16,997][00556] Avg episode reward: [(0, '4.856')]
+ [2024-10-20 17:21:21,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 1290240. Throughput: 0: 966.6. Samples: 322564. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:21:22,006][00556] Avg episode reward: [(0, '4.800')]
+ [2024-10-20 17:21:26,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 1306624. Throughput: 0: 918.4. Samples: 327080. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:21:26,999][00556] Avg episode reward: [(0, '4.793')]
+ [2024-10-20 17:21:27,150][02555] Updated weights for policy 0, policy_version 320 (0.0027)
+ [2024-10-20 17:21:31,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 1331200. Throughput: 0: 917.7. Samples: 330410. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:21:31,999][00556] Avg episode reward: [(0, '4.815')]
+ [2024-10-20 17:21:36,637][02555] Updated weights for policy 0, policy_version 330 (0.0023)
+ [2024-10-20 17:21:36,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.5, 300 sec: 3818.3). Total num frames: 1351680. Throughput: 0: 972.2. Samples: 337226. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:21:36,999][00556] Avg episode reward: [(0, '4.618')]
+ [2024-10-20 17:21:41,997][00556] Fps is (10 sec: 3276.0, 60 sec: 3686.3, 300 sec: 3776.6). Total num frames: 1363968. Throughput: 0: 929.2. Samples: 341376. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:21:42,002][00556] Avg episode reward: [(0, '4.724')]
+ [2024-10-20 17:21:46,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.7, 300 sec: 3804.4). Total num frames: 1384448. Throughput: 0: 914.3. Samples: 344108. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:21:46,996][00556] Avg episode reward: [(0, '4.833')]
+ [2024-10-20 17:21:48,289][02555] Updated weights for policy 0, policy_version 340 (0.0028)
+ [2024-10-20 17:21:51,994][00556] Fps is (10 sec: 4506.7, 60 sec: 3891.2, 300 sec: 3818.3). Total num frames: 1409024. Throughput: 0: 955.1. Samples: 350982. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:21:51,997][00556] Avg episode reward: [(0, '4.703')]
+ [2024-10-20 17:21:56,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 1421312. Throughput: 0: 952.0. Samples: 356100. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:21:56,999][00556] Avg episode reward: [(0, '4.475')]
+ [2024-10-20 17:22:00,114][02555] Updated weights for policy 0, policy_version 350 (0.0034)
+ [2024-10-20 17:22:01,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3790.5). Total num frames: 1441792. Throughput: 0: 924.8. Samples: 358206. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:02,002][00556] Avg episode reward: [(0, '4.488')]
+ [2024-10-20 17:22:06,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 1462272. Throughput: 0: 940.1. Samples: 364868. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:06,997][00556] Avg episode reward: [(0, '4.633')]
+ [2024-10-20 17:22:09,196][02555] Updated weights for policy 0, policy_version 360 (0.0024)
+ [2024-10-20 17:22:11,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 1478656. Throughput: 0: 970.1. Samples: 370734. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:22:11,998][00556] Avg episode reward: [(0, '4.765')]
+ [2024-10-20 17:22:16,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 1495040. Throughput: 0: 939.6. Samples: 372692. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:22:17,001][00556] Avg episode reward: [(0, '4.802')]
+ [2024-10-20 17:22:21,005][02555] Updated weights for policy 0, policy_version 370 (0.0037)
+ [2024-10-20 17:22:21,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 1519616. Throughput: 0: 916.8. Samples: 378482. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:22,001][00556] Avg episode reward: [(0, '4.777')]
+ [2024-10-20 17:22:26,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 1540096. Throughput: 0: 979.2. Samples: 385436. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:27,000][00556] Avg episode reward: [(0, '4.433')]
+ [2024-10-20 17:22:31,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 1552384. Throughput: 0: 968.2. Samples: 387676. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:31,997][00556] Avg episode reward: [(0, '4.478')]
+ [2024-10-20 17:22:32,167][02555] Updated weights for policy 0, policy_version 380 (0.0025)
+ [2024-10-20 17:22:36,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 1572864. Throughput: 0: 923.3. Samples: 392530. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:37,000][00556] Avg episode reward: [(0, '4.417')]
+ [2024-10-20 17:22:41,833][02555] Updated weights for policy 0, policy_version 390 (0.0051)
+ [2024-10-20 17:22:41,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.4, 300 sec: 3804.4). Total num frames: 1597440. Throughput: 0: 959.0. Samples: 399254. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:42,002][00556] Avg episode reward: [(0, '4.413')]
+ [2024-10-20 17:22:46,999][00556] Fps is (10 sec: 4094.2, 60 sec: 3822.7, 300 sec: 3776.6). Total num frames: 1613824. Throughput: 0: 981.0. Samples: 402356. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:22:47,001][00556] Avg episode reward: [(0, '4.597')]
+ [2024-10-20 17:22:51,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 1630208. Throughput: 0: 925.0. Samples: 406494. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:22:51,999][00556] Avg episode reward: [(0, '4.710')]
+ [2024-10-20 17:22:52,008][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000398_1630208.pth...
+ [2024-10-20 17:22:52,126][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000177_724992.pth
+ [2024-10-20 17:22:53,659][02555] Updated weights for policy 0, policy_version 400 (0.0014)
+ [2024-10-20 17:22:56,994][00556] Fps is (10 sec: 3688.0, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 1650688. Throughput: 0: 939.7. Samples: 413022. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:22:56,999][00556] Avg episode reward: [(0, '4.779')]
+ [2024-10-20 17:23:01,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 1671168. Throughput: 0: 970.4. Samples: 416362. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:23:02,000][00556] Avg episode reward: [(0, '4.848')]
+ [2024-10-20 17:23:04,254][02555] Updated weights for policy 0, policy_version 410 (0.0031)
+ [2024-10-20 17:23:06,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3748.9). Total num frames: 1683456. Throughput: 0: 944.8. Samples: 421000. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:23:07,001][00556] Avg episode reward: [(0, '4.805')]
+ [2024-10-20 17:23:11,994][00556] Fps is (10 sec: 3276.7, 60 sec: 3754.6, 300 sec: 3776.6). Total num frames: 1703936. Throughput: 0: 914.4. Samples: 426584. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:23:11,997][00556] Avg episode reward: [(0, '4.771')]
+ [2024-10-20 17:23:14,595][02555] Updated weights for policy 0, policy_version 420 (0.0021)
401
+ [2024-10-20 17:23:16,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 1728512. Throughput: 0: 940.2. Samples: 429984. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
402
+ [2024-10-20 17:23:17,001][00556] Avg episode reward: [(0, '4.734')]
403
+ [2024-10-20 17:23:21,996][00556] Fps is (10 sec: 3686.0, 60 sec: 3686.3, 300 sec: 3748.9). Total num frames: 1740800. Throughput: 0: 957.6. Samples: 435624. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
404
+ [2024-10-20 17:23:22,003][00556] Avg episode reward: [(0, '4.944')]
405
+ [2024-10-20 17:23:26,691][02555] Updated weights for policy 0, policy_version 430 (0.0034)
406
+ [2024-10-20 17:23:26,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 1761280. Throughput: 0: 915.1. Samples: 440432. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
407
+ [2024-10-20 17:23:26,997][00556] Avg episode reward: [(0, '5.364')]
408
+ [2024-10-20 17:23:26,999][02541] Saving new best policy, reward=5.364!
409
+ [2024-10-20 17:23:31,994][00556] Fps is (10 sec: 4506.2, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 1785856. Throughput: 0: 921.1. Samples: 443802. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
410
+ [2024-10-20 17:23:32,001][00556] Avg episode reward: [(0, '5.019')]
411
+ [2024-10-20 17:23:36,201][02555] Updated weights for policy 0, policy_version 440 (0.0028)
412
+ [2024-10-20 17:23:36,996][00556] Fps is (10 sec: 4095.4, 60 sec: 3822.8, 300 sec: 3776.6). Total num frames: 1802240. Throughput: 0: 973.7. Samples: 450314. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
413
+ [2024-10-20 17:23:37,000][00556] Avg episode reward: [(0, '4.710')]
414
+ [2024-10-20 17:23:41,994][00556] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3748.9). Total num frames: 1814528. Throughput: 0: 919.2. Samples: 454386. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
415
+ [2024-10-20 17:23:42,001][00556] Avg episode reward: [(0, '4.684')]
416
+ [2024-10-20 17:23:46,996][00556] Fps is (10 sec: 3686.3, 60 sec: 3754.8, 300 sec: 3776.6). Total num frames: 1839104. Throughput: 0: 913.8. Samples: 457484. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
417
+ [2024-10-20 17:23:47,007][00556] Avg episode reward: [(0, '4.459')]
418
+ [2024-10-20 17:23:47,651][02555] Updated weights for policy 0, policy_version 450 (0.0026)
419
+ [2024-10-20 17:23:51,998][00556] Fps is (10 sec: 4504.0, 60 sec: 3822.7, 300 sec: 3776.6). Total num frames: 1859584. Throughput: 0: 962.5. Samples: 464316. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
420
+ [2024-10-20 17:23:52,003][00556] Avg episode reward: [(0, '4.597')]
421
+ [2024-10-20 17:23:56,998][00556] Fps is (10 sec: 3685.4, 60 sec: 3754.4, 300 sec: 3748.8). Total num frames: 1875968. Throughput: 0: 944.4. Samples: 469084. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
422
+ [2024-10-20 17:23:57,001][00556] Avg episode reward: [(0, '4.627')]
423
+ [2024-10-20 17:23:59,352][02555] Updated weights for policy 0, policy_version 460 (0.0029)
424
+ [2024-10-20 17:24:01,994][00556] Fps is (10 sec: 3277.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 1892352. Throughput: 0: 919.8. Samples: 471376. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
425
+ [2024-10-20 17:24:01,997][00556] Avg episode reward: [(0, '4.696')]
426
+ [2024-10-20 17:24:06,994][00556] Fps is (10 sec: 4097.7, 60 sec: 3891.2, 300 sec: 3776.7). Total num frames: 1916928. Throughput: 0: 945.6. Samples: 478174. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
427
+ [2024-10-20 17:24:07,000][00556] Avg episode reward: [(0, '4.839')]
428
+ [2024-10-20 17:24:08,367][02555] Updated weights for policy 0, policy_version 470 (0.0027)
429
+ [2024-10-20 17:24:11,994][00556] Fps is (10 sec: 4096.1, 60 sec: 3823.0, 300 sec: 3762.8). Total num frames: 1933312. Throughput: 0: 966.9. Samples: 483942. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
430
+ [2024-10-20 17:24:11,996][00556] Avg episode reward: [(0, '4.737')]
+ [2024-10-20 17:24:16,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3686.4, 300 sec: 3748.9). Total num frames: 1949696. Throughput: 0: 938.0. Samples: 486012. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:24:16,996][00556] Avg episode reward: [(0, '4.771')]
+ [2024-10-20 17:24:20,063][02555] Updated weights for policy 0, policy_version 480 (0.0033)
+ [2024-10-20 17:24:21,995][00556] Fps is (10 sec: 4095.8, 60 sec: 3891.3, 300 sec: 3790.5). Total num frames: 1974272. Throughput: 0: 932.6. Samples: 492278. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:24:21,999][00556] Avg episode reward: [(0, '4.778')]
+ [2024-10-20 17:24:26,998][00556] Fps is (10 sec: 4503.7, 60 sec: 3890.9, 300 sec: 3776.6). Total num frames: 1994752. Throughput: 0: 989.4. Samples: 498914. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:24:27,004][00556] Avg episode reward: [(0, '4.685')]
+ [2024-10-20 17:24:31,202][02555] Updated weights for policy 0, policy_version 490 (0.0027)
+ [2024-10-20 17:24:31,994][00556] Fps is (10 sec: 3276.9, 60 sec: 3686.4, 300 sec: 3748.9). Total num frames: 2007040. Throughput: 0: 965.1. Samples: 500910. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:24:31,998][00556] Avg episode reward: [(0, '4.489')]
+ [2024-10-20 17:24:36,994][00556] Fps is (10 sec: 3278.2, 60 sec: 3754.8, 300 sec: 3776.7). Total num frames: 2027520. Throughput: 0: 929.3. Samples: 506130. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:24:36,996][00556] Avg episode reward: [(0, '4.545')]
+ [2024-10-20 17:24:40,936][02555] Updated weights for policy 0, policy_version 500 (0.0035)
+ [2024-10-20 17:24:41,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3790.5). Total num frames: 2052096. Throughput: 0: 970.8. Samples: 512764. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:24:41,997][00556] Avg episode reward: [(0, '4.800')]
+ [2024-10-20 17:24:46,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3823.0, 300 sec: 3776.7). Total num frames: 2068480. Throughput: 0: 983.1. Samples: 515614. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:24:46,998][00556] Avg episode reward: [(0, '4.732')]
+ [2024-10-20 17:24:51,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.9, 300 sec: 3776.7). Total num frames: 2084864. Throughput: 0: 928.0. Samples: 519936. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:24:52,002][00556] Avg episode reward: [(0, '4.685')]
+ [2024-10-20 17:24:52,013][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000509_2084864.pth...
+ [2024-10-20 17:24:52,144][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000287_1175552.pth
+ [2024-10-20 17:24:52,823][02555] Updated weights for policy 0, policy_version 510 (0.0023)
+ [2024-10-20 17:24:56,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3823.2, 300 sec: 3790.5). Total num frames: 2105344. Throughput: 0: 949.9. Samples: 526688. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:24:57,003][00556] Avg episode reward: [(0, '4.702')]
+ [2024-10-20 17:25:01,995][00556] Fps is (10 sec: 4095.6, 60 sec: 3891.1, 300 sec: 3776.7). Total num frames: 2125824. Throughput: 0: 978.2. Samples: 530034. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:01,999][00556] Avg episode reward: [(0, '4.588')]
+ [2024-10-20 17:25:02,938][02555] Updated weights for policy 0, policy_version 520 (0.0040)
+ [2024-10-20 17:25:06,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 2138112. Throughput: 0: 937.6. Samples: 534468. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:25:06,996][00556] Avg episode reward: [(0, '4.495')]
+ [2024-10-20 17:25:11,998][00556] Fps is (10 sec: 3685.3, 60 sec: 3822.7, 300 sec: 3790.5). Total num frames: 2162688. Throughput: 0: 923.6. Samples: 540474. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:12,001][00556] Avg episode reward: [(0, '4.351')]
+ [2024-10-20 17:25:13,734][02555] Updated weights for policy 0, policy_version 530 (0.0019)
+ [2024-10-20 17:25:16,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 2183168. Throughput: 0: 955.0. Samples: 543884. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:16,997][00556] Avg episode reward: [(0, '4.457')]
+ [2024-10-20 17:25:21,994][00556] Fps is (10 sec: 3278.1, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 2195456. Throughput: 0: 956.7. Samples: 549180. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:21,997][00556] Avg episode reward: [(0, '4.714')]
+ [2024-10-20 17:25:25,374][02555] Updated weights for policy 0, policy_version 540 (0.0053)
+ [2024-10-20 17:25:26,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.7, 300 sec: 3776.7). Total num frames: 2215936. Throughput: 0: 925.3. Samples: 554404. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:25:27,002][00556] Avg episode reward: [(0, '4.837')]
+ [2024-10-20 17:25:31,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.5). Total num frames: 2240512. Throughput: 0: 934.6. Samples: 557670. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:25:32,000][00556] Avg episode reward: [(0, '4.752')]
+ [2024-10-20 17:25:35,054][02555] Updated weights for policy 0, policy_version 550 (0.0034)
+ [2024-10-20 17:25:36,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 2256896. Throughput: 0: 972.8. Samples: 563714. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:25:36,998][00556] Avg episode reward: [(0, '4.594')]
+ [2024-10-20 17:25:41,994][00556] Fps is (10 sec: 2867.2, 60 sec: 3618.1, 300 sec: 3748.9). Total num frames: 2269184. Throughput: 0: 915.7. Samples: 567896. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:25:41,997][00556] Avg episode reward: [(0, '4.525')]
+ [2024-10-20 17:25:46,605][02555] Updated weights for policy 0, policy_version 560 (0.0025)
+ [2024-10-20 17:25:46,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 2293760. Throughput: 0: 916.6. Samples: 571280. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:25:46,998][00556] Avg episode reward: [(0, '4.673')]
+ [2024-10-20 17:25:51,996][00556] Fps is (10 sec: 4504.7, 60 sec: 3822.8, 300 sec: 3776.6). Total num frames: 2314240. Throughput: 0: 972.2. Samples: 578220. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:52,001][00556] Avg episode reward: [(0, '4.823')]
+ [2024-10-20 17:25:56,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3762.8). Total num frames: 2330624. Throughput: 0: 940.2. Samples: 582780. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:25:56,997][00556] Avg episode reward: [(0, '4.919')]
+ [2024-10-20 17:25:58,127][02555] Updated weights for policy 0, policy_version 570 (0.0019)
+ [2024-10-20 17:26:01,994][00556] Fps is (10 sec: 3687.1, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 2351104. Throughput: 0: 923.7. Samples: 585452. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:26:01,997][00556] Avg episode reward: [(0, '4.706')]
+ [2024-10-20 17:26:06,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 2371584. Throughput: 0: 956.9. Samples: 592240. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:26:07,001][00556] Avg episode reward: [(0, '4.485')]
+ [2024-10-20 17:26:07,351][02555] Updated weights for policy 0, policy_version 580 (0.0049)
+ [2024-10-20 17:26:11,994][00556] Fps is (10 sec: 3686.3, 60 sec: 3754.9, 300 sec: 3776.6). Total num frames: 2387968. Throughput: 0: 956.1. Samples: 597430. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:26:11,997][00556] Avg episode reward: [(0, '4.536')]
+ [2024-10-20 17:26:16,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 2404352. Throughput: 0: 929.2. Samples: 599484. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:26:17,001][00556] Avg episode reward: [(0, '4.665')]
+ [2024-10-20 17:26:19,046][02555] Updated weights for policy 0, policy_version 590 (0.0028)
+ [2024-10-20 17:26:21,994][00556] Fps is (10 sec: 4096.1, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 2428928. Throughput: 0: 940.4. Samples: 606030. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:26:22,003][00556] Avg episode reward: [(0, '4.743')]
+ [2024-10-20 17:26:26,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 2445312. Throughput: 0: 985.4. Samples: 612240. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:26:27,000][00556] Avg episode reward: [(0, '4.689')]
+ [2024-10-20 17:26:30,201][02555] Updated weights for policy 0, policy_version 600 (0.0029)
+ [2024-10-20 17:26:31,994][00556] Fps is (10 sec: 3276.7, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 2461696. Throughput: 0: 954.0. Samples: 614212. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:26:31,997][00556] Avg episode reward: [(0, '4.709')]
+ [2024-10-20 17:26:36,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3790.6). Total num frames: 2482176. Throughput: 0: 928.2. Samples: 619986. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:26:36,999][00556] Avg episode reward: [(0, '4.537')]
+ [2024-10-20 17:26:39,918][02555] Updated weights for policy 0, policy_version 610 (0.0037)
+ [2024-10-20 17:26:41,994][00556] Fps is (10 sec: 4505.7, 60 sec: 3959.5, 300 sec: 3804.4). Total num frames: 2506752. Throughput: 0: 974.3. Samples: 626622. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:26:42,001][00556] Avg episode reward: [(0, '4.535')]
+ [2024-10-20 17:26:47,000][00556] Fps is (10 sec: 3684.4, 60 sec: 3754.3, 300 sec: 3762.7). Total num frames: 2519040. Throughput: 0: 963.4. Samples: 628808. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:26:47,002][00556] Avg episode reward: [(0, '4.781')]
+ [2024-10-20 17:26:51,785][02555] Updated weights for policy 0, policy_version 620 (0.0034)
+ [2024-10-20 17:26:51,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3790.5). Total num frames: 2539520. Throughput: 0: 917.6. Samples: 633530. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:26:51,998][00556] Avg episode reward: [(0, '4.873')]
+ [2024-10-20 17:26:52,012][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000620_2539520.pth...
+ [2024-10-20 17:26:52,183][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000398_1630208.pth
+ [2024-10-20 17:26:56,994][00556] Fps is (10 sec: 4098.2, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 2560000. Throughput: 0: 951.9. Samples: 640266. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:26:57,003][00556] Avg episode reward: [(0, '4.658')]
+ [2024-10-20 17:27:01,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3776.6). Total num frames: 2576384. Throughput: 0: 976.4. Samples: 643420. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:27:01,997][00556] Avg episode reward: [(0, '4.596')]
+ [2024-10-20 17:27:02,378][02555] Updated weights for policy 0, policy_version 630 (0.0023)
+ [2024-10-20 17:27:06,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 2592768. Throughput: 0: 922.7. Samples: 647550. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:27:07,003][00556] Avg episode reward: [(0, '4.627')]
+ [2024-10-20 17:27:11,994][00556] Fps is (10 sec: 4096.1, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 2617344. Throughput: 0: 931.9. Samples: 654174. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:27:12,002][00556] Avg episode reward: [(0, '4.807')]
+ [2024-10-20 17:27:12,912][02555] Updated weights for policy 0, policy_version 640 (0.0036)
+ [2024-10-20 17:27:16,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 2633728. Throughput: 0: 957.7. Samples: 657310. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:27:17,001][00556] Avg episode reward: [(0, '4.921')]
+ [2024-10-20 17:27:21,998][00556] Fps is (10 sec: 3275.7, 60 sec: 3686.2, 300 sec: 3762.7). Total num frames: 2650112. Throughput: 0: 934.8. Samples: 662054. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:27:22,000][00556] Avg episode reward: [(0, '4.677')]
+ [2024-10-20 17:27:24,618][02555] Updated weights for policy 0, policy_version 650 (0.0041)
+ [2024-10-20 17:27:26,994][00556] Fps is (10 sec: 3686.3, 60 sec: 3754.6, 300 sec: 3790.5). Total num frames: 2670592. Throughput: 0: 916.4. Samples: 667862. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:27:26,996][00556] Avg episode reward: [(0, '4.529')]
+ [2024-10-20 17:27:31,994][00556] Fps is (10 sec: 4507.1, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 2695168. Throughput: 0: 942.7. Samples: 671224. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:27:31,996][00556] Avg episode reward: [(0, '4.440')]
+ [2024-10-20 17:27:34,301][02555] Updated weights for policy 0, policy_version 660 (0.0025)
+ [2024-10-20 17:27:36,994][00556] Fps is (10 sec: 3686.5, 60 sec: 3754.7, 300 sec: 3762.8). Total num frames: 2707456. Throughput: 0: 967.6. Samples: 677072. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:27:37,000][00556] Avg episode reward: [(0, '4.531')]
+ [2024-10-20 17:27:41,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 2727936. Throughput: 0: 927.2. Samples: 681990. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:27:41,997][00556] Avg episode reward: [(0, '4.663')]
+ [2024-10-20 17:27:45,328][02555] Updated weights for policy 0, policy_version 670 (0.0018)
+ [2024-10-20 17:27:46,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3823.3, 300 sec: 3790.5). Total num frames: 2748416. Throughput: 0: 931.8. Samples: 685350. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:27:47,000][00556] Avg episode reward: [(0, '4.670')]
+ [2024-10-20 17:27:51,996][00556] Fps is (10 sec: 4095.4, 60 sec: 3822.8, 300 sec: 3790.5). Total num frames: 2768896. Throughput: 0: 984.9. Samples: 691870. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:27:51,999][00556] Avg episode reward: [(0, '4.565')]
+ [2024-10-20 17:27:56,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 2781184. Throughput: 0: 929.6. Samples: 696004. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:27:56,996][00556] Avg episode reward: [(0, '4.838')]
+ [2024-10-20 17:27:57,025][02555] Updated weights for policy 0, policy_version 680 (0.0030)
+ [2024-10-20 17:28:01,994][00556] Fps is (10 sec: 3686.9, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 2805760. Throughput: 0: 932.3. Samples: 699264. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:28:02,001][00556] Avg episode reward: [(0, '4.913')]
+ [2024-10-20 17:28:06,054][02555] Updated weights for policy 0, policy_version 690 (0.0033)
+ [2024-10-20 17:28:06,996][00556] Fps is (10 sec: 4504.9, 60 sec: 3891.1, 300 sec: 3804.4). Total num frames: 2826240. Throughput: 0: 979.4. Samples: 706126. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:28:07,003][00556] Avg episode reward: [(0, '4.540')]
+ [2024-10-20 17:28:11,996][00556] Fps is (10 sec: 3685.9, 60 sec: 3754.6, 300 sec: 3776.6). Total num frames: 2842624. Throughput: 0: 953.3. Samples: 710762. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:28:12,002][00556] Avg episode reward: [(0, '4.684')]
+ [2024-10-20 17:28:16,994][00556] Fps is (10 sec: 3277.3, 60 sec: 3754.7, 300 sec: 3790.6). Total num frames: 2859008. Throughput: 0: 929.2. Samples: 713036. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:28:16,997][00556] Avg episode reward: [(0, '4.639')]
+ [2024-10-20 17:28:17,962][02555] Updated weights for policy 0, policy_version 700 (0.0028)
+ [2024-10-20 17:28:21,994][00556] Fps is (10 sec: 4096.6, 60 sec: 3891.4, 300 sec: 3804.4). Total num frames: 2883584. Throughput: 0: 949.9. Samples: 719818. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:28:21,997][00556] Avg episode reward: [(0, '4.914')]
+ [2024-10-20 17:28:26,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3776.7). Total num frames: 2899968. Throughput: 0: 965.8. Samples: 725452. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:28:26,996][00556] Avg episode reward: [(0, '4.976')]
+ [2024-10-20 17:28:29,403][02555] Updated weights for policy 0, policy_version 710 (0.0020)
+ [2024-10-20 17:28:31,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3776.7). Total num frames: 2916352. Throughput: 0: 935.8. Samples: 727460. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:28:32,001][00556] Avg episode reward: [(0, '4.898')]
+ [2024-10-20 17:28:36,994][00556] Fps is (10 sec: 3686.3, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 2936832. Throughput: 0: 931.5. Samples: 733786. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:28:37,002][00556] Avg episode reward: [(0, '4.801')]
+ [2024-10-20 17:28:38,815][02555] Updated weights for policy 0, policy_version 720 (0.0028)
+ [2024-10-20 17:28:41,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 2961408. Throughput: 0: 983.6. Samples: 740264. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:28:42,002][00556] Avg episode reward: [(0, '4.805')]
+ [2024-10-20 17:28:46,994][00556] Fps is (10 sec: 3686.5, 60 sec: 3754.7, 300 sec: 3776.7). Total num frames: 2973696. Throughput: 0: 955.8. Samples: 742276. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:28:46,998][00556] Avg episode reward: [(0, '4.785')]
+ [2024-10-20 17:28:50,856][02555] Updated weights for policy 0, policy_version 730 (0.0033)
+ [2024-10-20 17:28:51,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.8, 300 sec: 3790.6). Total num frames: 2994176. Throughput: 0: 918.5. Samples: 747456. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:28:52,002][00556] Avg episode reward: [(0, '4.639')]
+ [2024-10-20 17:28:52,011][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000731_2994176.pth...
+ [2024-10-20 17:28:52,146][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000509_2084864.pth
+ [2024-10-20 17:28:56,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 3014656. Throughput: 0: 966.4. Samples: 754250. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:28:56,997][00556] Avg episode reward: [(0, '4.543')]
+ [2024-10-20 17:29:00,864][02555] Updated weights for policy 0, policy_version 740 (0.0024)
+ [2024-10-20 17:29:01,996][00556] Fps is (10 sec: 3685.9, 60 sec: 3754.6, 300 sec: 3776.6). Total num frames: 3031040. Throughput: 0: 978.7. Samples: 757078. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:29:02,001][00556] Avg episode reward: [(0, '4.622')]
+ [2024-10-20 17:29:06,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.8, 300 sec: 3790.5). Total num frames: 3051520. Throughput: 0: 921.9. Samples: 761304. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:29:06,996][00556] Avg episode reward: [(0, '4.663')]
+ [2024-10-20 17:29:11,500][02555] Updated weights for policy 0, policy_version 750 (0.0015)
+ [2024-10-20 17:29:11,994][00556] Fps is (10 sec: 4096.6, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 3072000. Throughput: 0: 951.8. Samples: 768284. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:29:11,997][00556] Avg episode reward: [(0, '4.905')]
+ [2024-10-20 17:29:16,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3790.5). Total num frames: 3092480. Throughput: 0: 981.0. Samples: 771606. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:29:16,996][00556] Avg episode reward: [(0, '4.757')]
+ [2024-10-20 17:29:21,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3762.8). Total num frames: 3104768. Throughput: 0: 941.6. Samples: 776158. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:29:22,000][00556] Avg episode reward: [(0, '4.859')]
+ [2024-10-20 17:29:23,066][02555] Updated weights for policy 0, policy_version 760 (0.0018)
+ [2024-10-20 17:29:26,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 3129344. Throughput: 0: 938.8. Samples: 782512. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:29:26,997][00556] Avg episode reward: [(0, '4.867')]
+ [2024-10-20 17:29:31,733][02555] Updated weights for policy 0, policy_version 770 (0.0027)
+ [2024-10-20 17:29:31,997][00556] Fps is (10 sec: 4913.8, 60 sec: 3959.3, 300 sec: 3818.3). Total num frames: 3153920. Throughput: 0: 971.5. Samples: 785998. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:29:32,000][00556] Avg episode reward: [(0, '4.691')]
+ [2024-10-20 17:29:36,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3823.0, 300 sec: 3776.7). Total num frames: 3166208. Throughput: 0: 973.7. Samples: 791272. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:29:36,997][00556] Avg episode reward: [(0, '4.795')]
+ [2024-10-20 17:29:41,994][00556] Fps is (10 sec: 3277.8, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 3186688. Throughput: 0: 937.6. Samples: 796444. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:29:41,997][00556] Avg episode reward: [(0, '4.931')]
+ [2024-10-20 17:29:43,761][02555] Updated weights for policy 0, policy_version 780 (0.0024)
+ [2024-10-20 17:29:46,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 3207168. Throughput: 0: 946.5. Samples: 799670. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:29:46,997][00556] Avg episode reward: [(0, '4.690')]
+ [2024-10-20 17:29:52,002][00556] Fps is (10 sec: 3683.5, 60 sec: 3822.4, 300 sec: 3790.4). Total num frames: 3223552. Throughput: 0: 992.4. Samples: 805970. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:29:52,008][00556] Avg episode reward: [(0, '4.689')]
+ [2024-10-20 17:29:55,285][02555] Updated weights for policy 0, policy_version 790 (0.0018)
+ [2024-10-20 17:29:56,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3776.7). Total num frames: 3239936. Throughput: 0: 933.4. Samples: 810288. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:29:56,997][00556] Avg episode reward: [(0, '4.649')]
+ [2024-10-20 17:30:01,994][00556] Fps is (10 sec: 4099.2, 60 sec: 3891.3, 300 sec: 3818.3). Total num frames: 3264512. Throughput: 0: 934.1. Samples: 813640. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:30:01,997][00556] Avg episode reward: [(0, '4.710')]
+ [2024-10-20 17:30:04,508][02555] Updated weights for policy 0, policy_version 800 (0.0024)
+ [2024-10-20 17:30:06,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3891.2, 300 sec: 3804.5). Total num frames: 3284992. Throughput: 0: 986.2. Samples: 820538. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:30:06,998][00556] Avg episode reward: [(0, '4.660')]
+ [2024-10-20 17:30:11,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3776.6). Total num frames: 3297280. Throughput: 0: 941.7. Samples: 824890. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:30:11,997][00556] Avg episode reward: [(0, '4.815')]
+ [2024-10-20 17:30:16,341][02555] Updated weights for policy 0, policy_version 810 (0.0059)
+ [2024-10-20 17:30:16,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3804.4). Total num frames: 3317760. Throughput: 0: 921.1. Samples: 827446. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:30:17,002][00556] Avg episode reward: [(0, '4.636')]
+ [2024-10-20 17:30:21,994][00556] Fps is (10 sec: 4505.6, 60 sec: 3959.5, 300 sec: 3818.3). Total num frames: 3342336. Throughput: 0: 957.6. Samples: 834366. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:30:21,998][00556] Avg episode reward: [(0, '4.725')]
+ [2024-10-20 17:30:26,937][02555] Updated weights for policy 0, policy_version 820 (0.0026)
+ [2024-10-20 17:30:26,994][00556] Fps is (10 sec: 4096.1, 60 sec: 3822.9, 300 sec: 3790.5). Total num frames: 3358720. Throughput: 0: 961.3. Samples: 839702. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:30:26,997][00556] Avg episode reward: [(0, '4.849')]
+ [2024-10-20 17:30:31,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.6, 300 sec: 3790.5). Total num frames: 3375104. Throughput: 0: 937.1. Samples: 841838. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:30:31,998][00556] Avg episode reward: [(0, '4.802')]
+ [2024-10-20 17:30:36,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 3395584. Throughput: 0: 940.9. Samples: 848304. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:30:37,002][00556] Avg episode reward: [(0, '4.803')]
+ [2024-10-20 17:30:37,042][02555] Updated weights for policy 0, policy_version 830 (0.0036)
+ [2024-10-20 17:30:41,997][00556] Fps is (10 sec: 4095.0, 60 sec: 3822.8, 300 sec: 3804.4). Total num frames: 3416064. Throughput: 0: 985.5. Samples: 854636. Policy #0 lag: (min: 0.0, avg: 0.3, max: 1.0)
+ [2024-10-20 17:30:41,999][00556] Avg episode reward: [(0, '5.010')]
+ [2024-10-20 17:30:46,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.7, 300 sec: 3790.6). Total num frames: 3432448. Throughput: 0: 954.5. Samples: 856594. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:30:46,999][00556] Avg episode reward: [(0, '5.008')]
+ [2024-10-20 17:30:48,848][02555] Updated weights for policy 0, policy_version 840 (0.0036)
+ [2024-10-20 17:30:52,000][00556] Fps is (10 sec: 3685.1, 60 sec: 3823.1, 300 sec: 3804.3). Total num frames: 3452928. Throughput: 0: 924.4. Samples: 862142. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:30:52,004][00556] Avg episode reward: [(0, '4.853')]
+ [2024-10-20 17:30:52,014][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000843_3452928.pth...
+ [2024-10-20 17:30:52,170][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000620_2539520.pth
+ [2024-10-20 17:30:56,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 3473408. Throughput: 0: 977.6. Samples: 868884. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:30:56,999][00556] Avg episode reward: [(0, '4.693')]
+ [2024-10-20 17:30:58,249][02555] Updated weights for policy 0, policy_version 850 (0.0028)
+ [2024-10-20 17:31:01,994][00556] Fps is (10 sec: 3688.6, 60 sec: 3754.7, 300 sec: 3790.5). Total num frames: 3489792. Throughput: 0: 979.2. Samples: 871508. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:31:01,997][00556] Avg episode reward: [(0, '4.726')]
+ [2024-10-20 17:31:06,994][00556] Fps is (10 sec: 3686.3, 60 sec: 3754.6, 300 sec: 3804.4). Total num frames: 3510272. Throughput: 0: 929.9. Samples: 876212. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:31:06,997][00556] Avg episode reward: [(0, '4.915')]
+ [2024-10-20 17:31:09,487][02555] Updated weights for policy 0, policy_version 860 (0.0019)
+ [2024-10-20 17:31:11,994][00556] Fps is (10 sec: 4095.9, 60 sec: 3891.2, 300 sec: 3818.3). Total num frames: 3530752. Throughput: 0: 969.4. Samples: 883326. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:31:11,997][00556] Avg episode reward: [(0, '4.967')]
+ [2024-10-20 17:31:16,994][00556] Fps is (10 sec: 4096.1, 60 sec: 3891.2, 300 sec: 3804.4). Total num frames: 3551232. Throughput: 0: 999.7. Samples: 886824. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:31:17,001][00556] Avg episode reward: [(0, '5.088')]
+ [2024-10-20 17:31:20,733][02555] Updated weights for policy 0, policy_version 870 (0.0034)
+ [2024-10-20 17:31:21,994][00556] Fps is (10 sec: 3686.5, 60 sec: 3754.7, 300 sec: 3804.4). Total num frames: 3567616. Throughput: 0: 947.2. Samples: 890930. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:31:21,998][00556] Avg episode reward: [(0, '5.315')]
+ [2024-10-20 17:31:26,997][00556] Fps is (10 sec: 3685.5, 60 sec: 3822.8, 300 sec: 3818.3). Total num frames: 3588096. Throughput: 0: 944.8. Samples: 897150. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:31:27,004][00556] Avg episode reward: [(0, '5.374')]
+ [2024-10-20 17:31:27,007][02541] Saving new best policy, reward=5.374!
+ [2024-10-20 17:31:30,119][02555] Updated weights for policy 0, policy_version 880 (0.0024)
+ [2024-10-20 17:31:32,014][00556] Fps is (10 sec: 4496.8, 60 sec: 3958.2, 300 sec: 3831.9). Total num frames: 3612672. Throughput: 0: 976.5. Samples: 900556. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:31:32,024][00556] Avg episode reward: [(0, '5.407')]
+ [2024-10-20 17:31:32,050][02541] Saving new best policy, reward=5.407!
+ [2024-10-20 17:31:36,998][00556] Fps is (10 sec: 3685.7, 60 sec: 3822.7, 300 sec: 3790.5). Total num frames: 3624960. Throughput: 0: 968.2. Samples: 905708. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:31:37,001][00556] Avg episode reward: [(0, '5.461')]
+ [2024-10-20 17:31:37,003][02541] Saving new best policy, reward=5.461!
+ [2024-10-20 17:31:41,994][00556] Fps is (10 sec: 2872.8, 60 sec: 3754.8, 300 sec: 3804.5). Total num frames: 3641344. Throughput: 0: 937.2. Samples: 911056. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:31:41,996][00556] Avg episode reward: [(0, '5.237')]
+ [2024-10-20 17:31:42,066][02555] Updated weights for policy 0, policy_version 890 (0.0045)
+ [2024-10-20 17:31:46,994][00556] Fps is (10 sec: 4097.8, 60 sec: 3891.2, 300 sec: 3818.3). Total num frames: 3665920. Throughput: 0: 952.3. Samples: 914362. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
+ [2024-10-20 17:31:46,997][00556] Avg episode reward: [(0, '4.998')]
+ [2024-10-20 17:31:51,996][00556] Fps is (10 sec: 4095.5, 60 sec: 3823.2, 300 sec: 3804.4). Total num frames: 3682304. Throughput: 0: 978.7. Samples: 920256. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:31:52,000][00556] Avg episode reward: [(0, '4.980')]
+ [2024-10-20 17:31:52,868][02555] Updated weights for policy 0, policy_version 900 (0.0030)
+ [2024-10-20 17:31:56,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3754.7, 300 sec: 3804.4). Total num frames: 3698688. Throughput: 0: 919.6. Samples: 924708. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:31:56,997][00556] Avg episode reward: [(0, '4.915')]
+ [2024-10-20 17:32:01,994][00556] Fps is (10 sec: 3686.9, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 3719168. Throughput: 0: 916.8. Samples: 928082. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:32:01,997][00556] Avg episode reward: [(0, '4.540')]
+ [2024-10-20 17:32:03,085][02555] Updated weights for policy 0, policy_version 910 (0.0024)
+ [2024-10-20 17:32:06,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3823.0, 300 sec: 3804.4). Total num frames: 3739648. Throughput: 0: 976.3. Samples: 934864. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:32:06,998][00556] Avg episode reward: [(0, '4.468')]
+ [2024-10-20 17:32:11,998][00556] Fps is (10 sec: 3685.0, 60 sec: 3754.4, 300 sec: 3804.4). Total num frames: 3756032. Throughput: 0: 930.0. Samples: 939000. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
+ [2024-10-20 17:32:12,004][00556] Avg episode reward: [(0, '4.704')]
+ [2024-10-20 17:32:14,915][02555] Updated weights for policy 0, policy_version 920 (0.0024)
+ [2024-10-20 17:32:16,995][00556] Fps is (10 sec: 3686.2, 60 sec: 3754.6, 300 sec: 3818.3). Total num frames: 3776512. Throughput: 0: 918.6. Samples: 941874. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:32:17,001][00556] Avg episode reward: [(0, '4.778')]
+ [2024-10-20 17:32:21,994][00556] Fps is (10 sec: 4097.5, 60 sec: 3822.9, 300 sec: 3818.3). Total num frames: 3796992. Throughput: 0: 952.9. Samples: 948586. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:32:21,997][00556] Avg episode reward: [(0, '4.707')]
+ [2024-10-20 17:32:25,209][02555] Updated weights for policy 0, policy_version 930 (0.0024)
+ [2024-10-20 17:32:26,994][00556] Fps is (10 sec: 3686.6, 60 sec: 3754.8, 300 sec: 3790.5). Total num frames: 3813376. Throughput: 0: 946.4. Samples: 953646. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:32:27,001][00556] Avg episode reward: [(0, '4.644')]
+ [2024-10-20 17:32:31,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3619.3, 300 sec: 3804.4). Total num frames: 3829760. Throughput: 0: 920.1. Samples: 955766. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:32:31,997][00556] Avg episode reward: [(0, '4.634')]
+ [2024-10-20 17:32:35,894][02555] Updated weights for policy 0, policy_version 940 (0.0047)
+ [2024-10-20 17:32:36,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3823.2, 300 sec: 3818.3). Total num frames: 3854336. Throughput: 0: 934.0. Samples: 962284. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
+ [2024-10-20 17:32:37,003][00556] Avg episode reward: [(0, '4.554')]
+ [2024-10-20 17:32:41,994][00556] Fps is (10 sec: 4096.0, 60 sec: 3822.9, 300 sec: 3804.4). Total num frames: 3870720. Throughput: 0: 971.2. Samples: 968414. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
+ [2024-10-20 17:32:42,001][00556] Avg episode reward: [(0, '4.386')]
+ [2024-10-20 17:32:46,994][00556] Fps is (10 sec: 3276.8, 60 sec: 3686.4, 300 sec: 3790.6). Total num frames: 3887104. Throughput: 0: 941.4. Samples: 970446. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:32:46,997][00556] Avg episode reward: [(0, '4.390')]
+ [2024-10-20 17:32:47,676][02555] Updated weights for policy 0, policy_version 950 (0.0015)
+ [2024-10-20 17:32:51,994][00556] Fps is (10 sec: 3686.4, 60 sec: 3754.8, 300 sec: 3818.3). Total num frames: 3907584. Throughput: 0: 921.0. Samples: 976308. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
+ [2024-10-20 17:32:52,001][00556] Avg episode reward: [(0, '4.427')]
+ [2024-10-20 17:32:52,052][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000955_3911680.pth...
+ [2024-10-20 17:32:52,209][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000731_2994176.pth
+ [2024-10-20 17:32:56,572][02555] Updated weights for policy 0, policy_version 960 (0.0034)
+ [2024-10-20 17:32:56,998][00556] Fps is (10 sec: 4503.7, 60 sec: 3890.9, 300 sec: 3818.3). Total num frames: 3932160. Throughput: 0: 979.6. Samples: 983082. Policy #0 lag: (min: 0.0, avg: 0.4, max: 2.0)
+ [2024-10-20 17:32:57,003][00556] Avg episode reward: [(0, '4.519')]
+ [2024-10-20 17:33:02,001][00556] Fps is (10 sec: 3683.9, 60 sec: 3754.2, 300 sec: 3790.5). Total num frames: 3944448. Throughput: 0: 966.5. Samples: 985372. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:33:02,011][00556] Avg episode reward: [(0, '4.563')]
+ [2024-10-20 17:33:06,994][00556] Fps is (10 sec: 3278.1, 60 sec: 3754.7, 300 sec: 3804.4). Total num frames: 3964928. Throughput: 0: 920.9. Samples: 990028. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
+ [2024-10-20 17:33:06,996][00556] Avg episode reward: [(0, '4.668')]
+ [2024-10-20 17:33:08,575][02555] Updated weights for policy 0, policy_version 970 (0.0027)
+ [2024-10-20 17:33:11,994][00556] Fps is (10 sec: 4098.7, 60 sec: 3823.2, 300 sec: 3818.3). Total num frames: 3985408. Throughput: 0: 959.4. Samples: 996818. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
+ [2024-10-20 17:33:11,996][00556] Avg episode reward: [(0, '4.825')]
+ [2024-10-20 17:33:16,991][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2024-10-20 17:33:17,016][02541] Stopping Batcher_0...
+ [2024-10-20 17:33:17,016][02541] Loop batcher_evt_loop terminating...
+ [2024-10-20 17:33:17,016][00556] Component Batcher_0 stopped!
+ [2024-10-20 17:33:17,094][02555] Weights refcount: 2 0
+ [2024-10-20 17:33:17,096][02555] Stopping InferenceWorker_p0-w0...
+ [2024-10-20 17:33:17,096][02555] Loop inference_proc0-0_evt_loop terminating...
+ [2024-10-20 17:33:17,098][00556] Component InferenceWorker_p0-w0 stopped!
+ [2024-10-20 17:33:17,180][02541] Removing /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000843_3452928.pth
+ [2024-10-20 17:33:17,217][02541] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2024-10-20 17:33:17,478][00556] Component LearnerWorker_p0 stopped!
+ [2024-10-20 17:33:17,480][02541] Stopping LearnerWorker_p0...
+ [2024-10-20 17:33:17,484][02541] Loop learner_proc0_evt_loop terminating...
+ [2024-10-20 17:33:17,593][00556] Component RolloutWorker_w2 stopped!
+ [2024-10-20 17:33:17,596][02558] Stopping RolloutWorker_w2...
+ [2024-10-20 17:33:17,598][02558] Loop rollout_proc2_evt_loop terminating...
+ [2024-10-20 17:33:17,630][00556] Component RolloutWorker_w0 stopped!
+ [2024-10-20 17:33:17,633][02554] Stopping RolloutWorker_w0...
+ [2024-10-20 17:33:17,642][02554] Loop rollout_proc0_evt_loop terminating...
+ [2024-10-20 17:33:17,670][00556] Component RolloutWorker_w6 stopped!
+ [2024-10-20 17:33:17,673][02559] Stopping RolloutWorker_w6...
+ [2024-10-20 17:33:17,684][00556] Component RolloutWorker_w4 stopped!
+ [2024-10-20 17:33:17,688][02560] Stopping RolloutWorker_w4...
+ [2024-10-20 17:33:17,689][02560] Loop rollout_proc4_evt_loop terminating...
+ [2024-10-20 17:33:17,674][02559] Loop rollout_proc6_evt_loop terminating...
+ [2024-10-20 17:33:17,729][02562] Stopping RolloutWorker_w7...
+ [2024-10-20 17:33:17,730][00556] Component RolloutWorker_w7 stopped!
+ [2024-10-20 17:33:17,730][02562] Loop rollout_proc7_evt_loop terminating...
+ [2024-10-20 17:33:17,761][00556] Component RolloutWorker_w5 stopped!
+ [2024-10-20 17:33:17,765][02561] Stopping RolloutWorker_w5...
+ [2024-10-20 17:33:17,767][02561] Loop rollout_proc5_evt_loop terminating...
+ [2024-10-20 17:33:17,780][00556] Component RolloutWorker_w3 stopped!
+ [2024-10-20 17:33:17,788][02557] Stopping RolloutWorker_w3...
+ [2024-10-20 17:33:17,789][02557] Loop rollout_proc3_evt_loop terminating...
+ [2024-10-20 17:33:17,796][00556] Component RolloutWorker_w1 stopped!
+ [2024-10-20 17:33:17,798][00556] Waiting for process learner_proc0 to stop...
+ [2024-10-20 17:33:17,802][02556] Stopping RolloutWorker_w1...
+ [2024-10-20 17:33:17,803][02556] Loop rollout_proc1_evt_loop terminating...
+ [2024-10-20 17:33:19,784][00556] Waiting for process inference_proc0-0 to join...
+ [2024-10-20 17:33:19,789][00556] Waiting for process rollout_proc0 to join...
+ [2024-10-20 17:33:21,985][00556] Waiting for process rollout_proc1 to join...
+ [2024-10-20 17:33:21,989][00556] Waiting for process rollout_proc2 to join...
+ [2024-10-20 17:33:21,994][00556] Waiting for process rollout_proc3 to join...
+ [2024-10-20 17:33:21,997][00556] Waiting for process rollout_proc4 to join...
+ [2024-10-20 17:33:22,002][00556] Waiting for process rollout_proc5 to join...
+ [2024-10-20 17:33:22,004][00556] Waiting for process rollout_proc6 to join...
+ [2024-10-20 17:33:22,011][00556] Waiting for process rollout_proc7 to join...
+ [2024-10-20 17:33:22,017][00556] Batcher 0 profile tree view:
+ batching: 26.2966, releasing_batches: 0.0304
+ [2024-10-20 17:33:22,019][00556] InferenceWorker_p0-w0 profile tree view:
+ wait_policy: 0.0000
+ wait_policy_total: 427.7291
+ update_model: 9.1894
+ weight_update: 0.0024
+ one_step: 0.0246
+ handle_policy_step: 591.8972
+ deserialize: 15.4524, stack: 3.1635, obs_to_device_normalize: 120.7706, forward: 315.7641, send_messages: 28.2282
+ prepare_outputs: 79.9414
+ to_cpu: 45.7012
+ [2024-10-20 17:33:22,021][00556] Learner 0 profile tree view:
+ misc: 0.0056, prepare_batch: 14.0403
+ train: 75.1067
+ epoch_init: 0.0132, minibatch_init: 0.0119, losses_postprocess: 0.6113, kl_divergence: 0.7346, after_optimizer: 34.5945
+ calculate_losses: 26.7507
+ losses_init: 0.0060, forward_head: 1.2384, bptt_initial: 18.0547, tail: 1.0522, advantages_returns: 0.2671, losses: 3.7825
+ bptt: 2.0385
+ bptt_forward_core: 1.9446
+ update: 11.7477
+ clip: 0.8929
+ [2024-10-20 17:33:22,024][00556] RolloutWorker_w0 profile tree view:
+ wait_for_trajectories: 0.3558, enqueue_policy_requests: 103.7466, env_step: 832.9414, overhead: 13.4578, complete_rollouts: 6.5863
+ save_policy_outputs: 20.6964
+ split_output_tensors: 8.2401
+ [2024-10-20 17:33:22,026][00556] RolloutWorker_w7 profile tree view:
+ wait_for_trajectories: 0.3766, enqueue_policy_requests: 105.3575, env_step: 832.6607, overhead: 13.3658, complete_rollouts: 6.9517
+ save_policy_outputs: 20.5847
+ split_output_tensors: 8.1255
+ [2024-10-20 17:33:22,028][00556] Loop Runner_EvtLoop terminating...
+ [2024-10-20 17:33:22,030][00556] Runner profile tree view:
+ main_loop: 1098.7486
+ [2024-10-20 17:33:22,031][00556] Collected {0: 4005888}, FPS: 3645.9
+ [2024-10-20 17:33:22,617][00556] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+ [2024-10-20 17:33:22,619][00556] Overriding arg 'num_workers' with value 1 passed from command line
+ [2024-10-20 17:33:22,621][00556] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2024-10-20 17:33:22,623][00556] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2024-10-20 17:33:22,625][00556] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2024-10-20 17:33:22,627][00556] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2024-10-20 17:33:22,628][00556] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
+ [2024-10-20 17:33:22,629][00556] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2024-10-20 17:33:22,630][00556] Adding new argument 'push_to_hub'=False that is not in the saved config file!
+ [2024-10-20 17:33:22,631][00556] Adding new argument 'hf_repository'=None that is not in the saved config file!
+ [2024-10-20 17:33:22,633][00556] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2024-10-20 17:33:22,634][00556] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2024-10-20 17:33:22,635][00556] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2024-10-20 17:33:22,636][00556] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2024-10-20 17:33:22,637][00556] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2024-10-20 17:33:22,671][00556] Doom resolution: 160x120, resize resolution: (128, 72)
+ [2024-10-20 17:33:22,674][00556] RunningMeanStd input shape: (3, 72, 128)
+ [2024-10-20 17:33:22,676][00556] RunningMeanStd input shape: (1,)
+ [2024-10-20 17:33:22,694][00556] ConvEncoder: input_channels=3
+ [2024-10-20 17:33:22,796][00556] Conv encoder output size: 512
+ [2024-10-20 17:33:22,797][00556] Policy head output size: 512
+ [2024-10-20 17:33:22,969][00556] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2024-10-20 17:33:23,797][00556] Num frames 100...
+ [2024-10-20 17:33:23,922][00556] Num frames 200...
+ [2024-10-20 17:33:24,049][00556] Num frames 300...
+ [2024-10-20 17:33:24,206][00556] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+ [2024-10-20 17:33:24,208][00556] Avg episode reward: 3.840, avg true_objective: 3.840
+ [2024-10-20 17:33:24,232][00556] Num frames 400...
+ [2024-10-20 17:33:24,359][00556] Num frames 500...
+ [2024-10-20 17:33:24,485][00556] Num frames 600...
+ [2024-10-20 17:33:24,618][00556] Num frames 700...
+ [2024-10-20 17:33:24,757][00556] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+ [2024-10-20 17:33:24,759][00556] Avg episode reward: 3.840, avg true_objective: 3.840
+ [2024-10-20 17:33:24,801][00556] Num frames 800...
+ [2024-10-20 17:33:24,924][00556] Num frames 900...
+ [2024-10-20 17:33:25,058][00556] Num frames 1000...
+ [2024-10-20 17:33:25,180][00556] Num frames 1100...
+ [2024-10-20 17:33:25,303][00556] Avg episode rewards: #0: 3.840, true rewards: #0: 3.840
+ [2024-10-20 17:33:25,306][00556] Avg episode reward: 3.840, avg true_objective: 3.840
+ [2024-10-20 17:33:25,364][00556] Num frames 1200...
+ [2024-10-20 17:33:25,501][00556] Num frames 1300...
+ [2024-10-20 17:33:25,633][00556] Num frames 1400...
+ [2024-10-20 17:33:25,752][00556] Num frames 1500...
+ [2024-10-20 17:33:25,876][00556] Num frames 1600...
+ [2024-10-20 17:33:25,970][00556] Avg episode rewards: #0: 4.330, true rewards: #0: 4.080
+ [2024-10-20 17:33:25,973][00556] Avg episode reward: 4.330, avg true_objective: 4.080
+ [2024-10-20 17:33:26,058][00556] Num frames 1700...
+ [2024-10-20 17:33:26,183][00556] Num frames 1800...
+ [2024-10-20 17:33:26,309][00556] Num frames 1900...
+ [2024-10-20 17:33:26,444][00556] Num frames 2000...
+ [2024-10-20 17:33:26,600][00556] Avg episode rewards: #0: 4.560, true rewards: #0: 4.160
+ [2024-10-20 17:33:26,602][00556] Avg episode reward: 4.560, avg true_objective: 4.160
+ [2024-10-20 17:33:26,630][00556] Num frames 2100...
+ [2024-10-20 17:33:26,753][00556] Num frames 2200...
+ [2024-10-20 17:33:26,876][00556] Num frames 2300...
+ [2024-10-20 17:33:26,979][00556] Avg episode rewards: #0: 4.227, true rewards: #0: 3.893
+ [2024-10-20 17:33:26,980][00556] Avg episode reward: 4.227, avg true_objective: 3.893
+ [2024-10-20 17:33:27,065][00556] Num frames 2400...
+ [2024-10-20 17:33:27,187][00556] Num frames 2500...
+ [2024-10-20 17:33:27,314][00556] Num frames 2600...
+ [2024-10-20 17:33:27,450][00556] Num frames 2700...
+ [2024-10-20 17:33:27,573][00556] Num frames 2800...
+ [2024-10-20 17:33:27,744][00556] Avg episode rewards: #0: 4.829, true rewards: #0: 4.114
+ [2024-10-20 17:33:27,746][00556] Avg episode reward: 4.829, avg true_objective: 4.114
+ [2024-10-20 17:33:27,774][00556] Num frames 2900...
+ [2024-10-20 17:33:27,898][00556] Num frames 3000...
+ [2024-10-20 17:33:28,021][00556] Num frames 3100...
+ [2024-10-20 17:33:28,143][00556] Num frames 3200...
+ [2024-10-20 17:33:28,280][00556] Avg episode rewards: #0: 4.705, true rewards: #0: 4.080
+ [2024-10-20 17:33:28,281][00556] Avg episode reward: 4.705, avg true_objective: 4.080
+ [2024-10-20 17:33:28,330][00556] Num frames 3300...
+ [2024-10-20 17:33:28,462][00556] Num frames 3400...
+ [2024-10-20 17:33:28,589][00556] Num frames 3500...
+ [2024-10-20 17:33:28,725][00556] Num frames 3600...
+ [2024-10-20 17:33:28,850][00556] Num frames 3700...
+ [2024-10-20 17:33:28,921][00556] Avg episode rewards: #0: 4.791, true rewards: #0: 4.124
+ [2024-10-20 17:33:28,923][00556] Avg episode reward: 4.791, avg true_objective: 4.124
+ [2024-10-20 17:33:29,034][00556] Num frames 3800...
+ [2024-10-20 17:33:29,164][00556] Num frames 3900...
+ [2024-10-20 17:33:29,292][00556] Num frames 4000...
+ [2024-10-20 17:33:29,478][00556] Avg episode rewards: #0: 4.696, true rewards: #0: 4.096
+ [2024-10-20 17:33:29,480][00556] Avg episode reward: 4.696, avg true_objective: 4.096
+ [2024-10-20 17:33:49,967][00556] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
+ [2024-10-20 17:41:26,483][00556] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
+ [2024-10-20 17:41:26,485][00556] Overriding arg 'num_workers' with value 1 passed from command line
+ [2024-10-20 17:41:26,486][00556] Adding new argument 'no_render'=True that is not in the saved config file!
+ [2024-10-20 17:41:26,488][00556] Adding new argument 'save_video'=True that is not in the saved config file!
+ [2024-10-20 17:41:26,489][00556] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
+ [2024-10-20 17:41:26,490][00556] Adding new argument 'video_name'=None that is not in the saved config file!
+ [2024-10-20 17:41:26,491][00556] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
+ [2024-10-20 17:41:26,492][00556] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
+ [2024-10-20 17:41:26,494][00556] Adding new argument 'push_to_hub'=True that is not in the saved config file!
+ [2024-10-20 17:41:26,500][00556] Adding new argument 'hf_repository'='jerryvc/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
+ [2024-10-20 17:41:26,501][00556] Adding new argument 'policy_index'=0 that is not in the saved config file!
+ [2024-10-20 17:41:26,502][00556] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
+ [2024-10-20 17:41:26,503][00556] Adding new argument 'train_script'=None that is not in the saved config file!
+ [2024-10-20 17:41:26,504][00556] Adding new argument 'enjoy_script'=None that is not in the saved config file!
+ [2024-10-20 17:41:26,505][00556] Using frameskip 1 and render_action_repeat=4 for evaluation
+ [2024-10-20 17:41:26,541][00556] RunningMeanStd input shape: (3, 72, 128)
+ [2024-10-20 17:41:26,543][00556] RunningMeanStd input shape: (1,)
+ [2024-10-20 17:41:26,557][00556] ConvEncoder: input_channels=3
+ [2024-10-20 17:41:26,594][00556] Conv encoder output size: 512
+ [2024-10-20 17:41:26,595][00556] Policy head output size: 512
+ [2024-10-20 17:41:26,617][00556] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
+ [2024-10-20 17:41:27,021][00556] Num frames 100...
+ [2024-10-20 17:41:27,149][00556] Num frames 200...
+ [2024-10-20 17:41:27,272][00556] Num frames 300...
+ [2024-10-20 17:41:27,399][00556] Num frames 400...
+ [2024-10-20 17:41:27,550][00556] Num frames 500...
+ [2024-10-20 17:41:27,621][00556] Avg episode rewards: #0: 7.120, true rewards: #0: 5.120
+ [2024-10-20 17:41:27,622][00556] Avg episode reward: 7.120, avg true_objective: 5.120
+ [2024-10-20 17:41:27,740][00556] Num frames 600...
+ [2024-10-20 17:41:27,871][00556] Num frames 700...
+ [2024-10-20 17:41:27,999][00556] Num frames 800...
+ [2024-10-20 17:41:28,124][00556] Num frames 900...
+ [2024-10-20 17:41:28,260][00556] Avg episode rewards: #0: 6.300, true rewards: #0: 4.800
+ [2024-10-20 17:41:28,263][00556] Avg episode reward: 6.300, avg true_objective: 4.800
+ [2024-10-20 17:41:28,320][00556] Num frames 1000...
+ [2024-10-20 17:41:28,454][00556] Num frames 1100...
+ [2024-10-20 17:41:28,578][00556] Num frames 1200...
+ [2024-10-20 17:41:28,703][00556] Num frames 1300...
+ [2024-10-20 17:41:28,813][00556] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+ [2024-10-20 17:41:28,815][00556] Avg episode reward: 5.480, avg true_objective: 4.480
+ [2024-10-20 17:41:28,888][00556] Num frames 1400...
+ [2024-10-20 17:41:29,017][00556] Num frames 1500...
+ [2024-10-20 17:41:29,139][00556] Num frames 1600...
+ [2024-10-20 17:41:29,270][00556] Num frames 1700...
+ [2024-10-20 17:41:29,435][00556] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+ [2024-10-20 17:41:29,437][00556] Avg episode reward: 5.480, avg true_objective: 4.480
+ [2024-10-20 17:41:29,451][00556] Num frames 1800...
+ [2024-10-20 17:41:29,576][00556] Num frames 1900...
+ [2024-10-20 17:41:29,697][00556] Num frames 2000...
+ [2024-10-20 17:41:29,820][00556] Num frames 2100...
+ [2024-10-20 17:41:29,944][00556] Num frames 2200...
+ [2024-10-20 17:41:30,048][00556] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+ [2024-10-20 17:41:30,050][00556] Avg episode reward: 5.480, avg true_objective: 4.480
+ [2024-10-20 17:41:30,128][00556] Num frames 2300...
+ [2024-10-20 17:41:30,261][00556] Num frames 2400...
+ [2024-10-20 17:41:30,385][00556] Num frames 2500...
+ [2024-10-20 17:41:30,518][00556] Num frames 2600...
+ [2024-10-20 17:41:30,607][00556] Avg episode rewards: #0: 5.207, true rewards: #0: 4.373
+ [2024-10-20 17:41:30,611][00556] Avg episode reward: 5.207, avg true_objective: 4.373
+ [2024-10-20 17:41:30,706][00556] Num frames 2700...
+ [2024-10-20 17:41:30,825][00556] Num frames 2800...
+ [2024-10-20 17:41:30,948][00556] Num frames 2900...
+ [2024-10-20 17:41:31,070][00556] Num frames 3000...
+ [2024-10-20 17:41:31,214][00556] Avg episode rewards: #0: 5.246, true rewards: #0: 4.389
+ [2024-10-20 17:41:31,218][00556] Avg episode reward: 5.246, avg true_objective: 4.389
+ [2024-10-20 17:41:31,257][00556] Num frames 3100...
+ [2024-10-20 17:41:31,384][00556] Num frames 3200...
+ [2024-10-20 17:41:31,521][00556] Num frames 3300...
+ [2024-10-20 17:41:31,645][00556] Num frames 3400...
+ [2024-10-20 17:41:31,784][00556] Num frames 3500...
+ [2024-10-20 17:41:31,962][00556] Num frames 3600...
+ [2024-10-20 17:41:32,156][00556] Avg episode rewards: #0: 5.725, true rewards: #0: 4.600
+ [2024-10-20 17:41:32,158][00556] Avg episode reward: 5.725, avg true_objective: 4.600
+ [2024-10-20 17:41:32,196][00556] Num frames 3700...
+ [2024-10-20 17:41:32,376][00556] Num frames 3800...
+ [2024-10-20 17:41:32,558][00556] Num frames 3900...
+ [2024-10-20 17:41:32,728][00556] Num frames 4000...
+ [2024-10-20 17:41:32,950][00556] Avg episode rewards: #0: 5.662, true rewards: #0: 4.551
+ [2024-10-20 17:41:32,952][00556] Avg episode reward: 5.662, avg true_objective: 4.551
+ [2024-10-20 17:41:32,966][00556] Num frames 4100...
+ [2024-10-20 17:41:33,144][00556] Num frames 4200...
+ [2024-10-20 17:41:33,329][00556] Num frames 4300...
+ [2024-10-20 17:41:33,513][00556] Num frames 4400...
+ [2024-10-20 17:41:33,710][00556] Avg episode rewards: #0: 5.480, true rewards: #0: 4.480
+ [2024-10-20 17:41:33,712][00556] Avg episode reward: 5.480, avg true_objective: 4.480
+ [2024-10-20 17:41:53,429][00556] Replay video saved to /content/train_dir/default_experiment/replay.mp4!