Hamze-Hammami committed
Commit 0cfe806 • 1 Parent(s): 2d430a6
Upload folder using huggingface_hub

Files changed:
- .gitattributes +1 -0
- .summary/0/events.out.tfevents.1720116548.3e4cb6262246 +3 -0
- README.md +56 -0
- checkpoint_p0/best_000000751_3076096_reward_24.514.pth +3 -0
- checkpoint_p0/checkpoint_000000491_2011136.pth +3 -0
- checkpoint_p0/checkpoint_000000978_4005888.pth +3 -0
- config.json +142 -0
- replay.mp4 +3 -0
- sf_log.txt +681 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+replay.mp4 filter=lfs diff=lfs merge=lfs -text
.summary/0/events.out.tfevents.1720116548.3e4cb6262246 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:880e00124a428f81a3e8b1c8785ab544957b8f885c6809041fe473eb51eddd50
size 207920
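The three lines above are a Git LFS pointer, not the event file itself: the repository stores only the object's sha256 and size, and the binary lives in LFS storage (routed there by the `*tfevents*` rule added to .gitattributes above; the checkpoints and replay.mp4 below follow the same pattern). If you clone the repository directly instead of using the README's download command, the actual binaries are fetched with git-lfs; a minimal sketch, assuming `git-lfs` is installed:
```
git clone https://huggingface.co/Hamze-Hammami/rl_course_vizdoom_health_gathering_supreme
cd rl_course_vizdoom_health_gathering_supreme
git lfs pull
```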
README.md ADDED
@@ -0,0 +1,56 @@
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 7.36 +/- 5.32
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r Hamze-Hammami/rl_course_vizdoom_health_gathering_supreme
```
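To have the downloaded files land in the `./train_dir` layout that the commands below expect, the downloader can be pointed at a destination directory. A minimal sketch, assuming the `-d` destination flag of `load_from_hub` described in the Sample-Factory Hugging Face docs (the flag is not part of the generated card above):
```
python -m sample_factory.huggingface.load_from_hub -r Hamze-Hammami/rl_course_vizdoom_health_gathering_supreme -d ./train_dir
```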
## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```
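The `<path.to.enjoy.module>` placeholder above comes from the card template and is left as-is. For VizDoom environments, the enjoy entry point shipped with Sample-Factory's examples is `sf_examples.vizdoom.enjoy_vizdoom`; a concrete sketch under that assumption:
```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```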
You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously concluded.
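Likewise, `<path.to.train.module>` resolves to `sf_examples.vizdoom.train_vizdoom` in the stock Sample-Factory examples layout; a sketch under that assumption. Per sf_log.txt below, this run stopped at 4,005,888 env steps, so any target comfortably above that will resume training:
```
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000
```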
checkpoint_p0/best_000000751_3076096_reward_24.514.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b04cfdb14a772fede13f536b82e5ee0033d0b2ee5d5564479c456bdb1efdfd3c
size 34929051

checkpoint_p0/checkpoint_000000491_2011136.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d68b0c8fdd0c5726350712728fd43585cd83c443b750b6af9a87ad7cb4f24904
size 34929477

checkpoint_p0/checkpoint_000000978_4005888.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6c79cdcfb1b049ab5afbed74280f1b4e9cc338c3d448d38bb77d8f39b8ca83a
size 34929477
config.json ADDED
@@ -0,0 +1,142 @@
{
  "help": false,
  "algo": "APPO",
  "env": "doom_health_gathering_supreme",
  "experiment": "default_experiment",
  "train_dir": "/content/train_dir",
  "restart_behavior": "resume",
  "device": "gpu",
  "seed": null,
  "num_policies": 1,
  "async_rl": true,
  "serial_mode": false,
  "batched_sampling": false,
  "num_batches_to_accumulate": 2,
  "worker_num_splits": 2,
  "policy_workers_per_policy": 1,
  "max_policy_lag": 1000,
  "num_workers": 8,
  "num_envs_per_worker": 4,
  "batch_size": 1024,
  "num_batches_per_epoch": 1,
  "num_epochs": 1,
  "rollout": 32,
  "recurrence": 32,
  "shuffle_minibatches": false,
  "gamma": 0.99,
  "reward_scale": 1.0,
  "reward_clip": 1000.0,
  "value_bootstrap": false,
  "normalize_returns": true,
  "exploration_loss_coeff": 0.001,
  "value_loss_coeff": 0.5,
  "kl_loss_coeff": 0.0,
  "exploration_loss": "symmetric_kl",
  "gae_lambda": 0.95,
  "ppo_clip_ratio": 0.1,
  "ppo_clip_value": 0.2,
  "with_vtrace": false,
  "vtrace_rho": 1.0,
  "vtrace_c": 1.0,
  "optimizer": "adam",
  "adam_eps": 1e-06,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "max_grad_norm": 4.0,
  "learning_rate": 0.0001,
  "lr_schedule": "constant",
  "lr_schedule_kl_threshold": 0.008,
  "lr_adaptive_min": 1e-06,
  "lr_adaptive_max": 0.01,
  "obs_subtract_mean": 0.0,
  "obs_scale": 255.0,
  "normalize_input": true,
  "normalize_input_keys": null,
  "decorrelate_experience_max_seconds": 0,
  "decorrelate_envs_on_one_worker": true,
  "actor_worker_gpus": [],
  "set_workers_cpu_affinity": true,
  "force_envs_single_thread": false,
  "default_niceness": 0,
  "log_to_file": true,
  "experiment_summaries_interval": 10,
  "flush_summaries_interval": 30,
  "stats_avg": 100,
  "summaries_use_frameskip": true,
  "heartbeat_interval": 20,
  "heartbeat_reporting_interval": 600,
  "train_for_env_steps": 4000000,
  "train_for_seconds": 10000000000,
  "save_every_sec": 120,
  "keep_checkpoints": 2,
  "load_checkpoint_kind": "latest",
  "save_milestones_sec": -1,
  "save_best_every_sec": 5,
  "save_best_metric": "reward",
  "save_best_after": 100000,
  "benchmark": false,
  "encoder_mlp_layers": [
    512,
    512
  ],
  "encoder_conv_architecture": "convnet_simple",
  "encoder_conv_mlp_layers": [
    512
  ],
  "use_rnn": true,
  "rnn_size": 512,
  "rnn_type": "gru",
  "rnn_num_layers": 1,
  "decoder_mlp_layers": [],
  "nonlinearity": "elu",
  "policy_initialization": "orthogonal",
  "policy_init_gain": 1.0,
  "actor_critic_share_weights": true,
  "adaptive_stddev": true,
  "continuous_tanh_scale": 0.0,
  "initial_stddev": 1.0,
  "use_env_info_cache": false,
  "env_gpu_actions": false,
  "env_gpu_observations": true,
  "env_frameskip": 4,
  "env_framestack": 1,
  "pixel_format": "CHW",
  "use_record_episode_statistics": false,
  "with_wandb": false,
  "wandb_user": null,
  "wandb_project": "sample_factory",
  "wandb_group": null,
  "wandb_job_type": "SF",
  "wandb_tags": [],
  "with_pbt": false,
  "pbt_mix_policies_in_one_env": true,
  "pbt_period_env_steps": 5000000,
  "pbt_start_mutation": 20000000,
  "pbt_replace_fraction": 0.3,
  "pbt_mutation_rate": 0.15,
  "pbt_replace_reward_gap": 0.1,
  "pbt_replace_reward_gap_absolute": 1e-06,
  "pbt_optimize_gamma": false,
  "pbt_target_objective": "true_objective",
  "pbt_perturb_min": 1.1,
  "pbt_perturb_max": 1.5,
  "num_agents": -1,
  "num_humans": 0,
  "num_bots": -1,
  "start_bot_difficulty": null,
  "timelimit": null,
  "res_w": 128,
  "res_h": 72,
  "wide_aspect_ratio": false,
  "eval_env_frameskip": 1,
  "fps": 35,
  "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
  "cli_args": {
    "env": "doom_health_gathering_supreme",
    "num_workers": 8,
    "num_envs_per_worker": 4,
    "train_for_env_steps": 4000000
  },
  "git_hash": "unknown",
  "git_repo_name": "not a git repository"
}
replay.mp4 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8ced743639fc29ef7d78402b1408d51cd9830f3ea6ed03067e3e65045fad74e
size 14323611
sf_log.txt
ADDED
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[2024-07-04 18:09:12,563][02159] Saving configuration to /content/train_dir/default_experiment/config.json...
|
2 |
+
[2024-07-04 18:09:12,565][02159] Rollout worker 0 uses device cpu
|
3 |
+
[2024-07-04 18:09:12,566][02159] Rollout worker 1 uses device cpu
|
4 |
+
[2024-07-04 18:09:12,567][02159] Rollout worker 2 uses device cpu
|
5 |
+
[2024-07-04 18:09:12,568][02159] Rollout worker 3 uses device cpu
|
6 |
+
[2024-07-04 18:09:12,571][02159] Rollout worker 4 uses device cpu
|
7 |
+
[2024-07-04 18:09:12,571][02159] Rollout worker 5 uses device cpu
|
8 |
+
[2024-07-04 18:09:12,573][02159] Rollout worker 6 uses device cpu
|
9 |
+
[2024-07-04 18:09:12,574][02159] Rollout worker 7 uses device cpu
|
10 |
+
[2024-07-04 18:09:12,672][02159] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
11 |
+
[2024-07-04 18:09:12,673][02159] InferenceWorker_p0-w0: min num requests: 2
|
12 |
+
[2024-07-04 18:09:12,706][02159] Starting all processes...
|
13 |
+
[2024-07-04 18:09:12,707][02159] Starting process learner_proc0
|
14 |
+
[2024-07-04 18:09:14,405][02159] Starting all processes...
|
15 |
+
[2024-07-04 18:09:14,411][02159] Starting process inference_proc0-0
|
16 |
+
[2024-07-04 18:09:14,411][02159] Starting process rollout_proc0
|
17 |
+
[2024-07-04 18:09:14,412][02159] Starting process rollout_proc1
|
18 |
+
[2024-07-04 18:09:14,413][02159] Starting process rollout_proc2
|
19 |
+
[2024-07-04 18:09:14,413][02159] Starting process rollout_proc3
|
20 |
+
[2024-07-04 18:09:14,414][02159] Starting process rollout_proc4
|
21 |
+
[2024-07-04 18:09:14,414][02159] Starting process rollout_proc5
|
22 |
+
[2024-07-04 18:09:14,416][02159] Starting process rollout_proc6
|
23 |
+
[2024-07-04 18:09:14,420][02159] Starting process rollout_proc7
|
24 |
+
[2024-07-04 18:09:17,137][04783] Worker 1 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
25 |
+
[2024-07-04 18:09:17,169][04785] Worker 2 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
26 |
+
[2024-07-04 18:09:17,196][04768] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
27 |
+
[2024-07-04 18:09:17,197][04768] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
|
28 |
+
[2024-07-04 18:09:17,215][04768] Num visible devices: 1
|
29 |
+
[2024-07-04 18:09:17,250][04768] Starting seed is not provided
|
30 |
+
[2024-07-04 18:09:17,251][04768] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
31 |
+
[2024-07-04 18:09:17,251][04768] Initializing actor-critic model on device cuda:0
|
32 |
+
[2024-07-04 18:09:17,252][04768] RunningMeanStd input shape: (3, 72, 128)
|
33 |
+
[2024-07-04 18:09:17,254][04768] RunningMeanStd input shape: (1,)
|
34 |
+
[2024-07-04 18:09:17,275][04768] ConvEncoder: input_channels=3
|
35 |
+
[2024-07-04 18:09:17,375][04786] Worker 3 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
36 |
+
[2024-07-04 18:09:17,477][04782] Worker 0 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
37 |
+
[2024-07-04 18:09:17,483][04784] Worker 4 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
38 |
+
[2024-07-04 18:09:17,532][04789] Worker 7 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
39 |
+
[2024-07-04 18:09:17,536][04781] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
40 |
+
[2024-07-04 18:09:17,536][04781] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
|
41 |
+
[2024-07-04 18:09:17,538][04788] Worker 6 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
42 |
+
[2024-07-04 18:09:17,552][04781] Num visible devices: 1
|
43 |
+
[2024-07-04 18:09:17,563][04768] Conv encoder output size: 512
|
44 |
+
[2024-07-04 18:09:17,564][04768] Policy head output size: 512
|
45 |
+
[2024-07-04 18:09:17,614][04787] Worker 5 uses CPU cores [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
|
46 |
+
[2024-07-04 18:09:17,618][04768] Created Actor Critic model with architecture:
|
47 |
+
[2024-07-04 18:09:17,618][04768] ActorCriticSharedWeights(
|
48 |
+
(obs_normalizer): ObservationNormalizer(
|
49 |
+
(running_mean_std): RunningMeanStdDictInPlace(
|
50 |
+
(running_mean_std): ModuleDict(
|
51 |
+
(obs): RunningMeanStdInPlace()
|
52 |
+
)
|
53 |
+
)
|
54 |
+
)
|
55 |
+
(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
|
56 |
+
(encoder): VizdoomEncoder(
|
57 |
+
(basic_encoder): ConvEncoder(
|
58 |
+
(enc): RecursiveScriptModule(
|
59 |
+
original_name=ConvEncoderImpl
|
60 |
+
(conv_head): RecursiveScriptModule(
|
61 |
+
original_name=Sequential
|
62 |
+
(0): RecursiveScriptModule(original_name=Conv2d)
|
63 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
64 |
+
(2): RecursiveScriptModule(original_name=Conv2d)
|
65 |
+
(3): RecursiveScriptModule(original_name=ELU)
|
66 |
+
(4): RecursiveScriptModule(original_name=Conv2d)
|
67 |
+
(5): RecursiveScriptModule(original_name=ELU)
|
68 |
+
)
|
69 |
+
(mlp_layers): RecursiveScriptModule(
|
70 |
+
original_name=Sequential
|
71 |
+
(0): RecursiveScriptModule(original_name=Linear)
|
72 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
73 |
+
)
|
74 |
+
)
|
75 |
+
)
|
76 |
+
)
|
77 |
+
(core): ModelCoreRNN(
|
78 |
+
(core): GRU(512, 512)
|
79 |
+
)
|
80 |
+
(decoder): MlpDecoder(
|
81 |
+
(mlp): Identity()
|
82 |
+
)
|
83 |
+
(critic_linear): Linear(in_features=512, out_features=1, bias=True)
|
84 |
+
(action_parameterization): ActionParameterizationDefault(
|
85 |
+
(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
|
86 |
+
)
|
87 |
+
)
|
88 |
+
[2024-07-04 18:09:17,843][04768] Using optimizer <class 'torch.optim.adam.Adam'>
|
89 |
+
[2024-07-04 18:09:18,814][04768] No checkpoints found
|
90 |
+
[2024-07-04 18:09:18,815][04768] Did not load from checkpoint, starting from scratch!
|
91 |
+
[2024-07-04 18:09:18,815][04768] Initialized policy 0 weights for model version 0
|
92 |
+
[2024-07-04 18:09:18,817][04768] LearnerWorker_p0 finished initialization!
|
93 |
+
[2024-07-04 18:09:18,817][04768] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
94 |
+
[2024-07-04 18:09:18,904][04781] RunningMeanStd input shape: (3, 72, 128)
|
95 |
+
[2024-07-04 18:09:18,905][04781] RunningMeanStd input shape: (1,)
|
96 |
+
[2024-07-04 18:09:18,917][04781] ConvEncoder: input_channels=3
|
97 |
+
[2024-07-04 18:09:19,026][04781] Conv encoder output size: 512
|
98 |
+
[2024-07-04 18:09:19,027][04781] Policy head output size: 512
|
99 |
+
[2024-07-04 18:09:19,085][02159] Inference worker 0-0 is ready!
|
100 |
+
[2024-07-04 18:09:19,087][02159] All inference workers are ready! Signal rollout workers to start!
|
101 |
+
[2024-07-04 18:09:19,129][04786] Doom resolution: 160x120, resize resolution: (128, 72)
|
102 |
+
[2024-07-04 18:09:19,134][04789] Doom resolution: 160x120, resize resolution: (128, 72)
|
103 |
+
[2024-07-04 18:09:19,137][04784] Doom resolution: 160x120, resize resolution: (128, 72)
|
104 |
+
[2024-07-04 18:09:19,138][04785] Doom resolution: 160x120, resize resolution: (128, 72)
|
105 |
+
[2024-07-04 18:09:19,141][04783] Doom resolution: 160x120, resize resolution: (128, 72)
|
106 |
+
[2024-07-04 18:09:19,143][04782] Doom resolution: 160x120, resize resolution: (128, 72)
|
107 |
+
[2024-07-04 18:09:19,144][04788] Doom resolution: 160x120, resize resolution: (128, 72)
|
108 |
+
[2024-07-04 18:09:19,148][04787] Doom resolution: 160x120, resize resolution: (128, 72)
|
109 |
+
[2024-07-04 18:09:19,456][04785] Decorrelating experience for 0 frames...
|
110 |
+
[2024-07-04 18:09:19,456][04788] Decorrelating experience for 0 frames...
|
111 |
+
[2024-07-04 18:09:19,456][04789] Decorrelating experience for 0 frames...
|
112 |
+
[2024-07-04 18:09:19,456][04786] Decorrelating experience for 0 frames...
|
113 |
+
[2024-07-04 18:09:19,703][04783] Decorrelating experience for 0 frames...
|
114 |
+
[2024-07-04 18:09:19,710][04787] Decorrelating experience for 0 frames...
|
115 |
+
[2024-07-04 18:09:19,731][04788] Decorrelating experience for 32 frames...
|
116 |
+
[2024-07-04 18:09:19,733][04785] Decorrelating experience for 32 frames...
|
117 |
+
[2024-07-04 18:09:19,733][04789] Decorrelating experience for 32 frames...
|
118 |
+
[2024-07-04 18:09:19,740][04784] Decorrelating experience for 0 frames...
|
119 |
+
[2024-07-04 18:09:19,994][04784] Decorrelating experience for 32 frames...
|
120 |
+
[2024-07-04 18:09:20,029][04783] Decorrelating experience for 32 frames...
|
121 |
+
[2024-07-04 18:09:20,036][04782] Decorrelating experience for 0 frames...
|
122 |
+
[2024-07-04 18:09:20,038][04787] Decorrelating experience for 32 frames...
|
123 |
+
[2024-07-04 18:09:20,081][04785] Decorrelating experience for 64 frames...
|
124 |
+
[2024-07-04 18:09:20,106][04788] Decorrelating experience for 64 frames...
|
125 |
+
[2024-07-04 18:09:20,271][04786] Decorrelating experience for 32 frames...
|
126 |
+
[2024-07-04 18:09:20,282][04782] Decorrelating experience for 32 frames...
|
127 |
+
[2024-07-04 18:09:20,335][04789] Decorrelating experience for 64 frames...
|
128 |
+
[2024-07-04 18:09:20,377][04787] Decorrelating experience for 64 frames...
|
129 |
+
[2024-07-04 18:09:20,395][04784] Decorrelating experience for 64 frames...
|
130 |
+
[2024-07-04 18:09:20,407][04785] Decorrelating experience for 96 frames...
|
131 |
+
[2024-07-04 18:09:20,548][04783] Decorrelating experience for 64 frames...
|
132 |
+
[2024-07-04 18:09:20,627][04786] Decorrelating experience for 64 frames...
|
133 |
+
[2024-07-04 18:09:20,699][04787] Decorrelating experience for 96 frames...
|
134 |
+
[2024-07-04 18:09:20,704][04782] Decorrelating experience for 64 frames...
|
135 |
+
[2024-07-04 18:09:20,818][04784] Decorrelating experience for 96 frames...
|
136 |
+
[2024-07-04 18:09:20,866][04783] Decorrelating experience for 96 frames...
|
137 |
+
[2024-07-04 18:09:20,895][04789] Decorrelating experience for 96 frames...
|
138 |
+
[2024-07-04 18:09:20,979][04788] Decorrelating experience for 96 frames...
|
139 |
+
[2024-07-04 18:09:21,033][04782] Decorrelating experience for 96 frames...
|
140 |
+
[2024-07-04 18:09:21,119][04786] Decorrelating experience for 96 frames...
|
141 |
+
[2024-07-04 18:09:22,008][04768] Signal inference workers to stop experience collection...
|
142 |
+
[2024-07-04 18:09:22,013][04781] InferenceWorker_p0-w0: stopping experience collection
|
143 |
+
[2024-07-04 18:09:23,523][02159] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 2216. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
144 |
+
[2024-07-04 18:09:23,525][02159] Avg episode reward: [(0, '1.747')]
|
145 |
+
[2024-07-04 18:09:23,826][04768] Signal inference workers to resume experience collection...
|
146 |
+
[2024-07-04 18:09:23,827][04781] InferenceWorker_p0-w0: resuming experience collection
|
147 |
+
[2024-07-04 18:09:25,945][04781] Updated weights for policy 0, policy_version 10 (0.0194)
|
148 |
+
[2024-07-04 18:09:28,220][04781] Updated weights for policy 0, policy_version 20 (0.0013)
|
149 |
+
[2024-07-04 18:09:28,524][02159] Fps is (10 sec: 17202.3, 60 sec: 17202.3, 300 sec: 17202.3). Total num frames: 86016. Throughput: 0: 3623.8. Samples: 20336. Policy #0 lag: (min: 0.0, avg: 0.4, max: 1.0)
|
150 |
+
[2024-07-04 18:09:28,526][02159] Avg episode reward: [(0, '4.635')]
|
151 |
+
[2024-07-04 18:09:30,328][04781] Updated weights for policy 0, policy_version 30 (0.0013)
|
152 |
+
[2024-07-04 18:09:32,417][04781] Updated weights for policy 0, policy_version 40 (0.0013)
|
153 |
+
[2024-07-04 18:09:32,664][02159] Heartbeat connected on Batcher_0
|
154 |
+
[2024-07-04 18:09:32,668][02159] Heartbeat connected on LearnerWorker_p0
|
155 |
+
[2024-07-04 18:09:32,677][02159] Heartbeat connected on InferenceWorker_p0-w0
|
156 |
+
[2024-07-04 18:09:32,680][02159] Heartbeat connected on RolloutWorker_w0
|
157 |
+
[2024-07-04 18:09:32,683][02159] Heartbeat connected on RolloutWorker_w1
|
158 |
+
[2024-07-04 18:09:32,688][02159] Heartbeat connected on RolloutWorker_w2
|
159 |
+
[2024-07-04 18:09:32,691][02159] Heartbeat connected on RolloutWorker_w3
|
160 |
+
[2024-07-04 18:09:32,695][02159] Heartbeat connected on RolloutWorker_w4
|
161 |
+
[2024-07-04 18:09:32,698][02159] Heartbeat connected on RolloutWorker_w5
|
162 |
+
[2024-07-04 18:09:32,704][02159] Heartbeat connected on RolloutWorker_w6
|
163 |
+
[2024-07-04 18:09:32,711][02159] Heartbeat connected on RolloutWorker_w7
|
164 |
+
[2024-07-04 18:09:33,523][02159] Fps is (10 sec: 18432.0, 60 sec: 18432.0, 300 sec: 18432.0). Total num frames: 184320. Throughput: 0: 3273.0. Samples: 34946. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
165 |
+
[2024-07-04 18:09:33,526][02159] Avg episode reward: [(0, '4.434')]
|
166 |
+
[2024-07-04 18:09:33,528][04768] Saving new best policy, reward=4.434!
|
167 |
+
[2024-07-04 18:09:34,517][04781] Updated weights for policy 0, policy_version 50 (0.0012)
|
168 |
+
[2024-07-04 18:09:36,609][04781] Updated weights for policy 0, policy_version 60 (0.0012)
|
169 |
+
[2024-07-04 18:09:38,523][02159] Fps is (10 sec: 19661.1, 60 sec: 18841.4, 300 sec: 18841.4). Total num frames: 282624. Throughput: 0: 4144.0. Samples: 64376. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
170 |
+
[2024-07-04 18:09:38,526][02159] Avg episode reward: [(0, '4.774')]
|
171 |
+
[2024-07-04 18:09:38,533][04768] Saving new best policy, reward=4.774!
|
172 |
+
[2024-07-04 18:09:38,692][04781] Updated weights for policy 0, policy_version 70 (0.0013)
|
173 |
+
[2024-07-04 18:09:41,053][04781] Updated weights for policy 0, policy_version 80 (0.0013)
|
174 |
+
[2024-07-04 18:09:43,239][04781] Updated weights for policy 0, policy_version 90 (0.0013)
|
175 |
+
[2024-07-04 18:09:43,523][02159] Fps is (10 sec: 18841.6, 60 sec: 18636.8, 300 sec: 18636.8). Total num frames: 372736. Throughput: 0: 4497.4. Samples: 92164. Policy #0 lag: (min: 0.0, avg: 0.7, max: 1.0)
|
176 |
+
[2024-07-04 18:09:43,525][02159] Avg episode reward: [(0, '4.653')]
|
177 |
+
[2024-07-04 18:09:45,347][04781] Updated weights for policy 0, policy_version 100 (0.0013)
|
178 |
+
[2024-07-04 18:09:47,462][04781] Updated weights for policy 0, policy_version 110 (0.0012)
|
179 |
+
[2024-07-04 18:09:48,524][02159] Fps is (10 sec: 18841.6, 60 sec: 18841.5, 300 sec: 18841.5). Total num frames: 471040. Throughput: 0: 4185.4. Samples: 106852. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
|
180 |
+
[2024-07-04 18:09:48,525][02159] Avg episode reward: [(0, '4.497')]
|
181 |
+
[2024-07-04 18:09:49,551][04781] Updated weights for policy 0, policy_version 120 (0.0013)
|
182 |
+
[2024-07-04 18:09:51,638][04781] Updated weights for policy 0, policy_version 130 (0.0013)
|
183 |
+
[2024-07-04 18:09:53,523][02159] Fps is (10 sec: 19251.0, 60 sec: 18841.5, 300 sec: 18841.5). Total num frames: 565248. Throughput: 0: 4462.7. Samples: 136096. Policy #0 lag: (min: 0.0, avg: 0.8, max: 1.0)
|
184 |
+
[2024-07-04 18:09:53,526][02159] Avg episode reward: [(0, '4.762')]
|
185 |
+
[2024-07-04 18:09:53,791][04781] Updated weights for policy 0, policy_version 140 (0.0012)
|
186 |
+
[2024-07-04 18:09:56,018][04781] Updated weights for policy 0, policy_version 150 (0.0013)
|
187 |
+
[2024-07-04 18:09:58,139][04781] Updated weights for policy 0, policy_version 160 (0.0013)
|
188 |
+
[2024-07-04 18:09:58,523][02159] Fps is (10 sec: 18841.7, 60 sec: 18841.5, 300 sec: 18841.5). Total num frames: 659456. Throughput: 0: 4634.4. Samples: 164422. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
189 |
+
[2024-07-04 18:09:58,526][02159] Avg episode reward: [(0, '4.845')]
|
190 |
+
[2024-07-04 18:09:58,556][04768] Saving new best policy, reward=4.845!
|
191 |
+
[2024-07-04 18:10:00,250][04781] Updated weights for policy 0, policy_version 170 (0.0012)
|
192 |
+
[2024-07-04 18:10:02,341][04781] Updated weights for policy 0, policy_version 180 (0.0013)
|
193 |
+
[2024-07-04 18:10:03,523][02159] Fps is (10 sec: 19251.2, 60 sec: 18943.9, 300 sec: 18943.9). Total num frames: 757760. Throughput: 0: 4418.0. Samples: 178936. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
194 |
+
[2024-07-04 18:10:03,525][02159] Avg episode reward: [(0, '5.081')]
|
195 |
+
[2024-07-04 18:10:03,528][04768] Saving new best policy, reward=5.081!
|
196 |
+
[2024-07-04 18:10:04,433][04781] Updated weights for policy 0, policy_version 190 (0.0012)
|
197 |
+
[2024-07-04 18:10:06,526][04781] Updated weights for policy 0, policy_version 200 (0.0012)
|
198 |
+
[2024-07-04 18:10:08,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19023.6, 300 sec: 19023.6). Total num frames: 856064. Throughput: 0: 4576.0. Samples: 208138. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
199 |
+
[2024-07-04 18:10:08,525][02159] Avg episode reward: [(0, '5.253')]
|
200 |
+
[2024-07-04 18:10:08,533][04768] Saving new best policy, reward=5.253!
|
201 |
+
[2024-07-04 18:10:08,725][04781] Updated weights for policy 0, policy_version 210 (0.0012)
|
202 |
+
[2024-07-04 18:10:10,926][04781] Updated weights for policy 0, policy_version 220 (0.0012)
|
203 |
+
[2024-07-04 18:10:13,010][04781] Updated weights for policy 0, policy_version 230 (0.0012)
|
204 |
+
[2024-07-04 18:10:13,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19005.4, 300 sec: 19005.4). Total num frames: 950272. Throughput: 0: 4805.5. Samples: 236584. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
205 |
+
[2024-07-04 18:10:13,525][02159] Avg episode reward: [(0, '5.371')]
|
206 |
+
[2024-07-04 18:10:13,528][04768] Saving new best policy, reward=5.371!
|
207 |
+
[2024-07-04 18:10:15,118][04781] Updated weights for policy 0, policy_version 240 (0.0012)
|
208 |
+
[2024-07-04 18:10:17,219][04781] Updated weights for policy 0, policy_version 250 (0.0012)
|
209 |
+
[2024-07-04 18:10:18,523][02159] Fps is (10 sec: 18841.6, 60 sec: 18990.5, 300 sec: 18990.5). Total num frames: 1044480. Throughput: 0: 4807.7. Samples: 251294. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
210 |
+
[2024-07-04 18:10:18,526][02159] Avg episode reward: [(0, '5.741')]
|
211 |
+
[2024-07-04 18:10:18,535][04768] Saving new best policy, reward=5.741!
|
212 |
+
[2024-07-04 18:10:19,379][04781] Updated weights for policy 0, policy_version 260 (0.0012)
|
213 |
+
[2024-07-04 18:10:21,511][04781] Updated weights for policy 0, policy_version 270 (0.0013)
|
214 |
+
[2024-07-04 18:10:23,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19046.4, 300 sec: 19046.4). Total num frames: 1142784. Throughput: 0: 4790.5. Samples: 279948. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
215 |
+
[2024-07-04 18:10:23,525][02159] Avg episode reward: [(0, '5.533')]
|
216 |
+
[2024-07-04 18:10:23,692][04781] Updated weights for policy 0, policy_version 280 (0.0012)
|
217 |
+
[2024-07-04 18:10:25,797][04781] Updated weights for policy 0, policy_version 290 (0.0012)
|
218 |
+
[2024-07-04 18:10:27,852][04781] Updated weights for policy 0, policy_version 300 (0.0013)
|
219 |
+
[2024-07-04 18:10:28,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19251.3, 300 sec: 19093.6). Total num frames: 1241088. Throughput: 0: 4816.7. Samples: 308914. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
220 |
+
[2024-07-04 18:10:28,526][02159] Avg episode reward: [(0, '6.158')]
|
221 |
+
[2024-07-04 18:10:28,533][04768] Saving new best policy, reward=6.158!
|
222 |
+
[2024-07-04 18:10:29,948][04781] Updated weights for policy 0, policy_version 310 (0.0013)
|
223 |
+
[2024-07-04 18:10:32,065][04781] Updated weights for policy 0, policy_version 320 (0.0012)
|
224 |
+
[2024-07-04 18:10:33,523][02159] Fps is (10 sec: 19660.9, 60 sec: 19251.2, 300 sec: 19134.1). Total num frames: 1339392. Throughput: 0: 4816.4. Samples: 323590. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
225 |
+
[2024-07-04 18:10:33,526][02159] Avg episode reward: [(0, '6.725')]
|
226 |
+
[2024-07-04 18:10:33,528][04768] Saving new best policy, reward=6.725!
|
227 |
+
[2024-07-04 18:10:34,147][04781] Updated weights for policy 0, policy_version 330 (0.0012)
|
228 |
+
[2024-07-04 18:10:36,343][04781] Updated weights for policy 0, policy_version 340 (0.0012)
|
229 |
+
[2024-07-04 18:10:38,512][04781] Updated weights for policy 0, policy_version 350 (0.0012)
|
230 |
+
[2024-07-04 18:10:38,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19182.9, 300 sec: 19114.6). Total num frames: 1433600. Throughput: 0: 4803.1. Samples: 352236. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
231 |
+
[2024-07-04 18:10:38,526][02159] Avg episode reward: [(0, '7.359')]
|
232 |
+
[2024-07-04 18:10:38,532][04768] Saving new best policy, reward=7.359!
|
233 |
+
[2024-07-04 18:10:40,619][04781] Updated weights for policy 0, policy_version 360 (0.0012)
|
234 |
+
[2024-07-04 18:10:42,724][04781] Updated weights for policy 0, policy_version 370 (0.0012)
|
235 |
+
[2024-07-04 18:10:43,523][02159] Fps is (10 sec: 18841.5, 60 sec: 19251.2, 300 sec: 19097.6). Total num frames: 1527808. Throughput: 0: 4823.8. Samples: 381494. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
236 |
+
[2024-07-04 18:10:43,526][02159] Avg episode reward: [(0, '7.402')]
|
237 |
+
[2024-07-04 18:10:43,529][04768] Saving new best policy, reward=7.402!
|
238 |
+
[2024-07-04 18:10:44,833][04781] Updated weights for policy 0, policy_version 380 (0.0013)
|
239 |
+
[2024-07-04 18:10:46,918][04781] Updated weights for policy 0, policy_version 390 (0.0012)
|
240 |
+
[2024-07-04 18:10:48,524][02159] Fps is (10 sec: 19251.1, 60 sec: 19251.2, 300 sec: 19130.7). Total num frames: 1626112. Throughput: 0: 4825.1. Samples: 396064. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
241 |
+
[2024-07-04 18:10:48,526][02159] Avg episode reward: [(0, '8.141')]
|
242 |
+
[2024-07-04 18:10:48,533][04768] Saving new best policy, reward=8.141!
|
243 |
+
[2024-07-04 18:10:49,038][04781] Updated weights for policy 0, policy_version 400 (0.0012)
|
244 |
+
[2024-07-04 18:10:51,201][04781] Updated weights for policy 0, policy_version 410 (0.0012)
|
245 |
+
[2024-07-04 18:10:53,334][04781] Updated weights for policy 0, policy_version 420 (0.0013)
|
246 |
+
[2024-07-04 18:10:53,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19251.2, 300 sec: 19114.6). Total num frames: 1720320. Throughput: 0: 4812.4. Samples: 424696. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
247 |
+
[2024-07-04 18:10:53,525][02159] Avg episode reward: [(0, '9.569')]
|
248 |
+
[2024-07-04 18:10:53,538][04768] Saving new best policy, reward=9.569!
|
249 |
+
[2024-07-04 18:10:55,428][04781] Updated weights for policy 0, policy_version 430 (0.0012)
|
250 |
+
[2024-07-04 18:10:57,514][04781] Updated weights for policy 0, policy_version 440 (0.0012)
|
251 |
+
[2024-07-04 18:10:58,523][02159] Fps is (10 sec: 19251.4, 60 sec: 19319.5, 300 sec: 19143.4). Total num frames: 1818624. Throughput: 0: 4832.3. Samples: 454038. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
252 |
+
[2024-07-04 18:10:58,526][02159] Avg episode reward: [(0, '8.960')]
|
253 |
+
[2024-07-04 18:10:59,608][04781] Updated weights for policy 0, policy_version 450 (0.0012)
|
254 |
+
[2024-07-04 18:11:01,664][04781] Updated weights for policy 0, policy_version 460 (0.0012)
|
255 |
+
[2024-07-04 18:11:03,523][02159] Fps is (10 sec: 19660.7, 60 sec: 19319.5, 300 sec: 19169.3). Total num frames: 1916928. Throughput: 0: 4832.0. Samples: 468736. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
256 |
+
[2024-07-04 18:11:03,525][02159] Avg episode reward: [(0, '10.328')]
|
257 |
+
[2024-07-04 18:11:03,528][04768] Saving new best policy, reward=10.328!
|
258 |
+
[2024-07-04 18:11:03,833][04781] Updated weights for policy 0, policy_version 470 (0.0012)
|
259 |
+
[2024-07-04 18:11:06,003][04781] Updated weights for policy 0, policy_version 480 (0.0013)
|
260 |
+
[2024-07-04 18:11:08,155][04781] Updated weights for policy 0, policy_version 490 (0.0012)
|
261 |
+
[2024-07-04 18:11:08,523][02159] Fps is (10 sec: 19251.1, 60 sec: 19251.2, 300 sec: 19153.6). Total num frames: 2011136. Throughput: 0: 4829.3. Samples: 497268. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
262 |
+
[2024-07-04 18:11:08,526][02159] Avg episode reward: [(0, '13.755')]
|
263 |
+
[2024-07-04 18:11:08,534][04768] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000491_2011136.pth...
|
264 |
+
[2024-07-04 18:11:08,612][04768] Saving new best policy, reward=13.755!
|
265 |
+
[2024-07-04 18:11:10,263][04781] Updated weights for policy 0, policy_version 500 (0.0012)
|
266 |
+
[2024-07-04 18:11:12,331][04781] Updated weights for policy 0, policy_version 510 (0.0013)
|
267 |
+
[2024-07-04 18:11:13,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19319.5, 300 sec: 19176.7). Total num frames: 2109440. Throughput: 0: 4839.0. Samples: 526670. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
268 |
+
[2024-07-04 18:11:13,526][02159] Avg episode reward: [(0, '14.175')]
|
269 |
+
[2024-07-04 18:11:13,528][04768] Saving new best policy, reward=14.175!
|
270 |
+
[2024-07-04 18:11:14,398][04781] Updated weights for policy 0, policy_version 520 (0.0013)
|
271 |
+
[2024-07-04 18:11:16,508][04781] Updated weights for policy 0, policy_version 530 (0.0012)
|
272 |
+
[2024-07-04 18:11:18,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19387.7, 300 sec: 19197.8). Total num frames: 2207744. Throughput: 0: 4839.9. Samples: 541384. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
273 |
+
[2024-07-04 18:11:18,526][02159] Avg episode reward: [(0, '14.465')]
|
274 |
+
[2024-07-04 18:11:18,533][04768] Saving new best policy, reward=14.465!
|
275 |
+
[2024-07-04 18:11:18,732][04781] Updated weights for policy 0, policy_version 540 (0.0013)
|
276 |
+
[2024-07-04 18:11:20,903][04781] Updated weights for policy 0, policy_version 550 (0.0012)
|
277 |
+
[2024-07-04 18:11:23,008][04781] Updated weights for policy 0, policy_version 560 (0.0013)
|
278 |
+
[2024-07-04 18:11:23,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19319.5, 300 sec: 19182.9). Total num frames: 2301952. Throughput: 0: 4828.5. Samples: 569520. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
279 |
+
[2024-07-04 18:11:23,527][02159] Avg episode reward: [(0, '18.393')]
|
280 |
+
[2024-07-04 18:11:23,529][04768] Saving new best policy, reward=18.393!
|
281 |
+
[2024-07-04 18:11:25,085][04781] Updated weights for policy 0, policy_version 570 (0.0012)
|
282 |
+
[2024-07-04 18:11:27,159][04781] Updated weights for policy 0, policy_version 580 (0.0013)
|
283 |
+
[2024-07-04 18:11:28,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19319.5, 300 sec: 19202.0). Total num frames: 2400256. Throughput: 0: 4835.6. Samples: 599096. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
284 |
+
[2024-07-04 18:11:28,525][02159] Avg episode reward: [(0, '18.739')]
|
285 |
+
[2024-07-04 18:11:28,532][04768] Saving new best policy, reward=18.739!
|
286 |
+
[2024-07-04 18:11:29,245][04781] Updated weights for policy 0, policy_version 590 (0.0012)
|
287 |
+
[2024-07-04 18:11:31,371][04781] Updated weights for policy 0, policy_version 600 (0.0012)
|
288 |
+
[2024-07-04 18:11:33,523][02159] Fps is (10 sec: 19251.0, 60 sec: 19251.2, 300 sec: 19188.2). Total num frames: 2494464. Throughput: 0: 4837.0. Samples: 613728. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
289 |
+
[2024-07-04 18:11:33,526][02159] Avg episode reward: [(0, '18.011')]
|
290 |
+
[2024-07-04 18:11:33,578][04781] Updated weights for policy 0, policy_version 610 (0.0013)
|
291 |
+
[2024-07-04 18:11:35,675][04781] Updated weights for policy 0, policy_version 620 (0.0012)
|
292 |
+
[2024-07-04 18:11:37,718][04781] Updated weights for policy 0, policy_version 630 (0.0012)
|
293 |
+
[2024-07-04 18:11:38,524][02159] Fps is (10 sec: 19250.8, 60 sec: 19319.4, 300 sec: 19205.7). Total num frames: 2592768. Throughput: 0: 4840.0. Samples: 642498. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
294 |
+
[2024-07-04 18:11:38,526][02159] Avg episode reward: [(0, '17.751')]
|
295 |
+
[2024-07-04 18:11:39,798][04781] Updated weights for policy 0, policy_version 640 (0.0012)
|
296 |
+
[2024-07-04 18:11:41,916][04781] Updated weights for policy 0, policy_version 650 (0.0012)
|
297 |
+
[2024-07-04 18:11:43,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19387.7, 300 sec: 19221.9). Total num frames: 2691072. Throughput: 0: 4841.8. Samples: 671918. Policy #0 lag: (min: 0.0, avg: 0.5, max: 1.0)
|
298 |
+
[2024-07-04 18:11:43,526][02159] Avg episode reward: [(0, '19.244')]
|
299 |
+
[2024-07-04 18:11:43,528][04768] Saving new best policy, reward=19.244!
|
300 |
+
[2024-07-04 18:11:44,011][04781] Updated weights for policy 0, policy_version 660 (0.0013)
|
301 |
+
[2024-07-04 18:11:46,231][04781] Updated weights for policy 0, policy_version 670 (0.0012)
|
302 |
+
[2024-07-04 18:11:48,382][04781] Updated weights for policy 0, policy_version 680 (0.0012)
|
303 |
+
[2024-07-04 18:11:48,523][02159] Fps is (10 sec: 19251.4, 60 sec: 19319.5, 300 sec: 19208.8). Total num frames: 2785280. Throughput: 0: 4827.6. Samples: 685980. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
304 |
+
[2024-07-04 18:11:48,526][02159] Avg episode reward: [(0, '18.019')]
|
305 |
+
[2024-07-04 18:11:50,460][04781] Updated weights for policy 0, policy_version 690 (0.0012)
|
306 |
+
[2024-07-04 18:11:52,522][04781] Updated weights for policy 0, policy_version 700 (0.0012)
|
307 |
+
[2024-07-04 18:11:53,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19387.7, 300 sec: 19223.9). Total num frames: 2883584. Throughput: 0: 4844.4. Samples: 715264. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
308 |
+
[2024-07-04 18:11:53,526][02159] Avg episode reward: [(0, '22.378')]
|
309 |
+
[2024-07-04 18:11:53,529][04768] Saving new best policy, reward=22.378!
|
310 |
+
[2024-07-04 18:11:54,613][04781] Updated weights for policy 0, policy_version 710 (0.0012)
|
311 |
+
[2024-07-04 18:11:56,698][04781] Updated weights for policy 0, policy_version 720 (0.0012)
|
312 |
+
[2024-07-04 18:11:58,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19387.7, 300 sec: 19238.0). Total num frames: 2981888. Throughput: 0: 4848.9. Samples: 744872. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
313 |
+
[2024-07-04 18:11:58,525][02159] Avg episode reward: [(0, '22.491')]
|
314 |
+
[2024-07-04 18:11:58,533][04768] Saving new best policy, reward=22.491!
|
315 |
+
[2024-07-04 18:11:58,838][04781] Updated weights for policy 0, policy_version 730 (0.0013)
|
316 |
+
[2024-07-04 18:12:01,053][04781] Updated weights for policy 0, policy_version 740 (0.0012)
|
317 |
+
[2024-07-04 18:12:03,191][04781] Updated weights for policy 0, policy_version 750 (0.0012)
|
318 |
+
[2024-07-04 18:12:03,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19319.5, 300 sec: 19225.6). Total num frames: 3076096. Throughput: 0: 4830.9. Samples: 758774. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
319 |
+
[2024-07-04 18:12:03,526][02159] Avg episode reward: [(0, '24.514')]
|
320 |
+
[2024-07-04 18:12:03,528][04768] Saving new best policy, reward=24.514!
|
321 |
+
[2024-07-04 18:12:05,289][04781] Updated weights for policy 0, policy_version 760 (0.0013)
|
322 |
+
[2024-07-04 18:12:07,391][04781] Updated weights for policy 0, policy_version 770 (0.0013)
|
323 |
+
[2024-07-04 18:12:08,523][02159] Fps is (10 sec: 19251.2, 60 sec: 19387.7, 300 sec: 19238.8). Total num frames: 3174400. Throughput: 0: 4854.3. Samples: 787962. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
|
324 |
+
[2024-07-04 18:12:08,526][02159] Avg episode reward: [(0, '21.793')]
|
325 |
+
[2024-07-04 18:12:09,440][04781] Updated weights for policy 0, policy_version 780 (0.0011)
|
326 |
+
[2024-07-04 18:12:11,521][04781] Updated weights for policy 0, policy_version 790 (0.0013)
|
327 |
+
[2024-07-04 18:12:13,523][02159] Fps is (10 sec: 19660.8, 60 sec: 19387.7, 300 sec: 19251.2). Total num frames: 3272704. Throughput: 0: 4844.8. Samples: 817114. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
328 |
+
[2024-07-04 18:12:13,526][02159] Avg episode reward: [(0, '21.078')]
|
329 |
+
[2024-07-04 18:12:13,714][04781] Updated weights for policy 0, policy_version 800 (0.0013)
|
330 |
+
[2024-07-04 18:12:15,906][04781] Updated weights for policy 0, policy_version 810 (0.0012)
|
331 |
+
[2024-07-04 18:12:17,984][04781] Updated weights for policy 0, policy_version 820 (0.0012)
|
332 |
+
[2024-07-04 18:12:18,523][02159] Fps is (10 sec: 19251.1, 60 sec: 19319.5, 300 sec: 19239.5). Total num frames: 3366912. Throughput: 0: 4830.8. Samples: 831114. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
333 |
+
[2024-07-04 18:12:18,526][02159] Avg episode reward: [(0, '23.839')]
|
334 |
+
[2024-07-04 18:12:20,055][04781] Updated weights for policy 0, policy_version 830 (0.0012)
|
335 |
+
[2024-07-04 18:12:22,196][04781] Updated weights for policy 0, policy_version 840 (0.0012)
|
336 |
+
[2024-07-04 18:12:23,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19387.7, 300 sec: 19251.2). Total num frames: 3465216. Throughput: 0: 4841.9. Samples: 860384. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
337 |
+
[2024-07-04 18:12:23,525][02159] Avg episode reward: [(0, '21.011')]
|
338 |
+
[2024-07-04 18:12:24,272][04781] Updated weights for policy 0, policy_version 850 (0.0012)
|
339 |
+
[2024-07-04 18:12:26,415][04781] Updated weights for policy 0, policy_version 860 (0.0012)
|
340 |
+
[2024-07-04 18:12:28,523][02159] Fps is (10 sec: 19251.3, 60 sec: 19319.5, 300 sec: 19240.1). Total num frames: 3559424. Throughput: 0: 4825.3. Samples: 889056. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
341 |
+
[2024-07-04 18:12:28,525][02159] Avg episode reward: [(0, '20.468')]
|
342 |
+
[2024-07-04 18:12:28,642][04781] Updated weights for policy 0, policy_version 870 (0.0012)
|
343 |
+
[2024-07-04 18:12:30,835][04781] Updated weights for policy 0, policy_version 880 (0.0012)
|
344 |
+
[2024-07-04 18:12:32,945][04781] Updated weights for policy 0, policy_version 890 (0.0012)
|
345 |
+
[2024-07-04 18:12:33,523][02159] Fps is (10 sec: 18841.5, 60 sec: 19319.5, 300 sec: 19229.6). Total num frames: 3653632. Throughput: 0: 4824.1. Samples: 903066. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
346 |
+
[2024-07-04 18:12:33,525][02159] Avg episode reward: [(0, '21.472')]
|
347 |
+
[2024-07-04 18:12:35,094][04781] Updated weights for policy 0, policy_version 900 (0.0013)
|
348 |
+
[2024-07-04 18:12:37,247][04781] Updated weights for policy 0, policy_version 910 (0.0013)
|
349 |
+
[2024-07-04 18:12:38,531][02159] Fps is (10 sec: 19236.8, 60 sec: 19317.1, 300 sec: 19240.0). Total num frames: 3751936. Throughput: 0: 4812.4. Samples: 931858. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
350 |
+
[2024-07-04 18:12:38,536][02159] Avg episode reward: [(0, '21.460')]
|
351 |
+
[2024-07-04 18:12:39,358][04781] Updated weights for policy 0, policy_version 920 (0.0012)
|
352 |
+
[2024-07-04 18:12:41,534][04781] Updated weights for policy 0, policy_version 930 (0.0013)
|
353 |
+
[2024-07-04 18:12:43,523][02159] Fps is (10 sec: 18841.6, 60 sec: 19182.9, 300 sec: 19210.2). Total num frames: 3842048. Throughput: 0: 4783.5. Samples: 960130. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
354 |
+
[2024-07-04 18:12:43,526][02159] Avg episode reward: [(0, '22.188')]
|
355 |
+
[2024-07-04 18:12:43,788][04781] Updated weights for policy 0, policy_version 940 (0.0013)
|
356 |
+
[2024-07-04 18:12:45,848][04781] Updated weights for policy 0, policy_version 950 (0.0012)
|
357 |
+
[2024-07-04 18:12:47,986][04781] Updated weights for policy 0, policy_version 960 (0.0012)
|
358 |
+
[2024-07-04 18:12:48,524][02159] Fps is (10 sec: 18855.5, 60 sec: 19251.2, 300 sec: 19221.2). Total num frames: 3940352. Throughput: 0: 4797.9. Samples: 974678. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
359 |
+
[2024-07-04 18:12:48,526][02159] Avg episode reward: [(0, '19.636')]
|
360 |
+
[2024-07-04 18:12:50,105][04781] Updated weights for policy 0, policy_version 970 (0.0013)
|
361 |
+
[2024-07-04 18:12:51,811][04768] Stopping Batcher_0...
|
362 |
+
[2024-07-04 18:12:51,811][02159] Component Batcher_0 stopped!
|
363 |
+
[2024-07-04 18:12:51,811][04768] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
364 |
+
[2024-07-04 18:12:51,813][04768] Loop batcher_evt_loop terminating...
|
365 |
+
[2024-07-04 18:12:51,829][04785] Stopping RolloutWorker_w2...
|
366 |
+
[2024-07-04 18:12:51,829][04785] Loop rollout_proc2_evt_loop terminating...
|
367 |
+
[2024-07-04 18:12:51,830][04789] Stopping RolloutWorker_w7...
|
368 |
+
[2024-07-04 18:12:51,830][04786] Stopping RolloutWorker_w3...
|
369 |
+
[2024-07-04 18:12:51,830][04789] Loop rollout_proc7_evt_loop terminating...
|
370 |
+
[2024-07-04 18:12:51,830][04782] Stopping RolloutWorker_w0...
|
371 |
+
[2024-07-04 18:12:51,830][04786] Loop rollout_proc3_evt_loop terminating...
|
372 |
+
[2024-07-04 18:12:51,831][04787] Stopping RolloutWorker_w5...
|
373 |
+
[2024-07-04 18:12:51,829][02159] Component RolloutWorker_w2 stopped!
|
374 |
+
[2024-07-04 18:12:51,831][04787] Loop rollout_proc5_evt_loop terminating...
|
375 |
+
[2024-07-04 18:12:51,832][04784] Stopping RolloutWorker_w4...
|
376 |
+
[2024-07-04 18:12:51,831][04782] Loop rollout_proc0_evt_loop terminating...
|
377 |
+
[2024-07-04 18:12:51,832][04784] Loop rollout_proc4_evt_loop terminating...
|
378 |
+
[2024-07-04 18:12:51,832][04781] Weights refcount: 2 0
|
379 |
+
[2024-07-04 18:12:51,832][04783] Stopping RolloutWorker_w1...
|
380 |
+
[2024-07-04 18:12:51,833][04788] Stopping RolloutWorker_w6...
|
381 |
+
[2024-07-04 18:12:51,833][04783] Loop rollout_proc1_evt_loop terminating...
|
382 |
+
[2024-07-04 18:12:51,833][04788] Loop rollout_proc6_evt_loop terminating...
|
383 |
+
[2024-07-04 18:12:51,832][02159] Component RolloutWorker_w7 stopped!
|
384 |
+
[2024-07-04 18:12:51,834][04781] Stopping InferenceWorker_p0-w0...
|
385 |
+
[2024-07-04 18:12:51,834][04781] Loop inference_proc0-0_evt_loop terminating...
|
386 |
+
[2024-07-04 18:12:51,834][02159] Component RolloutWorker_w3 stopped!
|
387 |
+
[2024-07-04 18:12:51,835][02159] Component RolloutWorker_w0 stopped!
|
388 |
+
[2024-07-04 18:12:51,837][02159] Component RolloutWorker_w5 stopped!
|
389 |
+
[2024-07-04 18:12:51,839][02159] Component RolloutWorker_w4 stopped!
|
390 |
+
[2024-07-04 18:12:51,840][02159] Component RolloutWorker_w1 stopped!
|
391 |
+
[2024-07-04 18:12:51,842][02159] Component RolloutWorker_w6 stopped!
|
392 |
+
[2024-07-04 18:12:51,844][02159] Component InferenceWorker_p0-w0 stopped!
|
393 |
+
[2024-07-04 18:12:51,891][04768] Saving /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
394 |
+
[2024-07-04 18:12:52,016][04768] Stopping LearnerWorker_p0...
|
395 |
+
[2024-07-04 18:12:52,017][04768] Loop learner_proc0_evt_loop terminating...
|
396 |
+
[2024-07-04 18:12:52,017][02159] Component LearnerWorker_p0 stopped!
|
397 |
+
[2024-07-04 18:12:52,019][02159] Waiting for process learner_proc0 to stop...
|
398 |
+
[2024-07-04 18:12:52,805][02159] Waiting for process inference_proc0-0 to join...
|
399 |
+
[2024-07-04 18:12:52,807][02159] Waiting for process rollout_proc0 to join...
|
400 |
+
[2024-07-04 18:12:52,809][02159] Waiting for process rollout_proc1 to join...
|
401 |
+
[2024-07-04 18:12:52,811][02159] Waiting for process rollout_proc2 to join...
|
402 |
+
[2024-07-04 18:12:52,813][02159] Waiting for process rollout_proc3 to join...
|
403 |
+
[2024-07-04 18:12:52,815][02159] Waiting for process rollout_proc4 to join...
|
404 |
+
[2024-07-04 18:12:52,817][02159] Waiting for process rollout_proc5 to join...
|
405 |
+
[2024-07-04 18:12:52,818][02159] Waiting for process rollout_proc6 to join...
|
406 |
+
[2024-07-04 18:12:52,820][02159] Waiting for process rollout_proc7 to join...
|
407 |
+
[2024-07-04 18:12:52,822][02159] Batcher 0 profile tree view:
|
408 |
+
batching: 16.0773, releasing_batches: 0.0238
|
409 |
+
[2024-07-04 18:12:52,823][02159] InferenceWorker_p0-w0 profile tree view:
|
410 |
+
wait_policy: 0.0001
|
411 |
+
wait_policy_total: 3.8893
|
412 |
+
update_model: 3.4935
|
413 |
+
weight_update: 0.0013
|
414 |
+
one_step: 0.0030
|
415 |
+
handle_policy_step: 194.0125
|
416 |
+
deserialize: 7.8810, stack: 1.2906, obs_to_device_normalize: 45.2995, forward: 95.6950, send_messages: 13.1087
|
417 |
+
prepare_outputs: 22.0335
|
418 |
+
to_cpu: 13.1896
|
419 |
+
[2024-07-04 18:12:52,824][02159] Learner 0 profile tree view:
|
420 |
+
misc: 0.0049, prepare_batch: 6.6491
|
421 |
+
train: 18.5372
|
422 |
+
epoch_init: 0.0056, minibatch_init: 0.0063, losses_postprocess: 0.4912, kl_divergence: 0.3722, after_optimizer: 2.0643
|
423 |
+
calculate_losses: 8.6103
|
424 |
+
losses_init: 0.0036, forward_head: 0.6840, bptt_initial: 4.5650, tail: 0.6411, advantages_returns: 0.1587, losses: 1.1995
|
425 |
+
bptt: 1.1848
|
426 |
+
bptt_forward_core: 1.1282
|
427 |
+
update: 6.6508
|
428 |
+
clip: 0.7339
|
429 |
+
[2024-07-04 18:12:52,826][02159] RolloutWorker_w0 profile tree view:
|
430 |
+
wait_for_trajectories: 0.1520, enqueue_policy_requests: 7.1947, env_step: 135.8228, overhead: 6.3251, complete_rollouts: 0.2324
|
431 |
+
save_policy_outputs: 8.8245
|
432 |
+
split_output_tensors: 3.5413
|
433 |
+
[2024-07-04 18:12:52,828][02159] RolloutWorker_w7 profile tree view:
|
434 |
+
wait_for_trajectories: 0.1523, enqueue_policy_requests: 7.1059, env_step: 135.8799, overhead: 6.2706, complete_rollouts: 0.2327
|
435 |
+
save_policy_outputs: 8.8091
|
436 |
+
split_output_tensors: 3.5261
|
437 |
+
[2024-07-04 18:12:52,829][02159] Loop Runner_EvtLoop terminating...
|
438 |
+
[2024-07-04 18:12:52,831][02159] Runner profile tree view:
|
439 |
+
main_loop: 220.1254
|
440 |
+
[2024-07-04 18:12:52,832][02159] Collected {0: 4005888}, FPS: 18198.2
|
441 |
+
[2024-07-04 18:15:16,827][02159] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
|
442 |
+
[2024-07-04 18:15:16,829][02159] Overriding arg 'num_workers' with value 1 passed from command line
|
443 |
+
[2024-07-04 18:15:16,830][02159] Adding new argument 'no_render'=True that is not in the saved config file!
|
444 |
+
[2024-07-04 18:15:16,831][02159] Adding new argument 'save_video'=True that is not in the saved config file!
|
445 |
+
[2024-07-04 18:15:16,832][02159] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
|
446 |
+
[2024-07-04 18:15:16,835][02159] Adding new argument 'video_name'=None that is not in the saved config file!
|
447 |
+
[2024-07-04 18:15:16,836][02159] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
|
448 |
+
[2024-07-04 18:15:16,837][02159] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
|
449 |
+
[2024-07-04 18:15:16,838][02159] Adding new argument 'push_to_hub'=False that is not in the saved config file!
|
450 |
+
[2024-07-04 18:15:16,839][02159] Adding new argument 'hf_repository'=None that is not in the saved config file!
|
451 |
+
[2024-07-04 18:15:16,841][02159] Adding new argument 'policy_index'=0 that is not in the saved config file!
|
452 |
+
[2024-07-04 18:15:16,842][02159] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
|
453 |
+
[2024-07-04 18:15:16,843][02159] Adding new argument 'train_script'=None that is not in the saved config file!
|
454 |
+
[2024-07-04 18:15:16,845][02159] Adding new argument 'enjoy_script'=None that is not in the saved config file!
|
455 |
+
[2024-07-04 18:15:16,845][02159] Using frameskip 1 and render_action_repeat=4 for evaluation
|
456 |
+
[2024-07-04 18:15:16,874][02159] Doom resolution: 160x120, resize resolution: (128, 72)
|
457 |
+
[2024-07-04 18:15:16,877][02159] RunningMeanStd input shape: (3, 72, 128)
|
458 |
+
[2024-07-04 18:15:16,880][02159] RunningMeanStd input shape: (1,)
|
459 |
+
[2024-07-04 18:15:16,895][02159] ConvEncoder: input_channels=3
|
460 |
+
[2024-07-04 18:15:17,010][02159] Conv encoder output size: 512
|
461 |
+
[2024-07-04 18:15:17,013][02159] Policy head output size: 512
|
462 |
+
[2024-07-04 18:15:17,168][02159] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
463 |
+
[2024-07-04 18:15:17,926][02159] Num frames 100...
[2024-07-04 18:15:18,058][02159] Num frames 200...
[2024-07-04 18:15:18,211][02159] Num frames 300...
[2024-07-04 18:15:18,341][02159] Num frames 400...
[2024-07-04 18:15:18,418][02159] Avg episode rewards: #0: 5.160, true rewards: #0: 4.160
[2024-07-04 18:15:18,420][02159] Avg episode reward: 5.160, avg true_objective: 4.160
[2024-07-04 18:15:18,528][02159] Num frames 500...
[2024-07-04 18:15:18,655][02159] Num frames 600...
[2024-07-04 18:15:18,779][02159] Num frames 700...
[2024-07-04 18:15:18,907][02159] Num frames 800...
[2024-07-04 18:15:19,032][02159] Num frames 900...
[2024-07-04 18:15:19,161][02159] Avg episode rewards: #0: 7.300, true rewards: #0: 4.800
[2024-07-04 18:15:19,162][02159] Avg episode reward: 7.300, avg true_objective: 4.800
[2024-07-04 18:15:19,216][02159] Num frames 1000...
[2024-07-04 18:15:19,342][02159] Num frames 1100...
[2024-07-04 18:15:19,470][02159] Num frames 1200...
[2024-07-04 18:15:19,599][02159] Num frames 1300...
[2024-07-04 18:15:19,728][02159] Num frames 1400...
[2024-07-04 18:15:19,857][02159] Num frames 1500...
[2024-07-04 18:15:19,984][02159] Num frames 1600...
[2024-07-04 18:15:20,112][02159] Num frames 1700...
[2024-07-04 18:15:20,238][02159] Num frames 1800...
[2024-07-04 18:15:20,365][02159] Num frames 1900...
[2024-07-04 18:15:20,493][02159] Num frames 2000...
[2024-07-04 18:15:20,622][02159] Num frames 2100...
[2024-07-04 18:15:20,751][02159] Num frames 2200...
[2024-07-04 18:15:20,879][02159] Num frames 2300...
[2024-07-04 18:15:20,939][02159] Avg episode rewards: #0: 14.347, true rewards: #0: 7.680
[2024-07-04 18:15:20,940][02159] Avg episode reward: 14.347, avg true_objective: 7.680
[2024-07-04 18:15:21,061][02159] Num frames 2400...
[2024-07-04 18:15:21,188][02159] Num frames 2500...
[2024-07-04 18:15:21,316][02159] Num frames 2600...
[2024-07-04 18:15:21,442][02159] Num frames 2700...
[2024-07-04 18:15:21,571][02159] Num frames 2800...
[2024-07-04 18:15:21,701][02159] Num frames 2900...
[2024-07-04 18:15:21,828][02159] Num frames 3000...
[2024-07-04 18:15:21,954][02159] Num frames 3100...
[2024-07-04 18:15:22,089][02159] Num frames 3200...
[2024-07-04 18:15:22,189][02159] Avg episode rewards: #0: 14.830, true rewards: #0: 8.080
[2024-07-04 18:15:22,190][02159] Avg episode reward: 14.830, avg true_objective: 8.080
[2024-07-04 18:15:22,281][02159] Num frames 3300...
[2024-07-04 18:15:22,410][02159] Num frames 3400...
[2024-07-04 18:15:22,538][02159] Num frames 3500...
[2024-07-04 18:15:22,666][02159] Num frames 3600...
[2024-07-04 18:15:22,795][02159] Num frames 3700...
[2024-07-04 18:15:22,927][02159] Num frames 3800...
[2024-07-04 18:15:23,057][02159] Num frames 3900...
[2024-07-04 18:15:23,190][02159] Num frames 4000...
[2024-07-04 18:15:23,313][02159] Avg episode rewards: #0: 15.902, true rewards: #0: 8.102
[2024-07-04 18:15:23,314][02159] Avg episode reward: 15.902, avg true_objective: 8.102
[2024-07-04 18:15:23,385][02159] Num frames 4100...
[2024-07-04 18:15:23,521][02159] Num frames 4200...
[2024-07-04 18:15:23,657][02159] Num frames 4300...
[2024-07-04 18:15:23,791][02159] Num frames 4400...
[2024-07-04 18:15:23,927][02159] Num frames 4500...
[2024-07-04 18:15:24,061][02159] Num frames 4600...
[2024-07-04 18:15:24,195][02159] Num frames 4700...
[2024-07-04 18:15:24,328][02159] Num frames 4800...
[2024-07-04 18:15:24,461][02159] Num frames 4900...
[2024-07-04 18:15:24,595][02159] Num frames 5000...
[2024-07-04 18:15:24,783][02159] Avg episode rewards: #0: 16.792, true rewards: #0: 8.458
[2024-07-04 18:15:24,785][02159] Avg episode reward: 16.792, avg true_objective: 8.458
[2024-07-04 18:15:24,823][02159] Num frames 5100...
[2024-07-04 18:15:24,948][02159] Num frames 5200...
[2024-07-04 18:15:25,074][02159] Num frames 5300...
[2024-07-04 18:15:25,200][02159] Num frames 5400...
[2024-07-04 18:15:25,325][02159] Num frames 5500...
[2024-07-04 18:15:25,455][02159] Num frames 5600...
[2024-07-04 18:15:25,583][02159] Num frames 5700...
[2024-07-04 18:15:25,710][02159] Num frames 5800...
[2024-07-04 18:15:25,860][02159] Avg episode rewards: #0: 16.393, true rewards: #0: 8.393
[2024-07-04 18:15:25,862][02159] Avg episode reward: 16.393, avg true_objective: 8.393
[2024-07-04 18:15:25,896][02159] Num frames 5900...
[2024-07-04 18:15:26,024][02159] Num frames 6000...
[2024-07-04 18:15:26,152][02159] Num frames 6100...
[2024-07-04 18:15:26,281][02159] Num frames 6200...
[2024-07-04 18:15:26,408][02159] Num frames 6300...
[2024-07-04 18:15:26,533][02159] Num frames 6400...
[2024-07-04 18:15:26,659][02159] Num frames 6500...
[2024-07-04 18:15:26,786][02159] Num frames 6600...
[2024-07-04 18:15:26,879][02159] Avg episode rewards: #0: 16.161, true rewards: #0: 8.286
[2024-07-04 18:15:26,880][02159] Avg episode reward: 16.161, avg true_objective: 8.286
[2024-07-04 18:15:26,969][02159] Num frames 6700...
[2024-07-04 18:15:27,096][02159] Num frames 6800...
[2024-07-04 18:15:27,227][02159] Num frames 6900...
[2024-07-04 18:15:27,355][02159] Num frames 7000...
[2024-07-04 18:15:27,480][02159] Num frames 7100...
[2024-07-04 18:15:27,607][02159] Num frames 7200...
[2024-07-04 18:15:27,732][02159] Num frames 7300...
[2024-07-04 18:15:27,790][02159] Avg episode rewards: #0: 15.779, true rewards: #0: 8.112
[2024-07-04 18:15:27,791][02159] Avg episode reward: 15.779, avg true_objective: 8.112
[2024-07-04 18:15:27,916][02159] Num frames 7400...
[2024-07-04 18:15:28,044][02159] Num frames 7500...
[2024-07-04 18:15:28,168][02159] Num frames 7600...
[2024-07-04 18:15:28,298][02159] Num frames 7700...
[2024-07-04 18:15:28,427][02159] Num frames 7800...
[2024-07-04 18:15:28,559][02159] Num frames 7900...
[2024-07-04 18:15:28,686][02159] Num frames 8000...
[2024-07-04 18:15:28,816][02159] Num frames 8100...
[2024-07-04 18:15:28,946][02159] Num frames 8200...
[2024-07-04 18:15:29,073][02159] Num frames 8300...
[2024-07-04 18:15:29,201][02159] Avg episode rewards: #0: 16.757, true rewards: #0: 8.357
[2024-07-04 18:15:29,202][02159] Avg episode reward: 16.757, avg true_objective: 8.357
[2024-07-04 18:15:49,147][02159] Replay video saved to /content/train_dir/default_experiment/replay.mp4!
[2024-07-04 18:23:54,057][02159] Loading existing experiment configuration from /content/train_dir/default_experiment/config.json
[2024-07-04 18:23:54,059][02159] Overriding arg 'num_workers' with value 1 passed from command line
[2024-07-04 18:23:54,059][02159] Adding new argument 'no_render'=True that is not in the saved config file!
[2024-07-04 18:23:54,061][02159] Adding new argument 'save_video'=True that is not in the saved config file!
[2024-07-04 18:23:54,062][02159] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2024-07-04 18:23:54,064][02159] Adding new argument 'video_name'=None that is not in the saved config file!
[2024-07-04 18:23:54,066][02159] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2024-07-04 18:23:54,067][02159] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2024-07-04 18:23:54,069][02159] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2024-07-04 18:23:54,070][02159] Adding new argument 'hf_repository'='Hamze-Hammami/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2024-07-04 18:23:54,071][02159] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2024-07-04 18:23:54,073][02159] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2024-07-04 18:23:54,075][02159] Adding new argument 'train_script'=None that is not in the saved config file!
[2024-07-04 18:23:54,076][02159] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2024-07-04 18:23:54,078][02159] Using frameskip 1 and render_action_repeat=4 for evaluation
[2024-07-04 18:23:54,103][02159] RunningMeanStd input shape: (3, 72, 128)
[2024-07-04 18:23:54,105][02159] RunningMeanStd input shape: (1,)
[2024-07-04 18:23:54,117][02159] ConvEncoder: input_channels=3
[2024-07-04 18:23:54,156][02159] Conv encoder output size: 512
[2024-07-04 18:23:54,157][02159] Policy head output size: 512
[2024-07-04 18:23:54,176][02159] Loading state from checkpoint /content/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2024-07-04 18:23:54,591][02159] Num frames 100...
[2024-07-04 18:23:54,722][02159] Num frames 200...
[2024-07-04 18:23:54,849][02159] Num frames 300...
[2024-07-04 18:23:55,017][02159] Avg episode rewards: #0: 8.900, true rewards: #0: 3.900
[2024-07-04 18:23:55,018][02159] Avg episode reward: 8.900, avg true_objective: 3.900
[2024-07-04 18:23:55,033][02159] Num frames 400...
[2024-07-04 18:23:55,163][02159] Num frames 500...
[2024-07-04 18:23:55,291][02159] Num frames 600...
[2024-07-04 18:23:55,420][02159] Num frames 700...
[2024-07-04 18:23:55,556][02159] Num frames 800...
[2024-07-04 18:23:55,619][02159] Avg episode rewards: #0: 10.530, true rewards: #0: 4.030
[2024-07-04 18:23:55,621][02159] Avg episode reward: 10.530, avg true_objective: 4.030
[2024-07-04 18:23:55,742][02159] Num frames 900...
[2024-07-04 18:23:55,873][02159] Num frames 1000...
[2024-07-04 18:23:56,002][02159] Num frames 1100...
[2024-07-04 18:23:56,175][02159] Avg episode rewards: #0: 9.300, true rewards: #0: 3.967
[2024-07-04 18:23:56,177][02159] Avg episode reward: 9.300, avg true_objective: 3.967
[2024-07-04 18:23:56,193][02159] Num frames 1200...
[2024-07-04 18:23:56,328][02159] Num frames 1300...
[2024-07-04 18:23:56,463][02159] Num frames 1400...
[2024-07-04 18:23:56,596][02159] Num frames 1500...
[2024-07-04 18:23:56,711][02159] Avg episode rewards: #0: 8.868, true rewards: #0: 3.867
[2024-07-04 18:23:56,712][02159] Avg episode reward: 8.868, avg true_objective: 3.867
[2024-07-04 18:23:56,787][02159] Num frames 1600...
[2024-07-04 18:23:56,924][02159] Num frames 1700...
[2024-07-04 18:23:57,059][02159] Num frames 1800...
[2024-07-04 18:23:57,194][02159] Num frames 1900...
[2024-07-04 18:23:57,329][02159] Num frames 2000...
[2024-07-04 18:23:57,458][02159] Avg episode rewards: #0: 8.910, true rewards: #0: 4.110
[2024-07-04 18:23:57,460][02159] Avg episode reward: 8.910, avg true_objective: 4.110
[2024-07-04 18:23:57,521][02159] Num frames 2100...
[2024-07-04 18:23:57,657][02159] Num frames 2200...
[2024-07-04 18:23:57,793][02159] Num frames 2300...
[2024-07-04 18:23:57,923][02159] Num frames 2400...
[2024-07-04 18:23:58,052][02159] Num frames 2500...
[2024-07-04 18:23:58,178][02159] Num frames 2600...
[2024-07-04 18:23:58,305][02159] Num frames 2700...
[2024-07-04 18:23:58,424][02159] Avg episode rewards: #0: 9.418, true rewards: #0: 4.585
[2024-07-04 18:23:58,426][02159] Avg episode reward: 9.418, avg true_objective: 4.585
[2024-07-04 18:23:58,489][02159] Num frames 2800...
[2024-07-04 18:23:58,616][02159] Num frames 2900...
[2024-07-04 18:23:58,743][02159] Num frames 3000...
[2024-07-04 18:23:58,879][02159] Num frames 3100...
[2024-07-04 18:23:59,008][02159] Num frames 3200...
[2024-07-04 18:23:59,139][02159] Num frames 3300...
[2024-07-04 18:23:59,266][02159] Num frames 3400...
[2024-07-04 18:23:59,393][02159] Num frames 3500...
[2024-07-04 18:23:59,452][02159] Avg episode rewards: #0: 10.576, true rewards: #0: 5.004
[2024-07-04 18:23:59,454][02159] Avg episode reward: 10.576, avg true_objective: 5.004
[2024-07-04 18:23:59,580][02159] Num frames 3600...
[2024-07-04 18:23:59,706][02159] Num frames 3700...
[2024-07-04 18:23:59,834][02159] Num frames 3800...
[2024-07-04 18:23:59,966][02159] Avg episode rewards: #0: 9.825, true rewards: #0: 4.825
[2024-07-04 18:23:59,968][02159] Avg episode reward: 9.825, avg true_objective: 4.825
[2024-07-04 18:24:00,022][02159] Num frames 3900...
[2024-07-04 18:24:00,147][02159] Num frames 4000...
[2024-07-04 18:24:00,274][02159] Num frames 4100...
[2024-07-04 18:24:00,404][02159] Num frames 4200...
[2024-07-04 18:24:00,533][02159] Num frames 4300...
[2024-07-04 18:24:00,659][02159] Num frames 4400...
[2024-07-04 18:24:00,786][02159] Num frames 4500...
[2024-07-04 18:24:00,912][02159] Num frames 4600...
[2024-07-04 18:24:01,041][02159] Num frames 4700...
[2024-07-04 18:24:01,168][02159] Num frames 4800...
[2024-07-04 18:24:01,296][02159] Num frames 4900...
[2024-07-04 18:24:01,425][02159] Num frames 5000...
[2024-07-04 18:24:01,550][02159] Num frames 5100...
[2024-07-04 18:24:01,679][02159] Num frames 5200...
[2024-07-04 18:24:01,806][02159] Num frames 5300...
[2024-07-04 18:24:01,935][02159] Num frames 5400...
[2024-07-04 18:24:02,005][02159] Avg episode rewards: #0: 13.123, true rewards: #0: 6.012
[2024-07-04 18:24:02,007][02159] Avg episode reward: 13.123, avg true_objective: 6.012
[2024-07-04 18:24:02,124][02159] Num frames 5500...
[2024-07-04 18:24:02,255][02159] Num frames 5600...
[2024-07-04 18:24:02,385][02159] Num frames 5700...
[2024-07-04 18:24:02,515][02159] Num frames 5800...
[2024-07-04 18:24:02,641][02159] Num frames 5900...
[2024-07-04 18:24:02,768][02159] Num frames 6000...
[2024-07-04 18:24:02,898][02159] Num frames 6100...
[2024-07-04 18:24:03,030][02159] Num frames 6200...
[2024-07-04 18:24:03,158][02159] Num frames 6300...
[2024-07-04 18:24:03,287][02159] Num frames 6400...
[2024-07-04 18:24:03,413][02159] Num frames 6500...
[2024-07-04 18:24:03,544][02159] Num frames 6600...
[2024-07-04 18:24:03,672][02159] Num frames 6700...
[2024-07-04 18:24:03,799][02159] Num frames 6800...
[2024-07-04 18:24:03,927][02159] Num frames 6900...
[2024-07-04 18:24:04,056][02159] Num frames 7000...
[2024-07-04 18:24:04,183][02159] Num frames 7100...
[2024-07-04 18:24:04,316][02159] Num frames 7200...
[2024-07-04 18:24:04,448][02159] Num frames 7300...
[2024-07-04 18:24:04,587][02159] Avg episode rewards: #0: 16.463, true rewards: #0: 7.363
[2024-07-04 18:24:04,588][02159] Avg episode reward: 16.463, avg true_objective: 7.363
[2024-07-04 18:24:22,135][02159] Replay video saved to /content/train_dir/default_experiment/replay.mp4!