sartajbhuvaji committed
Commit 998e5c3
1 Parent(s): c29e91a

Initial commit

README.md CHANGED
@@ -36,21 +36,26 @@ RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
 SB3: https://github.com/DLR-RM/stable-baselines3<br/>
 SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
 
+Install the RL Zoo (with SB3 and SB3-Contrib):
+```bash
+pip install rl_zoo3
+```
+
 ```
 # Download model and save it into the logs/ folder
 python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sartajbhuvaji -f logs/
-python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
+python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
 ```
 
 If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:
 ```
 python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sartajbhuvaji -f logs/
-rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
+python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
 ```
 
 ## Training (with the RL Zoo)
 ```
-python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
+python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
 # Upload the model and generate video (when possible)
 python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga sartajbhuvaji
 ```
@@ -58,16 +63,16 @@ python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f lo
 ## Hyperparameters
 ```python
 OrderedDict([('batch_size', 32),
-             ('buffer_size', 100000),
+             ('buffer_size', 10000),
              ('env_wrapper',
               ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
              ('exploration_final_eps', 0.01),
              ('exploration_fraction', 0.1),
              ('frame_stack', 4),
              ('gradient_steps', 1),
-             ('learning_rate', 0.0001),
+             ('learning_rate', 0.001),
              ('learning_starts', 100000),
-             ('n_timesteps', 100000.0),
+             ('n_timesteps', 1000000.0),
              ('optimize_memory_usage', False),
              ('policy', 'CnnPolicy'),
              ('target_update_interval', 1000),
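Beyond swapping the `enjoy`/`train` scripts for their `python -m rl_zoo3.*` module form and adding the pip install note, this commit changes three hyperparameters: `buffer_size` 100000 → 10000, `learning_rate` 0.0001 → 0.001, and `n_timesteps` 1e5 → 1e6. As a rough sketch (not part of this repo), here is how those values would map onto a plain SB3 `DQN` outside the RL Zoo; argument names are standard SB3, and keys outside the visible hunk are left at their defaults:

```python
# Hedged sketch: the updated hyperparameters applied to a plain Stable-Baselines3 DQN.
# Assumes SB3 1.8.0 and the Atari ROMs installed; the RL Zoo drives this via config.yml instead.
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# AtariWrapper is applied by make_atari_env; frame_stack=4 matches the config.
env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)

model = DQN(
    "CnnPolicy",
    env,
    batch_size=32,
    buffer_size=10_000,            # was 100_000 before this commit
    learning_rate=1e-3,            # was 1e-4
    learning_starts=100_000,
    gradient_steps=1,
    target_update_interval=1_000,
    exploration_fraction=0.1,
    exploration_final_eps=0.01,
    optimize_memory_usage=False,
    verbose=1,
)
model.learn(total_timesteps=1_000_000)  # n_timesteps raised from 1e5 to 1e6
```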
args.yml CHANGED
@@ -54,7 +54,7 @@
   - - save_replay_buffer
     - false
   - - seed
-    - 3547615024
+    - 1011172713
   - - storage
     - null
   - - study_name
@@ -77,5 +77,5 @@
     - null
   - - wandb_project_name
     - sb3
-  - - yaml_file
-    - null
+  - - wandb_tags
+    - []
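The substantive changes in args.yml are the new training seed (3547615024 → 1011172713) and a `wandb_tags` entry that replaces the removed `yaml_file` key. Outside the zoo, applying that seed would look roughly like the sketch below (SB3's seeding helper, not the zoo's exact code path):

```python
# Sketch: seed Python, NumPy and PyTorch the way SB3 does, using the seed recorded in args.yml.
from stable_baselines3.common.utils import set_random_seed

set_random_seed(1011172713, using_cuda=True)  # using_cuda=True also seeds torch.cuda
```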
config.yml CHANGED
@@ -2,7 +2,7 @@
 - - - batch_size
     - 32
   - - buffer_size
-    - 100000
+    - 10000
   - - env_wrapper
     - - stable_baselines3.common.atari_wrappers.AtariWrapper
   - - exploration_final_eps
@@ -14,11 +14,11 @@
   - - gradient_steps
     - 1
   - - learning_rate
-    - 0.0001
+    - 0.001
   - - learning_starts
     - 100000
   - - n_timesteps
-    - 100000.0
+    - 1000000.0
   - - optimize_memory_usage
     - false
   - - policy
dqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:92f07cba323794a6d01be749c5c6ab6be9ceda189d0f1a06ecb6d36ce911411b
-size 13719733
+oid sha256:cccc7f6dedddce34f8c4c8c42ca46a90358c8f7be81b4b726768377282a20eba
+size 27224843
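The checkpoint roughly doubles in size (13.7 MB → 27.2 MB), consistent with the much larger policy.optimizer.pth below (optimizer state now stored alongside the policy). A hypothetical loading sketch, assuming the zip was downloaded into `logs/` as in the README; the exact sub-path is an assumption:

```python
# Hypothetical sketch: load the downloaded checkpoint directly with SB3 and query one action.
from stable_baselines3 import DQN

# Path is an assumption about where rl_zoo3.load_from_hub places the file; adjust as needed.
model = DQN.load("logs/dqn/SpaceInvadersNoFrameskip-v4_1/SpaceInvadersNoFrameskip-v4.zip")

obs = model.observation_space.sample()          # dummy observation with the right shape/dtype
action, _state = model.predict(obs, deterministic=False)
print("sampled action:", action)
```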
dqn-SpaceInvadersNoFrameskip-v4/_stable_baselines3_version CHANGED
@@ -1 +1 @@
-1.7.0a10
+1.8.0
dqn-SpaceInvadersNoFrameskip-v4/data CHANGED
The diff for this file is too large to render. See raw diff
 
dqn-SpaceInvadersNoFrameskip-v4/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1e067afe9912f3dd1b7925918b8cbe439229f6008e572c9c7e431ae731419f1
-size 687
+oid sha256:ae851940d8f915dde255898277b6c7ece90f03369b5e6d27c6767e29b861c949
+size 13505739
dqn-SpaceInvadersNoFrameskip-v4/policy.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7851f940c80b90370bca8b4ec3b2f1218b9e63793e0e506197d288a36f9d5c7d
+oid sha256:a3550b8a7c5fe008b85e823679c0293256ccd83be4803c8706116a81353471a1
 size 13504937
dqn-SpaceInvadersNoFrameskip-v4/system_info.txt CHANGED
@@ -1,7 +1,7 @@
-OS: Linux-5.10.133+-x86_64-with-glibc2.27 #1 SMP Fri Aug 26 08:44:51 UTC 2022
-Python: 3.8.16
-Stable-Baselines3: 1.7.0a10
-PyTorch: 1.13.0+cu116
-GPU Enabled: True
-Numpy: 1.21.6
-Gym: 0.21.0
+- OS: Linux-5.10.147+-x86_64-with-glibc2.31 # 1 SMP Sat Dec 10 16:00:40 UTC 2022
+- Python: 3.9.16
+- Stable-Baselines3: 1.8.0
+- PyTorch: 2.0.0+cu118
+- GPU Enabled: True
+- Numpy: 1.22.4
+- Gym: 0.21.0
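system_info.txt now reflects the refreshed training environment (Python 3.9.16, SB3 1.8.0, PyTorch 2.0.0). To generate the same kind of listing locally, SB3 ships a small helper; a one-liner sketch, assuming stable-baselines3 is installed:

```python
# Sketch: print an environment summary like the one stored in system_info.txt.
from stable_baselines3.common.utils import get_system_info

info_dict, info_str = get_system_info(print_info=True)
```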
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4f1d23d7804882340d6f0dd57f9d6585261a116cde3d1cac98de13ebf75499a
-size 262996
+oid sha256:caa6a1f28e357785d754b2769d1539f69db8d8cc9b953475f14f2f15f30761f4
+size 263235
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 275.5, "std_reward": 83.07978093375065, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2022-12-26T19:20:32.967136"}
+{"mean_reward": 275.5, "std_reward": 83.07978093375065, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2023-04-08T17:40:48.028794"}
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:897eba56504209227b810388a9793543c824bf8cd6c40f436fae821ac603519f
-size 5788
+oid sha256:e7453e356c585ab6a1f9897e30aed0c47b7801f08aea06390451c3c6413fae78
+size 40975