ernestum committed
Commit ac09740
1 Parent(s): 246f903

Initial commit

README.md CHANGED
@@ -77,3 +77,8 @@ OrderedDict([('batch_size', 64),
  ('train_freq', 8),
  ('normalize', False)])
  ```
+
+ # Environment Arguments
+ ```python
+ {'render_mode': 'rgb_array'}
+ ```
env_kwargs.yml CHANGED
@@ -1 +1 @@
- {}
+ render_mode: rgb_array
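
For context: the keyword arguments in env_kwargs.yml are forwarded to the environment constructor, and rgb_array rendering is what makes it possible to capture frames for the replay video added below. A minimal sketch of how they are consumed, assuming the seals package is installed and registers the seals/Humanoid-v1 ID with Gymnasium:

```python
import gymnasium as gym
import seals  # noqa: F401  # assumption: importing seals registers the seals/* env IDs

# The mapping from env_kwargs.yml is unpacked into the constructor call,
# equivalent to gym.make(env_id, **{"render_mode": "rgb_array"}).
env = gym.make("seals/Humanoid-v1", render_mode="rgb_array")

obs, info = env.reset(seed=0)
frame = env.render()  # with rgb_array mode this returns an HxWx3 uint8 array
print(frame.shape)
env.close()
```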
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f16fc62f4df2c3b4e46eb0d72ed453b9aa68885aec90a9e9110ad145c7de0b20
+ size 491501
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": 367.4834745, "std_reward": 59.61112469905793, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-15T13:55:04.652816"}
+ {"mean_reward": 367.4834745, "std_reward": 59.61112469905793, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-18T09:55:03.230088"}
sac-seals-Humanoid-v1.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:286748292454082f9a382da8c39c89006dd306fa217ed0143ce87a094df634c5
- size 12379151
+ oid sha256:da6eda93cf34bab5eb9c8a123755a9e75548af5da816e8371132811f986ca328
+ size 12379155
sac-seals-Humanoid-v1/_stable_baselines3_version CHANGED
@@ -1 +1 @@
- 2.1.0
+ 2.2.0a3
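
The version bump from 2.1.0 to 2.2.0a3 mainly matters when reloading the checkpoint: stable-baselines3 stores this string inside the model zip as _stable_baselines3_version. A small sketch for inspecting it before loading; the file layout follows the standard SB3 save format, and the path is taken from this repository:

```python
import zipfile

import stable_baselines3
from stable_baselines3 import SAC

MODEL_PATH = "sac-seals-Humanoid-v1.zip"

# The SB3 checkpoint is a zip archive; _stable_baselines3_version records
# which library version wrote it.
with zipfile.ZipFile(MODEL_PATH) as zf:
    saved_version = zf.read("_stable_baselines3_version").decode().strip()

print(f"saved with SB3 {saved_version}, running SB3 {stable_baselines3.__version__}")

# print_system_info echoes information like that in system_info.txt at load time.
model = SAC.load(MODEL_PATH, device="cpu", print_system_info=True)
```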
sac-seals-Humanoid-v1/data CHANGED
@@ -5,17 +5,17 @@
  "__module__": "stable_baselines3.sac.policies",
  "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}",
  "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
- "__init__": "<function SACPolicy.__init__ at 0x7f13d0bf48b0>",
- "_build": "<function SACPolicy._build at 0x7f13d0bf4940>",
- "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7f13d0bf49d0>",
- "reset_noise": "<function SACPolicy.reset_noise at 0x7f13d0bf4a60>",
- "make_actor": "<function SACPolicy.make_actor at 0x7f13d0bf4af0>",
- "make_critic": "<function SACPolicy.make_critic at 0x7f13d0bf4b80>",
- "forward": "<function SACPolicy.forward at 0x7f13d0bf4c10>",
- "_predict": "<function SACPolicy._predict at 0x7f13d0bf4ca0>",
- "set_training_mode": "<function SACPolicy.set_training_mode at 0x7f13d0bf4d30>",
+ "__init__": "<function SACPolicy.__init__ at 0x7efec9bcc700>",
+ "_build": "<function SACPolicy._build at 0x7efec9bcc790>",
+ "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7efec9bcc820>",
+ "reset_noise": "<function SACPolicy.reset_noise at 0x7efec9bcc8b0>",
+ "make_actor": "<function SACPolicy.make_actor at 0x7efec9bcc940>",
+ "make_critic": "<function SACPolicy.make_critic at 0x7efec9bcc9d0>",
+ "forward": "<function SACPolicy.forward at 0x7efec9bcca60>",
+ "_predict": "<function SACPolicy._predict at 0x7efec9bccaf0>",
+ "set_training_mode": "<function SACPolicy.set_training_mode at 0x7efec9bccb80>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7f13d0bf5900>"
+ "_abc_impl": "<_abc_data object at 0x7efec9bc2b40>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -103,13 +103,13 @@
  ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
  "__module__": "stable_baselines3.common.buffers",
  "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function ReplayBuffer.__init__ at 0x7f13d0bbf820>",
- "add": "<function ReplayBuffer.add at 0x7f13d0bbf8b0>",
- "sample": "<function ReplayBuffer.sample at 0x7f13d0bbf940>",
- "_get_samples": "<function ReplayBuffer._get_samples at 0x7f13d0bbf9d0>",
- "_maybe_cast_dtype": "<staticmethod object at 0x7f13d0bc5610>",
+ "__init__": "<function ReplayBuffer.__init__ at 0x7efec9c19700>",
+ "add": "<function ReplayBuffer.add at 0x7efec9c19790>",
+ "sample": "<function ReplayBuffer.sample at 0x7efec9c19820>",
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x7efec9c198b0>",
+ "_maybe_cast_dtype": "<staticmethod object at 0x7efec9c13850>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7f13d0bc5630>"
+ "_abc_impl": "<_abc_data object at 0x7efec9c13870>"
  },
  "replay_buffer_kwargs": {},
  "train_freq": {
sac-seals-Humanoid-v1/system_info.txt CHANGED
@@ -1,6 +1,6 @@
  - OS: Linux-5.4.0-156-generic-x86_64-with-glibc2.29 # 173-Ubuntu SMP Tue Jul 11 07:25:22 UTC 2023
  - Python: 3.8.10
- - Stable-Baselines3: 2.1.0
+ - Stable-Baselines3: 2.2.0a3
  - PyTorch: 2.0.1+cu117
  - GPU Enabled: False
  - Numpy: 1.24.4
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3b3bc20573122fa398a763659bfb3306c833ba2ef0553f2a24a212b615b801e3
+ oid sha256:1fc83703bd5eb53aba76bd7a079c25215141c36de58f9b1697f1bbf9d1fd5d33
  size 58659