# vqbet_pusht / config.yaml
resume: false
device: cuda
use_amp: false
seed: 100000
dataset_repo_id: lerobot/pusht
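# Training schedule and optimizer settings (offline-only; no online rollouts).
# A hedged reading of the split hyperparameters: lr/adam_* look like the main optimizer
# settings, while bet_* and vqvae_lr appear to be separate parameter-group settings for
# the behavior-transformer head and the action VQ-VAE, which is pretrained for
# n_vqvae_training_steps before the rest of the policy is trained.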
training:
  offline_steps: 800000
  online_steps: 0
  online_steps_between_rollouts: 1
  online_sampling_ratio: 0.5
  online_env_seed: ???
  eval_freq: 20000
  save_freq: 20000
  log_freq: 250
  save_checkpoint: true
  num_workers: 4
  batch_size: 64
  grad_clip_norm: 10
  lr: 0.0001
  lr_scheduler: cosine
  lr_warmup_steps: 500
  adam_betas:
  - 0.95
  - 0.999
  adam_eps: 1.0e-08
  adam_weight_decay: 1.0e-06
  vqvae_lr: 0.001
  n_vqvae_training_steps: 20000
  bet_weight_decay: 0.0002
  bet_learning_rate: 5.5e-05
  bet_betas:
  - 0.9
  - 0.999
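  # delta_timestamps are offsets in seconds (at fps: 10) selecting which frames each
  # training sample loads: 5 observation frames (-0.4 ... 0.0), matching
  # policy.n_obs_steps: 5, and 15 action steps (-0.4 ... 1.0). The 15 appears to come
  # from n_obs_steps + n_action_pred_token + action_chunk_size - 2 = 5 + 7 + 5 - 2.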
  delta_timestamps:
    observation.image:
    - -0.4
    - -0.3
    - -0.2
    - -0.1
    - 0.0
    observation.state:
    - -0.4
    - -0.3
    - -0.2
    - -0.1
    - 0.0
    action:
    - -0.4
    - -0.3
    - -0.2
    - -0.1
    - 0.0
    - 0.1
    - 0.2
    - 0.3
    - 0.4
    - 0.5
    - 0.6
    - 0.7
    - 0.8
    - 0.9
    - 1.0
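# Evaluation: every training.eval_freq steps the policy is rolled out for 500 PushT
# episodes, 50 environments at a time (synchronous, since use_async_envs is false).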
eval:
  n_episodes: 500
  batch_size: 50
  use_async_envs: false
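# Weights & Biases logging under the "lerobot" project; artifact upload is not disabled.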
wandb:
  enable: true
  disable_artifact: false
  project: lerobot
  notes: ''
fps: 10
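# Simulation environment: gym PushT (PushT-v0) with 96x96 RGB observations plus the
# 2-D agent position (obs_type: pixels_agent_pos), 2-D actions, and 300-step episodes.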
env:
  name: pusht
  task: PushT-v0
  image_size: 96
  state_dim: 2
  action_dim: 2
  fps: ${fps}
  episode_length: 300
  gym:
    obs_type: pixels_agent_pos
    render_mode: rgb_array
    visualization_width: 384
    visualization_height: 384
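# Normalization statistics overriding those computed from the dataset: an image mean/std
# of 0.5 maps pixels from [0, 1] to roughly [-1, 1], and the state/action min/max values
# presumably correspond to PushT workspace bounds in pixel coordinates (~0-512).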
override_dataset_stats:
  observation.image:
    mean:
    - - - 0.5
    - - - 0.5
    - - - 0.5
    std:
    - - - 0.5
    - - - 0.5
    - - - 0.5
  observation.state:
    min:
    - 13.456424
    - 32.938293
    max:
    - 496.14618
    - 510.9579
  action:
    min:
    - 12.0
    - 25.0
    max:
    - 511.0
    - 511.0
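# VQ-BeT policy: broadly, a ResNet-18 visual encoder with spatial-softmax keypoints, a
# residual VQ-VAE that discretizes 5-step action chunks into codes, and a GPT-style
# transformer that predicts code indices plus continuous offsets.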
policy:
  name: vqbet
  n_obs_steps: 5
  n_action_pred_token: 7
  action_chunk_size: 5
  input_shapes:
    observation.image:
    - 3
    - 96
    - 96
    observation.state:
    - ${env.state_dim}
  output_shapes:
    action:
    - ${env.action_dim}
  input_normalization_modes:
    observation.image: mean_std
    observation.state: min_max
  output_normalization_modes:
    action: min_max
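  # Vision encoder: ResNet-18 trained from scratch (pretrained_backbone_weights: null)
  # with GroupNorm in place of BatchNorm; frames are randomly cropped to 84x84 during
  # training (presumably center-cropped at eval time) and pooled by a 32-keypoint
  # spatial softmax.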
  vision_backbone: resnet18
  crop_shape:
  - 84
  - 84
  crop_is_random: true
  pretrained_backbone_weights: null
  use_group_norm: true
  spatial_softmax_num_keypoints: 32
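  # Action discretization: the residual VQ-VAE is trained on its own for the first
  # discretize_step (= training.n_vqvae_training_steps = 20000) steps, with 16 codes per
  # codebook; the primary/secondary code losses below suggest two residual codebook levels.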
  discretize_step: ${training.n_vqvae_training_steps}
  vqvae_n_embed: 16
  vqvae_embedding_dim: 256
  vqvae_enc_hidden_dim: 128
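  # GPT-style behavior-transformer head: 8 layers, 8 attention heads, 512-dim
  # input/hidden/output, with a maximum context (gpt_block_size) of 500 tokens.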
  gpt_block_size: 500
  gpt_input_dim: 512
  gpt_output_dim: 512
  gpt_n_layer: 8
  gpt_n_head: 8
  gpt_hidden_dim: 512
  dropout: 0.1
  mlp_hidden_dim: 1024
  offset_loss_weight: 10000.0
  primary_code_loss_weight: 5.0
  secondary_code_loss_weight: 0.5
  bet_softmax_temperature: 0.01
  sequentially_select: false
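# Provenance note (an assumption, not recorded in this file): configs of this form were
# typically produced by lerobot's hydra-based training script, roughly
# `python lerobot/scripts/train.py policy=vqbet env=pusht`, and saved alongside the
# trained checkpoint.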