Commit 97cdc39
Parent: 7e4f2e7

Update README.md

Files changed (1): README.md (+3 -3)
README.md CHANGED
```diff
@@ -34,21 +34,21 @@ Here's an example of how to use the RDT-1B model for inference on a Mobile-ALOHA
 ```python
 # Clone the repository and install dependencies
 from scripts.agilex_model import create_model
-CAMERA_NAMES = ['cam_high', 'cam_right_wrist', 'cam_left_wrist'] # Names of cameras used for visual input
+# Names of cameras used for visual input
+CAMERA_NAMES = ['cam_high', 'cam_right_wrist', 'cam_left_wrist']
 config = {
     'episode_len': 1000, # Max length of one episode
     'state_dim': 14, # Dimension of the robot's state
     'chunk_size': 64, # Number of actions to predict in one step
     'camera_names': CAMERA_NAMES,
 }
-control_frequency=25
 pretrained_vision_encoder_name_or_path = "google/siglip-so400m-patch14-384"
 # Create the model with specified configuration
 model = create_model(
     args=config,
     dtype=torch.bfloat16,
     pretrained_vision_encoder_name_or_path=pretrained_vision_encoder_name_or_path,
-    control_frequency=control_frequency,
+    control_frequency=25,
 )
 # Start inference process
 # Load pre-computed language embeddings
```
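
The hunk ends just after the `# Load pre-computed language embeddings` comment, so the inference calls themselves fall outside the changed region. For context, here is a minimal sketch of how the snippet might continue, assuming the object returned by `create_model` exposes a `step` method taking proprioception, camera images, and text embeddings (as in the RDT repository); the embedding path, the input placeholders, and the exact keyword names are illustrative assumptions, and the snippet also assumes `torch` is imported earlier in the README:

```python
# Illustrative continuation; the embedding path and step() keywords are assumptions.
import torch

# Load language embeddings pre-computed offline for the task instruction.
lang_embeddings = torch.load('outs/lang_embeddings.pt')  # hypothetical path

# Latest observations: frames from the cameras listed in CAMERA_NAMES and the
# 14-dimensional robot state ('state_dim' in the config above).
images = ...   # most recent frames from cam_high, cam_right_wrist, cam_left_wrist
proprio = ...  # current robot state tensor of shape (state_dim,)

# Predict the next chunk of 64 actions ('chunk_size' in the config above).
actions = model.step(
    proprio=proprio,
    images=images,
    text_embeds=lang_embeddings,
)
```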