Commit 5aa1106
Parent: f4fb9ee

Update README.md

Files changed (1)
  1. README.md +6 -6
README.md CHANGED
@@ -36,23 +36,23 @@ Here's an example of how to use the RDT-1B model for inference on a Mobile-ALOHA
 from scripts.agilex_model import create_model
 CAMERA_NAMES = ['cam_high', 'cam_right_wrist', 'cam_left_wrist'] # Names of cameras used for visual input
 config = {
-    'episode_len': 1000, # Length of one episode
+    'episode_len': 1000, # Max length of one episode
     'state_dim': 14, # Dimension of the robot's state
     'chunk_size': 64, # Number of actions to predict in one step
     'camera_names': CAMERA_NAMES,
 }
-ctrl_freq=25 # Set the control frequency (Hz)
-pretrained_vision_encoder_name_or_path = "google/siglip-so400m-patch14-384" # The pre-trained vision encoder model
+control_frequency=25
+pretrained_vision_encoder_name_or_path = "google/siglip-so400m-patch14-384"
 # Create the model with specified configuration
 model = create_model(
     args=config,
-    dtype=torch.bfloat16, # Use bfloat16 for improved performance
+    dtype=torch.bfloat16,
     pretrained_vision_encoder_name_or_path=pretrained_vision_encoder_name_or_path,
-    control_frequency=ctrl_freq,
+    control_frequency=control_frequency,
 )
 # Start inference process
 lang_embeddings_path = 'your/language/embedding/path'
-text_embedding = torch.load(lang_embeddings_path)['embeddings'] # Load pre-computed language embeddings
+text_embedding = torch.load(lang_embeddings_path)['embeddings'] # Pre-computed language embeddings
 images: List(PIL.Image) = ... # The images from last 2 frame
 proprio = ... # The current robot state
 # Perform inference to predict the next chunk_size actions
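For readers landing on this commit without the surrounding README, here is how the updated snippet assembles into a single script. This is a minimal sketch, not the repository's official example: the dummy images and state, and the `model.step(proprio=..., images=..., text_embeds=...)` call, are assumptions about the interface returned by `create_model` that are not shown in this hunk, and the embedding path is the placeholder carried over from the README.

```python
# Minimal sketch of the updated README snippet, assembled end to end.
# Assumptions (not shown in this hunk): create_model returns a policy object
# with a step(proprio, images, text_embeds) method, and the language-embedding
# file is a dict saved with an 'embeddings' key.
from typing import List

import torch
from PIL import Image

from scripts.agilex_model import create_model

CAMERA_NAMES = ['cam_high', 'cam_right_wrist', 'cam_left_wrist']

config = {
    'episode_len': 1000,          # Max length of one episode
    'state_dim': 14,              # Dimension of the robot's state
    'chunk_size': 64,             # Number of actions to predict in one step
    'camera_names': CAMERA_NAMES,
}

model = create_model(
    args=config,
    dtype=torch.bfloat16,
    pretrained_vision_encoder_name_or_path="google/siglip-so400m-patch14-384",
    control_frequency=25,
)

# Pre-computed language embeddings: a file whose saved dict has an 'embeddings' key.
lang_embeddings_path = 'your/language/embedding/path'
text_embedding = torch.load(lang_embeddings_path)['embeddings']

# Observations: the last 2 frames from each of the 3 cameras (dummies here),
# plus the current 14-dim proprioceptive state.
images: List[Image.Image] = [
    Image.new('RGB', (640, 480)) for _ in range(2 * len(CAMERA_NAMES))
]
proprio = torch.zeros(config['state_dim'])

# Predict the next chunk_size actions (method name and signature assumed).
actions = model.step(proprio=proprio, images=images, text_embeds=text_embedding)
```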