Update README.md
Browse files
README.md
CHANGED
@@ -128,7 +128,6 @@ doc_image_inputs, doc_video_inputs = process_vision_info(doc_messages)
|
|
128 |
doc_inputs = processor(text=doc_texts, images=doc_image_inputs, videos=doc_video_inputs, padding='longest', return_tensors='pt').to('cuda:0')
|
129 |
cache_position = torch.arange(0, len(doc_texts))
|
130 |
doc_inputs = model.prepare_inputs_for_generation(**doc_inputs, cache_position=cache_position, use_cache=False)
|
131 |
- output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
132 |
with torch.no_grad():
|
133 |
output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
134 |
doc_embeddings = get_embedding(output.hidden_states[-1], 1536) # adjust dimensionality for efficiency trade-off e.g. 512
|
@@ -175,7 +174,6 @@ doc_image_inputs, doc_video_inputs = process_vision_info(doc_messages)
|
|
175 |
doc_inputs = processor(text=doc_texts, images=doc_image_inputs, videos=doc_video_inputs, padding='longest', return_tensors='pt').to('cuda:0')
|
176 |
cache_position = torch.arange(0, len(doc_texts))
|
177 |
doc_inputs = model.prepare_inputs_for_generation(**doc_inputs, cache_position=cache_position, use_cache=False)
|
178 |
- output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
179 |
with torch.no_grad():
|
180 |
output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
181 |
doc_embeddings = get_embedding(output.hidden_states[-1], 1536) # adjust dimensionality for efficiency trade-off e.g. 512
|
|
|
128 |
doc_inputs = processor(text=doc_texts, images=doc_image_inputs, videos=doc_video_inputs, padding='longest', return_tensors='pt').to('cuda:0')
|
129 |
cache_position = torch.arange(0, len(doc_texts))
|
130 |
doc_inputs = model.prepare_inputs_for_generation(**doc_inputs, cache_position=cache_position, use_cache=False)
|
|
|
131 |
with torch.no_grad():
|
132 |
output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
133 |
doc_embeddings = get_embedding(output.hidden_states[-1], 1536) # adjust dimensionality for efficiency trade-off e.g. 512
|
|
|
174 |
doc_inputs = processor(text=doc_texts, images=doc_image_inputs, videos=doc_video_inputs, padding='longest', return_tensors='pt').to('cuda:0')
|
175 |
cache_position = torch.arange(0, len(doc_texts))
|
176 |
doc_inputs = model.prepare_inputs_for_generation(**doc_inputs, cache_position=cache_position, use_cache=False)
|
|
|
177 |
with torch.no_grad():
|
178 |
output = model(**doc_inputs, return_dict=True, output_hidden_states=True)
|
179 |
doc_embeddings = get_embedding(output.hidden_states[-1], 1536) # adjust dimensionality for efficiency trade-off e.g. 512
|