wjbmattingly
committed on
Commit
•
96f0fb9
1
Parent(s):
b69b00e
Update README.md
Browse files
README.md
CHANGED
@@ -10,7 +10,7 @@ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoPro
|
|
10 |
from qwen_vl_utils import process_vision_info
|
11 |
|
12 |
|
13 |
-
device = "cuda" if torch.cuda.is_available() else "
|
14 |
|
15 |
model_dir = "medieval-data/qwen2-vl-2b-catmus"
|
16 |
|
@@ -47,7 +47,7 @@ inputs = processor(
|
|
47 |
padding=True,
|
48 |
return_tensors="pt",
|
49 |
)
|
50 |
-
inputs = inputs.to(
|
51 |
|
52 |
# Inference: Generation of the output
|
53 |
generated_ids = model.generate(**inputs, max_new_tokens=4000)
|
|
|
10 |
from qwen_vl_utils import process_vision_info
|
11 |
|
12 |
|
13 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
14 |
|
15 |
model_dir = "medieval-data/qwen2-vl-2b-catmus"
|
16 |
|
|
|
47 |
padding=True,
|
48 |
return_tensors="pt",
|
49 |
)
|
50 |
+
inputs = inputs.to(device)
|
51 |
|
52 |
# Inference: Generation of the output
|
53 |
generated_ids = model.generate(**inputs, max_new_tokens=4000)
|