Spaces:
Runtime error
Runtime error
DongfuJiang
committed on
Commit
•
8b0a71a
1
Parent(s):
7a6af6e
update
Browse files
- app_high_res.py +2 -2
app_high_res.py
CHANGED
@@ -13,7 +13,7 @@ from models.conversation import conv_templates
|
|
13 |
from typing import List
|
14 |
processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
|
15 |
model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", torch_dtype=torch.float16)
|
16 |
-
model = model.to("cuda")
|
17 |
MAX_NUM_FRAMES = 24
|
18 |
conv_template = conv_templates["idefics_2"]
|
19 |
|
@@ -62,7 +62,7 @@ all the frames of video are as follows:
|
|
62 |
@spaces.GPU
|
63 |
def generate(text:str, images:List[Image.Image], history: List[dict], **kwargs):
|
64 |
global processor, model
|
65 |
-
|
66 |
if not images:
|
67 |
images = None
|
68 |
|
|
|
13 |
from typing import List
|
14 |
processor = AutoProcessor.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096")
|
15 |
model = Idefics2ForConditionalGeneration.from_pretrained("Mantis-VL/mantis-8b-idefics2-video-eval-high-res-35k-mantis-2epoch_4096", torch_dtype=torch.float16)
|
16 |
+
# model = model.to("cuda")
|
17 |
MAX_NUM_FRAMES = 24
|
18 |
conv_template = conv_templates["idefics_2"]
|
19 |
|
|
|
62 |
@spaces.GPU
|
63 |
def generate(text:str, images:List[Image.Image], history: List[dict], **kwargs):
|
64 |
global processor, model
|
65 |
+
model = model.to("cuda")
|
66 |
if not images:
|
67 |
images = None
|
68 |
|