Spaces:
Running
on
Zero
Running
on
Zero
MaziyarPanahi
committed on
Commit
•
a901f92
1
Parent(s):
47bb660
Update app.py (#12)
Browse files - Update app.py (ad65a3e83985ec4d0904e3f04399ea1368e6e25f)
app.py
CHANGED
@@ -14,21 +14,21 @@ import requests
|
|
14 |
CSS ="""
|
15 |
.contain { display: flex; flex-direction: column; }
|
16 |
#component-0 { height: 500px; }
|
17 |
-
#chatbot { flex-grow: 1; }
|
18 |
"""
|
19 |
|
20 |
model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
|
21 |
|
22 |
-
processor = AutoProcessor.from_pretrained(model_id)
|
23 |
|
24 |
-
model = LlavaForConditionalGeneration.from_pretrained(
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
)
|
29 |
|
30 |
-
model.to("cuda:0")
|
31 |
-
model.generation_config.eos_token_id = 128009
|
32 |
|
33 |
@spaces.GPU
|
34 |
def bot_streaming(message, history):
|
|
|
14 |
CSS ="""
|
15 |
.contain { display: flex; flex-direction: column; }
|
16 |
#component-0 { height: 500px; }
|
17 |
+
#chatbot { flex-grow: 1; height: 500px; }
|
18 |
"""
|
19 |
|
20 |
model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
|
21 |
|
22 |
+
# processor = AutoProcessor.from_pretrained(model_id)
|
23 |
|
24 |
+
# model = LlavaForConditionalGeneration.from_pretrained(
|
25 |
+
# model_id,
|
26 |
+
# torch_dtype=torch.float16,
|
27 |
+
# low_cpu_mem_usage=True,
|
28 |
+
# )
|
29 |
|
30 |
+
# model.to("cuda:0")
|
31 |
+
# model.generation_config.eos_token_id = 128009
|
32 |
|
33 |
@spaces.GPU
|
34 |
def bot_streaming(message, history):
|