Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -18,9 +18,9 @@ quantization_config = BitsAndBytesConfig(
 )

 model = AutoModelForCausalLM.from_pretrained(
-    "meta-llama/Meta-Llama-3-            [old line truncated in page capture]
+    "meta-llama/Meta-Llama-3-8B-Instruct", quantization_config=quantization_config, token=token
 )
-tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-            [old line truncated in page capture]
+tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", token=token)

 if torch.cuda.is_available():
     device = torch.device("cuda")
@@ -104,6 +104,6 @@ demo = gr.ChatInterface(
     ],
     stop_btn="Stop Generation",
     title="Chat With LLMs",
-    description="Now Running ![meta-llama/Meta-Llama-3-            [old line truncated in page capture]
+    description="Now Running ![meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) in 4bit"
 )
 demo.launch()