gokaygokay committed
Commit · 0b744ad · 1 Parent(s): f89ced9
Update app.py
app.py CHANGED
@@ -6,6 +6,8 @@ import re
 from datetime import datetime
 from huggingface_hub import InferenceClient
 
+
+huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 # Load JSON files
 def load_json_file(file_name):
     file_path = os.path.join("data", file_name)
@@ -262,6 +264,7 @@ class PromptGenerator:
 class HuggingFaceInferenceNode:
     def __init__(self):
         self.clients = {
+            "Llama 3.1": InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct", token=huggingface_token),
             "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
             "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
             "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
@@ -382,7 +385,7 @@ def create_interface():
         )
 
         with gr.Tab("HuggingFace Inference Text Generator"):
-            model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Llama 3")
+            model = gr.Dropdown(["Llama 3.1", "Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Llama 3.1")
             input_text = gr.Textbox(label="Input Text", lines=5)
             happy_talk = gr.Checkbox(label="Happy Talk", value=True)
             compress = gr.Checkbox(label="Compress", value=False)
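
For context, a minimal sketch (not part of the commit) of the pattern this change introduces: the token is read from the environment and passed to the Llama 3.1 client, and the dropdown value in the Gradio tab is simply the key used to pick a client from the dict. The prompt text, the max_tokens value, and the use of chat_completion are illustrative assumptions, not values taken from app.py.

import os
from huggingface_hub import InferenceClient

# Same pattern as the commit: read the token from the environment so a gated
# model such as Llama 3.1 can be accessed.
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")

# Subset of the clients dict added in the commit.
clients = {
    "Llama 3.1": InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct", token=huggingface_token),
    "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
}

# The dropdown selection ("Llama 3.1" by default after this commit) is just a dict key.
client = clients["Llama 3.1"]
response = client.chat_completion(
    messages=[{"role": "user", "content": "Describe a misty mountain lake at dawn."}],  # illustrative prompt
    max_tokens=256,  # illustrative value
)
print(response.choices[0].message.content)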