gokaygokay committed on
Commit: a1f105c
Parent: 2328997

Update app.py

Files changed (1)
  1. app.py +1 -2
app.py CHANGED
@@ -264,7 +264,6 @@ class PromptGenerator:
 class HuggingFaceInferenceNode:
     def __init__(self):
         self.clients = {
-            "Llama 3.1": InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct", token=huggingface_token),
             "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
             "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
             "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
@@ -385,7 +384,7 @@ def create_interface():
         )
 
         with gr.Tab("HuggingFace Inference Text Generator"):
-            model = gr.Dropdown(["Llama 3.1", "Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Llama 3.1")
+            model = gr.Dropdown(["Mixtral", "Mistral", "Llama 3", "Mistral-Nemo"], label="Model", value="Mixtral")
             input_text = gr.Textbox(label="Input Text", lines=5)
             happy_talk = gr.Checkbox(label="Happy Talk", value=True)
             compress = gr.Checkbox(label="Compress", value=False)
 
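For reference, a minimal sketch of how the remaining clients could be queried after this change. The generate_text helper, its signature, and the max_new_tokens value are assumptions for illustration, not code from app.py; only InferenceClient and the model names come from the diff above.

from huggingface_hub import InferenceClient

# Clients retained by this commit; whether they need an auth token is not shown in the diff.
clients = {
    "Mixtral": InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"),
    "Mistral": InferenceClient("mistralai/Mistral-7B-Instruct-v0.3"),
    "Llama 3": InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct"),
}

def generate_text(model_name: str, prompt: str) -> str:
    # Hypothetical helper: run text generation on the client selected in the dropdown.
    return clients[model_name].text_generation(prompt, max_new_tokens=256)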