geored committed on
Commit
5c1306e
1 Parent(s): aba9e8d

Upload folder using huggingface_hub

Files changed (1)
  1. app.py +19 -4
app.py CHANGED
@@ -1,15 +1,30 @@
 import gradio as gr
 from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+# Load the Mistral-7B-v0.1 model and tokenizer
+model_name = "mistralai/Mistral-7B-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+
 
 # Load the llama2 LLM model
 # model = pipeline("text-generation", model="llamalanguage/llama2", tokenizer="llamalanguage/llama2")
-model = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf", tokenizer="meta-llama/Llama-2-7b-chat-hf")
+# model = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1", tokenizer="meta-llama/Llama-2-7b-chat-hf")
 
 # Define the chat function that uses the LLM model
+# def chat_interface(input_text):
+#     response = model(input_text, max_length=100, return_full_text=True)[0]["generated_text"]
+#     response_words = response.split()
+#     return response_words
+
+# Define the chat function that uses the Mistral-7B-v0.1 model
 def chat_interface(input_text):
-    response = model(input_text, max_length=100, return_full_text=True)[0]["generated_text"]
-    response_words = response.split()
-    return response_words
+    inputs = tokenizer.encode(input_text, return_tensors="pt")
+    outputs = model.generate(inputs, max_length=100)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
+
 
 # Create the Gradio interface
 iface = gr.Interface(
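
The hunk cuts off at the opening of the gr.Interface( call, so the interface arguments are not part of this commit. For context, here is a minimal, self-contained sketch of how the new chat_interface could be wired into a Gradio app; the inputs/outputs/title arguments and the launch step are assumptions for illustration, not code from this diff.

# Hypothetical sketch: the diff above stops at "iface = gr.Interface(",
# so everything passed to gr.Interface below is assumed, not from the commit.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def chat_interface(input_text):
    # Encode the prompt, generate up to 100 tokens total, decode back to text
    inputs = tokenizer.encode(input_text, return_tensors="pt")
    outputs = model.generate(inputs, max_length=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Assumed wiring: one text box in, one text box out
iface = gr.Interface(
    fn=chat_interface,
    inputs=gr.Textbox(lines=2, label="Prompt"),
    outputs=gr.Textbox(label="Response"),
    title="Mistral-7B-v0.1 chat",
)

if __name__ == "__main__":
    iface.launch()

Note that max_length=100 caps the combined prompt-plus-generation length; max_new_tokens is the usual knob when only the response length should be limited.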