paloma99 committed
Commit
87bd002
1 Parent(s): 7a3d5aa

Update app.py

Files changed (1)
  1. app.py +45 -40
app.py CHANGED
@@ -1,42 +1,47 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
-from transformers import pipeline
-
-# Load the hotdog-not-hotdog model
-hotdog_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
-
-# Load the chatbot model (DialoGPT)
-chatbot_pipeline = pipeline(task="text-generation", model="microsoft/DialoGPT-medium")
-
-def predict(input_data, option, chat_input=""):
-    if option == "Image Classification":
-        # Predict hotdog or not
-        hotdog_predictions = hotdog_pipeline(input_data)
-        return input_data, {
-            "Hotdog Classification": {p["label"]: p["score"] for p in hotdog_predictions},
-            "Chatbot Response": "",  # Placeholder for chatbot response when not selected
-        }
-    elif option == "Chatbot":
-        # Generate chatbot response
-        chatbot_response = chatbot_pipeline(chat_input, max_length=50)[0]['generated_text']
-        return input_data, {
-            "Hotdog Classification": {},
-            "Chatbot Response": chatbot_response,
-        }
-
-iface = gr.Interface(
+import torch
+
+
+title = "🤖AI ChatBot"
+description = "A state-of-the-art large-scale pretrained response generation model (DialoGPT)"
+examples = [["How are you?"]]
+
+
+tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
+model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
+
+
+def predict(input, history=[]):
+    # tokenize the new input sentence, appending the end-of-sequence token
+    new_user_input_ids = tokenizer.encode(
+        input + tokenizer.eos_token, return_tensors="pt"
+    )
+
+    # append the new user input tokens to the chat history
+    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
+
+    # generate a response
+    history = model.generate(
+        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
+    ).tolist()
+
+    # decode the tokens to text, then split the transcript on the EOS token
+    response = tokenizer.decode(history[0]).split("<|endoftext|>")
+    # print('decoded_response-->>'+str(response))
+    response = [
+        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
+    ]  # pair consecutive turns into (user, bot) tuples
+    # print('response-->>'+str(response))
+    return response, history
+
+
+gr.Interface(
     fn=predict,
-    inputs=[
-        gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
-        gr.Dropdown(label="Choose Option", choices=["Image Classification", "Chatbot"]),
-        gr.Textbox(label="Chatbot Input", placeholder="Type something to chat with the bot", type="text"),
-    ],
-    outputs=[
-        gr.Image(label="Processed Image"),
-        gr.Label(label="Hotdog Classification", num_top_classes=2),
-        gr.Textbox(label="Chatbot Response", type="text"),
-    ],
-    title="Hot Dog? Or Chat with the Bot?",
-)
-
-if __name__ == "__main__":
-    iface.launch()
+    title=title,
+    description=description,
+    examples=examples,
+    inputs=["text", "state"],
+    outputs=["chatbot", "state"],
+    theme="finlaymacklon/boxy_violet",
+).launch()
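
For a quick sanity check of the conversation loop this commit introduces, the generation logic can be exercised outside Gradio. The sketch below is a minimal, hypothetical standalone copy of the new predict() (renamed chat(), with illustrative prompts); the first call relies on torch.cat accepting the empty 1-D tensor produced by the default history=[].

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

def chat(text, history=[]):
    # encode the new turn (with EOS) and append it to the token history
    ids = tokenizer.encode(text + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = torch.cat([torch.LongTensor(history), ids], dim=-1)
    # generate a continuation conditioned on the whole conversation so far
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()
    # decode, split on EOS, and pair consecutive turns as (user, bot)
    turns = tokenizer.decode(history[0]).split("<|endoftext|>")
    return [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)], history

pairs, history = chat("How are you?")            # first turn: empty history
pairs, history = chat("Tell me more.", history)  # second turn: context carried over
print(pairs)                                     # two (user, bot) tuples

Passing history back in by hand is what the paired "state" input/output automates in the Interface: Gradio stores the returned token list between calls, so each generation conditions on the full conversation.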