paloma99 committed on
Commit
611d4b2
1 Parent(s): 0f6559a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -33
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import gradio as gr
2
- import threading
3
  from transformers import pipeline
4
 
5
  # Load the hotdog-not-hotdog model
@@ -8,42 +7,33 @@ hotdog_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-n
8
  # Load the chatbot model (DialoGPT)
9
  chatbot_pipeline = pipeline(task="text-generation", model="microsoft/DialoGPT-medium")
10
 
11
- def hotdog_predict(input_img):
12
- # Predict hotdog or not
13
- hotdog_predictions = hotdog_pipeline(input_img)
14
-
15
- return input_img, {
16
- "Hotdog Classification": {p["label"]: p["score"] for p in hotdog_predictions},
17
- }
18
-
19
- def chatbot_predict(chat_input):
20
- # Generate chatbot response
21
- chatbot_response = chatbot_pipeline(chat_input, max_length=50)[0]['generated_text']
22
-
23
- return chat_input, {
24
- "Chatbot Response": chatbot_response
25
- }
26
-
27
- hotdog_interface = gr.Interface(
28
- fn=hotdog_predict,
29
- inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
 
30
  outputs=[
31
  gr.Image(label="Processed Image"),
32
  gr.Label(label="Hotdog Classification", num_top_classes=2),
 
33
  ],
34
- title="Hot Dog? Or Not?",
35
- )
36
-
37
- chatbot_interface = gr.Interface(
38
- fn=chatbot_predict,
39
- inputs=gr.Textbox(label="Chatbot Input", placeholder="Type something to chat with the bot", type="text"),
40
- outputs=gr.Textbox(label="Chatbot Response", type="text"),
41
- title="Chat with the Bot",
42
  )
43
 
44
- def launch_interfaces():
45
- hotdog_interface.launch()
46
- chatbot_interface.launch()
47
-
48
  if __name__ == "__main__":
49
- threading.Thread(target=launch_interfaces).start()
 
1
  import gradio as gr
 
2
  from transformers import pipeline
3
 
4
# Load the hotdog-not-hotdog image classifier.
# NOTE(review): this assignment is shown only as hunk-header context in the
# diff; it is reconstructed here because `predict` reads `hotdog_pipeline`.
hotdog_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# Load the chatbot model (DialoGPT)
chatbot_pipeline = pipeline(task="text-generation", model="microsoft/DialoGPT-medium")
9
 
10
def predict(input_data, option):
    """Route the input to either the hotdog classifier or the chatbot.

    Args:
        input_data: A PIL image when *option* is "Image Classification".
            NOTE(review): the interface's only data input is a gr.Image, so
            the "Chatbot" branch receives a PIL image, which a
            text-generation pipeline cannot consume — confirm the intended
            UI wiring.
        option: One of "Image Classification" or "Chatbot".

    Returns:
        A 3-tuple matching the interface's three output components:
        (image, {label: score} dict or None, chatbot response text).
    """
    if option == "Image Classification":
        hotdog_predictions = hotdog_pipeline(input_data)
        # gr.Label expects a flat {label: score} mapping, not a dict nested
        # under a "Hotdog Classification" key.
        scores = {p["label"]: p["score"] for p in hotdog_predictions}
        # Three return values: the interface declares three outputs
        # (Image, Label, Textbox); returning only two raises a Gradio error.
        return input_data, scores, ""
    elif option == "Chatbot":
        chatbot_response = chatbot_pipeline(input_data, max_length=50)[0]['generated_text']
        return input_data, None, chatbot_response
    # Unknown option: return empty outputs rather than implicitly None.
    return None, None, ""
24
# One combined interface: the dropdown selects which pipeline `predict` runs,
# and the three outputs mirror the three values `predict` yields.
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
        gr.Dropdown(label="Choose Option", choices=["Image Classification", "Chatbot"]),
    ],
    outputs=[
        gr.Image(label="Processed Image"),
        gr.Label(label="Hotdog Classification", num_top_classes=2),
        gr.Textbox(label="Chatbot Response", type="text"),
    ],
    title="Hot Dog? Or Chat with the Bot?",
)
37
 
 
 
 
 
38
if __name__ == "__main__":
    # Start the Gradio app only when executed as a script, not on import.
    iface.launch()