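"""Gradio demo with two modes: hot dog image classification
(julien-c/hotdog-not-hotdog) and a small DialoGPT chatbot. A dropdown selects
which pipeline runs; output components not used by the selected mode receive None."""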
import gradio as gr
from transformers import pipeline

# Load the hotdog-not-hotdog model
hotdog_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# Load the chatbot model (DialoGPT)
chatbot_pipeline = pipeline(task="text-generation", model="microsoft/DialoGPT-medium")

def predict(image, message, option):
    if option == "Image Classification":
        # Classify the uploaded image as hotdog / not hotdog
        predictions = hotdog_pipeline(image)
        # gr.Label expects a flat {label: score} mapping
        scores = {p["label"]: p["score"] for p in predictions}
        return image, scores, None
    elif option == "Chatbot":
        # Generate a chatbot reply from the text prompt
        response = chatbot_pipeline(message, max_new_tokens=50)[0]["generated_text"]
        return image, None, response
    # Fallback: no option selected
    return image, None, None

iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="Select hot dog candidate", sources=["upload", "webcam"], type="pil"),
        gr.Textbox(label="Chat message"),
        gr.Dropdown(label="Choose Option", choices=["Image Classification", "Chatbot"]),
    ],
    outputs=[
        gr.Image(label="Processed Image"),
        gr.Label(label="Hotdog Classification", num_top_classes=2),
        gr.Textbox(label="Chatbot Response", type="text"),
    ],
    title="Hot Dog? Or Chat with the Bot?",
)

if __name__ == "__main__":
    iface.launch()