"""Gradio demo: classify an image as hot dog / not hot dog, or chat with DialoGPT."""

import gradio as gr
from transformers import pipeline

# Load both models once at startup (each call downloads/caches the model weights).
hotdog_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
chatbot_pipeline = pipeline(task="text-generation", model="microsoft/DialoGPT-medium")


def predict(image, message, option):
    """Run the classifier or the chatbot depending on the selected option.

    Args:
        image: PIL image from the Image component (may be None in Chatbot mode).
        message: text prompt from the Textbox (ignored in classification mode).
        option: either "Image Classification" or "Chatbot".

    Returns:
        A 3-tuple matching the interface's three outputs:
        (echoed image, {label: score} mapping for gr.Label, chatbot reply text).
    """
    if option == "Image Classification":
        predictions = hotdog_pipeline(image)
        # gr.Label expects a FLAT {label: score} dict, not a nested one.
        scores = {p["label"]: p["score"] for p in predictions}
        return image, scores, ""
    # Chatbot mode: generate a reply from the text prompt, not the image.
    # max_new_tokens bounds only the generated reply (max_length would
    # also count the prompt tokens).
    reply = chatbot_pipeline(message, max_new_tokens=50)[0]["generated_text"]
    return image, {}, reply


iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="Select hot dog candidate", sources=["upload", "webcam"], type="pil"),
        gr.Textbox(label="Chat message"),
        gr.Dropdown(label="Choose Option", choices=["Image Classification", "Chatbot"]),
    ],
    # predict returns exactly one value per output component, in order.
    outputs=[
        gr.Image(label="Processed Image"),
        gr.Label(label="Hotdog Classification", num_top_classes=2),
        gr.Textbox(label="Chatbot Response", type="text"),
    ],
    title="Hot Dog? Or Chat with the Bot?",
)

if __name__ == "__main__":
    iface.launch()