import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
import theme  # local theme.py module in the Space defining a custom Gradio theme

theme = theme.Theme()
# Cell 1: Image Classification Model
image_pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

def predict_image(input_img):
    # Run the classifier and return the image alongside a label -> score mapping
    predictions = image_pipeline(input_img)
    return input_img, {p["label"]: p["score"] for p in predictions}
image_gradio_app = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(label="Select hot dog candidate", sources=["upload", "webcam"], type="pil"),
    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
    title="Hot Dog? Or Not?",
    theme=theme,
)
# Cell 2: Chatbot Model
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
chatbot_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

def echo(message, history):
    # Placeholder chat function: simply echoes the user's message back
    return message
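
# Note: the DialoGPT model and tokenizer above are loaded but never used by echo().
# A minimal sketch (an assumption, not part of the original app) of how they could
# drive the chat instead; the respond() name is hypothetical.
def respond(message, history):
    # Encode the user message, generate a continuation, and decode only the new tokens
    input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    output_ids = chatbot_model.generate(
        input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id
    )
    return tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)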
chatbot_gradio_app = gr.ChatInterface(
    fn=echo,
    title="Greta",
    theme=theme,
)
# Combine both interfaces into a single tabbed app
gr.TabbedInterface(
    [image_gradio_app, chatbot_gradio_app],
    tab_names=["Greta Image", "Greta Chat"],
    theme=theme,
).launch()