# Hugging Face Spaces page header (scrape artifact) — Space status: "Sleeping".
import time

import torch
from transformers import pipeline, Conversation, GPT2LMHeadModel, AutoTokenizer

import gradio as gr

# DialoGPT-medium checkpoint fine-tuned for chat (per the Hub model card name).
model_name = "ingen51/DialoGPT-medium-GPT4"
model = GPT2LMHeadModel.from_pretrained(model_name)
# Left padding so decoder-only generation continues directly from the prompt.
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left')

# Fall back to CPU when no GPU is present — the original unconditional
# torch.device("cuda") crashes on CPU-only hosts (e.g. a free Spaces tier).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# NOTE(review): the "conversational" pipeline task and the Conversation class
# were removed in transformers >= 4.42 — pin an older transformers version or
# migrate to model.generate() with a chat template.
chatbot = pipeline(task="conversational", model=model, tokenizer=tokenizer)

# Module-level chat history; shared by every caller of chocolate_ai.
past_user_inputs = []
generated_responses = []
def chocolate_ai(message, history):
    """Generate one chatbot reply for a gr.ChatInterface turn.

    Args:
        message: The user's latest message (str).
        history: List of [user_message, bot_reply] pairs that gr.ChatInterface
            supplies for the CURRENT session.

    Returns:
        The model's reply for this turn (str).
    """
    # Rebuild the conversation from this session's own history instead of the
    # module-level lists: those were shared across all users/sessions, so
    # concurrent chats bled into each other and grew without bound.
    past = [user for user, _ in history]
    replies = [bot for _, bot in history if bot is not None]
    conversation = Conversation(
        message, past_user_inputs=past, generated_responses=replies
    )
    conversation = chatbot(conversation)
    return conversation.generated_responses[-1]
# Build the chat UI around chocolate_ai; launch() starts the local web server
# and blocks until it is shut down.
gradio_interface = gr.ChatInterface(
    chocolate_ai,
    title="Chocolate AI",
    description="Type to start a conversation.",
)
gradio_interface.launch()