import gradio as gr
import torch
from transformers import pipeline, Conversation, GPT2LMHeadModel, AutoTokenizer
# Load the DialoGPT-based model and its tokenizer.
model_name = "ingen51/DialoGPT-medium-GPT4"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")

# Use the GPU when one is available; fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

chatbot = pipeline(task="conversational", model=model, tokenizer=tokenizer)

# Running conversation state shared across turns.
past_user_inputs = []
generated_responses = []

def chocolate_ai(message, history):
    # Rebuild the conversation from the stored history, generate a reply,
    # and record both sides of the exchange for the next turn.
    conversation = Conversation(
        message,
        past_user_inputs=past_user_inputs,
        generated_responses=generated_responses,
    )
    conversation = chatbot(conversation)
    past_user_inputs.append(message)
    generated_responses.append(conversation.generated_responses[-1])
    return conversation.generated_responses[-1]

gradio_interface = gr.ChatInterface(chocolate_ai, title="Chocolate AI", description="Type to start a conversation.")
gradio_interface.launch()