from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
import gradio as gr

# Load the model and tokenizer from the Hugging Face Hub
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1")
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1")
# A function that sends a question (plus the chat history so far) to the model
def ask_gpt2(question, history):
    input_ids = tokenizer.encode(history + question, return_tensors="pt")
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
    # Generate a reply; cap its length (100 new tokens is an arbitrary default)
    output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, not the echoed prompt
    reply = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    new_history = history + "Nutzer: " + question + "\nLöwolf GPT: " + reply + "\n"
    return new_history
# Build the Gradio interface
interface = gr.Interface(
    fn=ask_gpt2,
    inputs=[
        gr.Textbox(lines=2, placeholder="Stelle deine Frage hier..."),
        gr.Textbox(lines=10, placeholder="Chat-Verlauf..."),
    ],
    outputs=gr.Textbox(label="Antwort"),
)
# Launch the Gradio app
interface.launch()
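
For a quick check of the generation logic without the web UI, ask_gpt2 can also be called directly in the same Python session. The snippet below is a minimal sketch: the German test questions are arbitrary examples, and the empty string stands in for a fresh chat history.

# Assumes the script above has run, so model, tokenizer, and ask_gpt2 exist.
history = ""
history = ask_gpt2("Hallo, wer bist du?", history)
print(history)
# Feeding the returned transcript back in carries the context into the next turn.
history = ask_gpt2("Was kannst du?", history)
print(history)

Threading the full transcript back through an input textbox is what gives this otherwise stateless gr.Interface a memory; current Gradio also ships gr.ChatInterface, which implements the same pattern with built-in history handling.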