|
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
import torch |
|
import gradio as gr |
|
|
|
|
|
# Load the tokenizer and causal-LM weights for the Japanese chatbot model
# from the Hugging Face Hub (downloads on first run, then uses local cache).
tokenizer = AutoTokenizer.from_pretrained("yasunamiura/mr-ja")

model = AutoModelForCausalLM.from_pretrained("yasunamiura/mr-ja")
|
|
|
|
|
def generate(prompt):
    """Generate a chatbot reply for *prompt* using the loaded causal LM.

    Args:
        prompt: User input text (Japanese expected by this model).

    Returns:
        The decoded model output as a string (includes the prompt, since
        causal-LM generation continues from the input tokens).
    """
    # tokenizer(...) returns both input_ids and attention_mask; passing the
    # mask avoids generate() warnings and padding ambiguity that plain
    # tokenizer.encode() causes.
    inputs = tokenizer(prompt, return_tensors='pt')

    # Inference only: no_grad() skips autograd bookkeeping, cutting memory use.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=300,
            temperature=1.0,
            top_k=50,
            top_p=0.95,
            do_sample=True,
        )

    predicted_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    return predicted_text
|
|
|
|
|
# Build the Gradio UI: one text input, one text output, wired to generate().
# NOTE: the input textbox's initial text uses `value=` — the old `default=`
# keyword was removed in Gradio 3.x and raises a TypeError on current versions.
iface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Input a question", value="質問してください"),
    outputs="text",
    title="Mr Bot🤖",
)

# Start the local web server (blocks until interrupted).
iface.launch()
|
|