import random

import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, set_seed

# Fix every relevant random seed so the sampled translations are reproducible across runs.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
set_seed(seed)

# Load the tokenizer and the T5 model fine-tuned for English -> Quenya translation.
tokenizer = AutoTokenizer.from_pretrained("Oysiyl/elvish-translator-quenya-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("Oysiyl/elvish-translator-quenya-t5-base")

# T5-style task prefix prepended to every input before tokenization.
prefix = "translate English to Elvish: "


def predict(text):
    # Prepend the task prefix, tokenize, and sample a translation.
    # do_sample with top-k/top-p gives varied (non-greedy) outputs, capped at 40 new tokens.
    inputs = tokenizer(prefix + text, return_tensors="pt").input_ids
    outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95)
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return result

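# A quick manual check of predict() outside the Gradio UI (hypothetical input;
# the sampled output will vary between runs):
# print(predict("The stars shine upon the hour of our meeting"))
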
demo = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    title="English to Elvish translation!",
    description="<p style='text-align: center'>Provide English text and let the model try to guess the text in Elvish!</p>",
    article="<p style='text-align: center'>Text Translation English -> Elvish | Demo Model</p>",
)

demo.launch()