import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Run on GPU when one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Prompt-generation model; it is decoded with the GPT-Neo tokenizer below.
model = AutoModelForCausalLM.from_pretrained("saltacc/RandomPrompt-v1")
model.to(device)

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
# GPT-Neo has no dedicated pad token, so reuse the end-of-text token.
tokenizer.pad_token = tokenizer.eos_token


def detect(text_in, max_length):
    # With no user input, seed generation with the pad/end-of-text token so the
    # model writes a prompt from scratch; otherwise continue from the given text.
    if not text_in:
        inputs = tokenizer.pad_token
    else:
        inputs = text_in
    # Sample a continuation and decode the single generated sequence.
    output_ids = model.generate(tokenizer.encode(inputs, return_tensors='pt').to(device),
                                do_sample=True,
                                temperature=0.9,
                                max_length=int(max_length))
    text = tokenizer.batch_decode(output_ids)[0]
    # Strip the pad/end-of-text token from the decoded string.
    text = text.replace(tokenizer.pad_token, '')
    return text


# Gradio UI: a text box for an optional seed prompt and a slider for the maximum length.
iface = gr.Interface(fn=detect,
                     inputs=[gr.Textbox(), gr.Slider(100, 200, value=120)],
                     outputs=gr.TextArea())
iface.launch()
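
# A minimal sketch (not part of the original app) of exercising the same
# generation path without the Gradio UI, e.g. as a quick smoke test. The seed
# string is an arbitrary example. Left commented out because iface.launch()
# above blocks; to try it, run it before launch() or in a separate script.
#
# seed = "a portrait of"
# ids = tokenizer.encode(seed, return_tensors='pt').to(device)
# out = model.generate(ids, do_sample=True, temperature=0.9, max_length=120)
# print(tokenizer.batch_decode(out)[0].replace(tokenizer.pad_token, ''))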