import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the fine-tuned GPT-2 checkpoint and its tokenizer.
model_path = "/zhvanetsky_model_1"
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path).to(DEVICE)

def generate_text(input_text):
    model.eval()
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        out = model.generate(input_ids,
                             do_sample=True,
                             num_beams=10,
                             temperature=2.2,
                             top_p=0.85,
                             top_k=500,
                             max_length=100,
                             no_repeat_ngram_size=3,
                             num_return_sequences=3,
                             )
    # Three sequences are generated, but only the first one is returned.
    return tokenizer.decode(out[0], skip_special_tokens=True)

st.title("GPT-2 Text Generator")
user_input = st.text_area("Input Text", "Enter your text")
if st.button("Generate"):
    generated_output = generate_text(user_input)
    st.text_area("Generated Text", generated_output)