import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def app():  # Encapsulating function for the Streamlit page
    st.title("GPT-2 Generator")

    # Pick the GPU if one is available, otherwise fall back to the CPU
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_path = "zhvanetsky_model"
    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
    model = GPT2LMHeadModel.from_pretrained(model_path).to(DEVICE)

    def generate_text(input_text, num_beams, temperature, max_length, top_p):
        model.eval()
        input_ids = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)
        with torch.no_grad():
            out = model.generate(
                input_ids,
                do_sample=True,
                num_beams=num_beams,
                temperature=temperature,
                top_p=top_p,
                top_k=500,
                max_length=max_length,
                no_repeat_ngram_size=3,
                num_return_sequences=3,
            )
        # Three sequences are sampled; only the first one is decoded and shown
        return tokenizer.decode(out[0], skip_special_tokens=True)

    # Default prompt (Russian): "Okay Pavel, thanks for the support!"
    user_input = st.text_area("Input Text", "Ладно Павел, спасибо за поддержку!")

    # Sliders and input boxes for the generation parameters
    num_beams = st.slider("Number of Beams", min_value=1, max_value=20, value=10)
    temperature = st.slider("Temperature", min_value=0.1, max_value=3.0, value=1.0, step=0.1)
    max_length = st.number_input("Max Length", min_value=10, max_value=300, value=100)
    top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.85, step=0.05)

    if st.button("Generate"):
        generated_output = generate_text(user_input, num_beams, temperature, max_length, top_p)
        st.text_area("Generated Text", generated_output)
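
# A minimal sketch of two optional additions, not part of the original script.
# First, Streamlit reruns the whole file on every widget interaction, so the
# model and tokenizer above are reloaded each time; a cached loader avoids
# that. This uses st.cache_resource (Streamlit >= 1.18); the helper name
# load_model is my own, and app() would need to call it instead of loading
# inline for the cache to take effect.
@st.cache_resource
def load_model(model_path: str):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
    model = GPT2LMHeadModel.from_pretrained(model_path).to(device)
    return tokenizer, model, device


# Second, app() is defined but never called, so running this file as-is
# renders an empty page. `streamlit run <this file>` executes the module
# with __name__ == "__main__", so this guard makes the page render.
if __name__ == "__main__":
    app()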