import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load the GPT-2 model and tokenizer
model_name = 'gpt2-large'
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
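# Optional sketch (assumption: Streamlit >= 1.18 is available): caching the loader
# avoids re-loading the large gpt2-large weights on every Streamlit rerun.
#
# @st.cache_resource
# def load_model(name: str = 'gpt2-large'):
#     return GPT2LMHeadModel.from_pretrained(name), GPT2Tokenizer.from_pretrained(name)
#
# model, tokenizer = load_model()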
st.title("AI Article Generator") | |
# Input for the article title | |
title = st.text_input("Enter the title of the article") | |
# Parameters for text generation | |
max_length = st.slider("Maximum length of the article", min_value=50, max_value=1000, value=500, step=50) | |
temperature = st.slider("Temperature (creativity level)", min_value=0.7, max_value=1.5, value=1.0, step=0.1) | |
# Generate the article
if st.button("Generate Article"):
    if title:
        with st.spinner("Generating article..."):
            try:
                input_ids = tokenizer.encode(title, return_tensors='pt')
                output = model.generate(
                    input_ids,
                    max_length=max_length,
                    do_sample=True,  # sampling must be enabled for temperature to have any effect
                    temperature=temperature,
                    num_return_sequences=1,
                    no_repeat_ngram_size=2,
                    pad_token_id=tokenizer.eos_token_id  # GPT-2 has no pad token; reuse EOS to avoid a warning
                )
                article = tokenizer.decode(output[0], skip_special_tokens=True)
                st.success("Article generated successfully!")
                st.write(article)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.warning("Please enter a title to generate an article")
st.markdown(""" | |
<style> | |
.reportview-container { | |
flex-direction: column; | |
align-items: center; | |
} | |
.css-1kyxreq { | |
width: 100%; | |
max-width: 700px; | |
margin: auto; | |
} | |
</style> | |
""", unsafe_allow_html=True) | |