# "Spaces: Sleeping" — Hugging Face Space status header captured by the page
# scrape; kept as a comment so the file remains valid Python.
# Third-party dependencies: Gradio builds the web UI, Transformers provides
# the causal language model. (Trailing " | |" extraction artifacts removed —
# they made the file syntactically invalid.)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned Italian GPT-style model and its tokenizer from the
# Hugging Face Hub.
# NOTE(review): this downloads the weights at import time (network I/O) and
# keeps the model resident for the lifetime of the process.
model_name = "ruggsea/gpt-ita-fdi_lega"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Text-completion function exposed to the Gradio interface.
def complete_tweet(initial_text, temperature=0.7, top_k=50, top_p=0.92,
                   repetition_penalty=1.2, max_length=140):
    """Continue *initial_text* in the style of the fine-tuned model.

    Args:
        initial_text: Seed text the generation continues from.
        temperature: Sampling temperature; higher values give more random
            output.
        top_k: Sample only from the *k* most likely next tokens.
        top_p: Nucleus-sampling cumulative-probability cutoff.
        repetition_penalty: Values > 1.0 discourage repeating tokens.
        max_length: Maximum total sequence length in tokens. Defaults to
            140 (the original hard-coded value, echoing the classic tweet
            limit); now a parameter so callers can ask for longer output.

    Returns:
        The generated text as a plain string — prompt plus continuation,
        with special tokens stripped.
    """
    # Tokenize the prompt into a PyTorch tensor of input ids.
    input_ids = tokenizer.encode(initial_text, return_tensors="pt")
    # Sample a continuation with the requested decoding parameters.
    output = model.generate(
        input_ids,
        max_length=max_length,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    # Decode the first (only) returned sequence back to a string.
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Build the Gradio interface.
# NOTE(review): the original reused ONE Textbox instance as both `inputs` and
# `outputs`; a Gradio component instance is meant to fill a single role, and
# sharing it makes the UI render/behave incorrectly — use two textboxes.
tweet_input = gr.Textbox(
    label="Scrivi l'inizio del tweet e premi 'Submit' per completare il tweet",
    type="text",
)
tweet_output = gr.Textbox(label="Tweet completato", type="text")

interface = gr.Interface(
    fn=complete_tweet,
    inputs=tweet_input,
    outputs=tweet_output,
    live=False,  # generate only on explicit Submit, not on every keystroke
    examples=[["I migranti"], ["Il ddl Zan"]],
    title="Twitta come un parlamentare di FDI/Lega",
)

# Start the Gradio app with a public share link.
interface.launch(share=True)