Aitana_6.3 / app.py
rsepulvedat's picture
initial commit
1f780ee
raw
history blame
591 Bytes
import gradio as gr
from transformers import pipeline
import torch
# Load the Aitana-6.3B causal LM once at import time. bfloat16 halves memory vs fp32;
# device_map="auto" lets transformers place weights on available devices
# (presumably requires `accelerate` to be installed — confirm in requirements).
pipe = pipeline("text-generation", model="gplsi/Aitana-6.3B", torch_dtype=torch.bfloat16, device_map="auto")
def predict(input_text: str) -> str:
    """Generate a continuation of *input_text* with the Aitana-6.3B pipeline.

    Args:
        input_text: Prompt string from the Gradio text box.

    Returns:
        The generated text (prompt plus continuation) as a plain string, so
        Gradio's 'text' output component can render it.
    """
    generation = pipe(input_text, max_new_tokens=50, repetition_penalty=1.2, top_k=50, top_p=0.95, do_sample=True,
                      temperature=0.5, early_stopping=True, num_beams=2)
    # The text-generation pipeline returns [{"generated_text": ...}]. The
    # original code returned the whole dict, which Gradio stringified into
    # "{'generated_text': ...}" in the UI — extract the string instead.
    return generation[0]["generated_text"]
# Minimal Gradio UI: a single text input wired to predict(), a single text output.
gradio_app = gr.Interface(
    predict,
    inputs='text',
    outputs='text',
    title="Aitana-6.3B Text Generation",
)
if __name__ == "__main__":
    # Start the Gradio server when executed as a script (the Spaces entry point).
    gradio_app.launch()