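"""Gradio chat interface for the Hawoly18/Adia_Llama3.1 causal language model."""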
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Tuple
import torch
model_name = "Hawoly18/Adia_Llama3.1"
# Check whether a GPU is available and select the device accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
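
# Note: for GPU use with a large checkpoint, loading in half precision can
# reduce memory pressure, e.g. (an option, not part of the original app):
#   model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to(device)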
def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> str:
    # Rebuild the conversation as a plain-text prompt: system message first,
    # then alternating User/Assistant turns, ending with the new user message.
    prompt = system_message
    for user_msg, assistant_msg in history:
        prompt += f"\nUser: {user_msg}\nAssistant: {assistant_msg}"
    prompt += f"\nUser: {message}\nAssistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,  # cap on generated tokens only, matching the "Max new tokens" slider
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,  # Llama tokenizers define no pad token by default
    )

    # Keep only the text produced after the final "Assistant:" marker.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).split("Assistant:")[-1].strip()
    return response
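
# Note: if the tokenizer ships a chat template, building the prompt with
# tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# would likely match the model's fine-tuning format more closely than the plain
# "User:/Assistant:" scheme above, which is kept here as the original app's approach.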
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    title="Chatbot Interface",
)
if __name__ == "__main__":
    demo.launch()
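    # Passing share=True to demo.launch() would additionally expose a temporary
    # public Gradio URL; the default launches on localhost only.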