import gradio as gr
import pandas as pd
from transformers import pipeline
from load_models import models_and_tokenizers, models_checkpoints
import spaces  # Hugging Face Spaces GPU helper; imported here but not used below
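# `load_models` is a local module that is not shown here. From the way its exports
# are used below, it is assumed to provide:
#   - models_checkpoints: the list of checkpoint names offered in the dropdowns
#   - models_and_tokenizers: a dict mapping each checkpoint name to a
#     (model, tokenizer) pair, e.g. built with AutoModelForCausalLM.from_pretrained
#     and AutoTokenizer.from_pretrained
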
# Tracks which two models are currently battling, so the vote handlers know which rows to update.
choice = {"ModelA": "", "ModelB": ""}
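# models.csv is assumed to hold the leaderboard, with at least the columns this file
# relies on: MODEL, MATCHES_PLAYED, MATCHES_WON, WINNING_RATE.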
dff = pd.read_csv("models.csv")
dff.to_html("tab.html")  # initial HTML dump used by the Leaderboard tab

def refreshfn() -> gr.HTML:
    """Re-render the leaderboard from models.csv and return it as an HTML component."""
    df = pd.read_csv("models.csv")
    df.to_html("tab.html")
    with open("tab.html") as f:
        content = f.read()
    return gr.HTML(content)

def rewrite_csv_ordered_by_winning_rate(csv_path):
    # Read the input CSV
    df = pd.read_csv(csv_path)
    # Sort the DataFrame by WINNING_RATE in descending order
    df_sorted = df.sort_values(by="WINNING_RATE", ascending=False)
    # Save the sorted DataFrame back to the same CSV file, overwriting it
    df_sorted.to_csv(csv_path, index=False)

def run_inference(pipe, prompt):
    """Run a text-generation pipeline on the prompt and return the generated text."""
    response = pipe(prompt)
    bot_message = response[0]["generated_text"]
    return bot_message

def modelA_button():
    """Register a vote for Model A: increment its wins and recompute its winning rate."""
    global choice
    df = pd.read_csv("models.csv")
    df.loc[df["MODEL"] == choice["ModelA"], "MATCHES_WON"] += 1
    df.loc[df["MODEL"] == choice["ModelA"], "WINNING_RATE"] = (
        df.loc[df["MODEL"] == choice["ModelA"], "MATCHES_WON"]
        / df.loc[df["MODEL"] == choice["ModelA"], "MATCHES_PLAYED"]
    )
    df.to_csv("models.csv", index=False)  # index=False keeps a spurious index column out of the CSV
    rewrite_csv_ordered_by_winning_rate("models.csv")

def modelB_button():
    """Register a vote for Model B: increment its wins and recompute its winning rate."""
    global choice
    df = pd.read_csv("models.csv")
    df.loc[df["MODEL"] == choice["ModelB"], "MATCHES_WON"] += 1
    df.loc[df["MODEL"] == choice["ModelB"], "WINNING_RATE"] = (
        df.loc[df["MODEL"] == choice["ModelB"], "MATCHES_WON"]
        / df.loc[df["MODEL"] == choice["ModelB"], "MATCHES_PLAYED"]
    )
    df.to_csv("models.csv", index=False)  # index=False keeps a spurious index column out of the CSV
    rewrite_csv_ordered_by_winning_rate("models.csv")

def reply(modelA, modelB, prompt):
    """Generate a response from each selected model and record that a match was played."""
    global choice
    choice["ModelA"] = modelA
    choice["ModelB"] = modelB
    # Count the match for both contenders in the leaderboard.
    df = pd.read_csv("models.csv")
    df.loc[df["MODEL"] == modelA, "MATCHES_PLAYED"] += 1
    df.loc[df["MODEL"] == modelB, "MATCHES_PLAYED"] += 1
    df.to_csv("models.csv", index=False)
    # Build one pipeline per model, each pinned to its own GPU.
    pipeA = pipeline("text-generation", model=models_and_tokenizers[modelA][0], tokenizer=models_and_tokenizers[modelA][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="cuda:0")
    responseA = run_inference(pipeA, prompt)
    pipeB = pipeline("text-generation", model=models_and_tokenizers[modelB][0], tokenizer=models_and_tokenizers[modelB][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="cuda:1")
    responseB = run_inference(pipeB, prompt)
    return responseA, responseB

modelA_dropdown = gr.Dropdown(models_checkpoints, label="Model A", info="Choose the first model for the battle!")
modelB_dropdown = gr.Dropdown(models_checkpoints, label="Model B", info="Choose the second model for the battle!")
prompt_textbox = gr.Textbox(label="Prompt", value="Is pineapple pizza sacrilegious?")

# Tab 1: the chat arena. Pick two models, send a prompt, then vote for the better answer.
with gr.Blocks() as demo1:
    demo0 = gr.Interface(fn=reply, inputs=[modelA_dropdown, modelB_dropdown, prompt_textbox], outputs=[gr.Markdown(label="Model A response"), gr.Markdown(label="Model B response")])
    btnA = gr.Button("Vote for Model A!")
    btnB = gr.Button("Vote for Model B!")
    btnA.click(modelA_button, inputs=None, outputs=None)
    btnB.click(modelB_button, inputs=None, outputs=None)

# Tab 2: the leaderboard, rendered from tab.html, with a manual refresh button.
with gr.Blocks() as demo2:
    with open("tab.html") as f:
        content = f.read()
    t = gr.HTML(content)
    btn = gr.Button("Refresh")
    btn.click(fn=refreshfn, inputs=None, outputs=t)

demo = gr.TabbedInterface([demo1, demo2], ["Chat Arena", "Leaderboard"])

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)