"""A gradio app that renders a static leaderboard. This is used for Hugging Face Space."""
import ast
import argparse
import glob
import logging
import pickle
import time

import gradio as gr
import numpy as np
import pandas as pd

logger = logging.getLogger(__name__)
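# Mutable module-level stores for rendered component values; load_demo()
# returns these so background updates can refresh the UI in place.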
basic_component_values = [None] * 6
leader_component_values = [None]
def make_default_md(arena_df, elo_results):
total_votes = sum(arena_df["num_battles"]) // 2
total_models = len(arena_df)
leaderboard_md = f"""
# NeurIPS LLM Merging Competition Leaderboard
[Website](https://llm-merging.github.io/index) | [Starter Kit (Github)](https://github.com/llm-merging/LLM-Merging) | [Discord](https://discord.com/invite/dPBHEVnV)
"""
return leaderboard_md
def make_arena_leaderboard_md(arena_df):
total_votes = sum(arena_df["num_battles"]) // 2
total_models = len(arena_df)
space = "   "
leaderboard_md = f"""
Total #models: **{total_models}**.{space} Total #votes: **{"{:,}".format(total_votes)}**.{space} Last updated: June 1, 2024.
"""
return leaderboard_md
def make_category_arena_leaderboard_md(arena_df, arena_subset_df, name="Overall"):
total_votes = sum(arena_df["num_battles"]) // 2
total_models = len(arena_df)
space = "   "
total_subset_votes = sum(arena_subset_df["num_battles"]) // 2
total_subset_models = len(arena_subset_df)
leaderboard_md = f"""### {cat_name_to_explanation[name]}
#### [Coverage] {space} #models: **{total_subset_models} ({round(total_subset_models/total_models *100)}%)** {space} #votes: **{"{:,}".format(total_subset_votes)} ({round(total_subset_votes/total_votes * 100)}%)**{space}
"""
return leaderboard_md
def make_full_leaderboard_md(elo_results):
leaderboard_md = f"""
Three benchmarks are displayed: **Test Task 1**, **Test Task 2**, **Test Task 3**.
Higher values are better for all benchmarks.
"""
return leaderboard_md
def make_leaderboard_md_live(elo_results):
leaderboard_md = f"""
# Leaderboard
Last updated: {elo_results["last_updated_datetime"]}
{elo_results["leaderboard_table"]}
"""
return leaderboard_md
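# NOTE: the live-update path below relies on log-processing helpers
# (get_log_files, clean_battle_data, report_elo_analysis_results,
# report_basic_stats) that are not defined in this file; upstream they come
# from FastChat's monitor utilities. This static Space always loads a
# precomputed results file, so the live path is normally not exercised.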
def update_elo_components(max_num_files, elo_results_file):
log_files = get_log_files(max_num_files)
# Leaderboard
if elo_results_file is None: # Do live update
battles = clean_battle_data(log_files)
elo_results = report_elo_analysis_results(battles)
leader_component_values[0] = make_leaderboard_md_live(elo_results)
# Basic stats
basic_stats = report_basic_stats(log_files)
md0 = f"Last updated: {basic_stats['last_updated_datetime']}"
md1 = "### Action Histogram\n"
md1 += basic_stats["action_hist_md"] + "\n"
md2 = "### Anony. Vote Histogram\n"
md2 += basic_stats["anony_vote_hist_md"] + "\n"
md3 = "### Model Call Histogram\n"
md3 += basic_stats["model_hist_md"] + "\n"
md4 = "### Model Call (Last 24 Hours)\n"
md4 += basic_stats["num_chats_last_24_hours"] + "\n"
basic_component_values[0] = md0
basic_component_values[1] = basic_stats["chat_dates_bar"]
basic_component_values[2] = md1
basic_component_values[3] = md2
basic_component_values[4] = md3
basic_component_values[5] = md4
def update_worker(max_num_files, interval, elo_results_file):
while True:
tic = time.time()
update_elo_components(max_num_files, elo_results_file)
        duration = time.time() - tic
        print(f"update duration: {duration:.2f} s")
        time.sleep(max(interval - duration, 0))
def load_demo(url_params, request: gr.Request):
logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
return basic_component_values + leader_component_values
def model_hyperlink(model_name, link):
return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
def load_leaderboard_table_csv(filename, add_hyperlink=True):
    # Parse each CSV row into a dict keyed by header, normalizing the
    # numeric columns and mapping "-" to NaN.
    with open(filename) as fin:
        lines = fin.readlines()
    heads = [v.strip() for v in lines[0].split(",")]
    rows = []
    for i in range(1, len(lines)):
        row = [v.strip() for v in lines[i].split(",")]
        item = {}
        for h, v in zip(heads, row):
            if h == "Arena Elo rating":
                if v != "-":
                    v = int(ast.literal_eval(v))
                else:
                    v = np.nan
            elif h == "MMLU":
                if v != "-":
                    v = round(ast.literal_eval(v) * 100, 1)
                else:
                    v = np.nan
            elif h == "MT-bench (win rate %)":
                if v != "-":
                    v = round(ast.literal_eval(v[:-1]), 1)
                else:
                    v = np.nan
            elif h == "MT-bench (score)":
                if v != "-":
                    v = round(ast.literal_eval(v), 2)
                else:
                    v = np.nan
            item[h] = v
        if add_hyperlink:
            item["Model"] = model_hyperlink(item["Model"], item["Link"])
        rows.append(item)
    return rows
def build_basic_stats_tab():
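    """Lay out the live-stats tab; values are filled in asynchronously via
    basic_component_values."""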
empty = "Loading ..."
basic_component_values[:] = [empty, None, empty, empty, empty, empty]
md0 = gr.Markdown(empty)
gr.Markdown("#### Figure 1: Number of model calls and votes")
plot_1 = gr.Plot(show_label=False)
with gr.Row():
with gr.Column():
md1 = gr.Markdown(empty)
with gr.Column():
md2 = gr.Markdown(empty)
with gr.Row():
with gr.Column():
md3 = gr.Markdown(empty)
with gr.Column():
md4 = gr.Markdown(empty)
return [md0, plot_1, md1, md2, md3, md4]
def get_full_table(model_table_df):
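    """Build rows for the full-table view; the three task-score columns are
    placeholders (NaN) until per-task results are available."""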
values = []
for i in range(len(model_table_df)):
row = []
model_key = model_table_df.iloc[i]["key"]
model_name = model_table_df.iloc[i]["Model"]
# model display name
row.append(model_name)
row.append(np.nan)
row.append(np.nan)
row.append(np.nan)
# row.append(model_table_df.iloc[i]["MT-bench (score)"])
# row.append(model_table_df.iloc[i]["MMLU"])
# Organization
row.append(model_table_df.iloc[i]["Organization"])
# license
row.append(model_table_df.iloc[i]["License"])
values.append(row)
values.sort(key=lambda x: -x[1] if not np.isnan(x[1]) else 1e9)
return values
def create_ranking_str(ranking, ranking_difference):
if ranking_difference > 0:
# return f"{int(ranking)} (\u2191{int(ranking_difference)})"
return f"{int(ranking)} \u2191"
elif ranking_difference < 0:
# return f"{int(ranking)} (\u2193{int(-ranking_difference)})"
return f"{int(ranking)} \u2193"
else:
return f"{int(ranking)}"
def recompute_final_ranking(arena_df):
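    """Rank models by confidence intervals: a model's rank is 1 plus the
    number of models whose lower rating bound (q025) is strictly above its
    upper bound (q975), so models with overlapping intervals tie."""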
# compute ranking based on CI
ranking = {}
for i, model_a in enumerate(arena_df.index):
ranking[model_a] = 1
for j, model_b in enumerate(arena_df.index):
if i == j:
continue
if arena_df.loc[model_b]["rating_q025"] > arena_df.loc[model_a]["rating_q975"]:
ranking[model_a] += 1
return list(ranking.values())
def get_arena_table(arena_df, model_table_df, arena_subset_df=None):
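    """Build display rows from the Elo results.

    Without a subset, each row is [rank, model, rating, organization,
    license]; with a category subset, a rank-change ("Delta") value is
    inserted after the rank.
    """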
arena_df = arena_df.sort_values(by=["final_ranking", "rating"], ascending=[True, False])
arena_df["final_ranking"] = recompute_final_ranking(arena_df)
arena_df = arena_df.sort_values(by=["final_ranking"], ascending=True)
# arena_df["final_ranking"] = range(1, len(arena_df) + 1)
# sort by rating
if arena_subset_df is not None:
# filter out models not in the arena_df
arena_subset_df = arena_subset_df[arena_subset_df.index.isin(arena_df.index)]
arena_subset_df = arena_subset_df.sort_values(by=["rating"], ascending=False)
# arena_subset_df = arena_subset_df.sort_values(by=["final_ranking"], ascending=True)
arena_subset_df["final_ranking"] = recompute_final_ranking(arena_subset_df)
# keep only the models in the subset in arena_df and recompute final_ranking
arena_df = arena_df[arena_df.index.isin(arena_subset_df.index)]
# recompute final ranking
arena_df["final_ranking"] = recompute_final_ranking(arena_df)
# assign ranking by the order
arena_subset_df["final_ranking_no_tie"] = range(1, len(arena_subset_df) + 1)
arena_df["final_ranking_no_tie"] = range(1, len(arena_df) + 1)
# join arena_df and arena_subset_df on index
arena_df = arena_subset_df.join(arena_df["final_ranking"], rsuffix="_global", how="inner")
arena_df["ranking_difference"] = arena_df["final_ranking_global"] - arena_df["final_ranking"]
arena_df = arena_df.sort_values(by=["final_ranking", "rating"], ascending=[True, False])
arena_df["final_ranking"] = arena_df.apply(lambda x: create_ranking_str(x["final_ranking"], x["ranking_difference"]), axis=1)
values = []
for i in range(len(arena_df)):
row = []
model_key = arena_df.index[i]
        try:
            # Skip models that appear in the arena table but not in the model
            # table; the two tables do not cover the same set of models.
            model_name = model_table_df[model_table_df["key"] == model_key][
                "Model"
            ].values[0]
# rank
ranking = arena_df.iloc[i].get("final_ranking") or i+1
row.append(ranking)
if arena_subset_df is not None:
row.append(arena_df.iloc[i].get("ranking_difference") or 0)
# model display name
row.append(model_name)
# elo rating
row.append(round(arena_df.iloc[i]["rating"]))
# Organization
row.append(
model_table_df[model_table_df["key"] == model_key]["Organization"].values[0]
)
# license
row.append(
model_table_df[model_table_df["key"] == model_key]["License"].values[0]
)
values.append(row)
except Exception as e:
print(f"{model_key} - {e}")
return values
key_to_category_name = {
"full": "Overall",
}
cat_name_to_explanation = {
"Overall": "Overall Questions",
}
def build_leaderboard_tab(results_file, leaderboard_table_file, show_plot=False):
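    """Build the leaderboard tab from a pickled Elo-results file and a
    leaderboard CSV; when results_file is None, render a loading placeholder
    and wait for a live update."""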
arena_dfs = {}
category_elo_results = {}
if results_file is None: # Do live update
default_md = "Loading ..."
else:
with open(results_file, "rb") as fin:
elo_results = pickle.load(fin)
if "full" in elo_results:
print("KEYS ", elo_results.keys())
for k in elo_results.keys():
if k not in key_to_category_name:
continue
arena_dfs[key_to_category_name[k]] = elo_results[k]["leaderboard_table_df"]
category_elo_results[key_to_category_name[k]] = elo_results[k]
arena_df = arena_dfs["Overall"]
default_md = make_default_md(arena_df, category_elo_results["Overall"])
md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")
if leaderboard_table_file:
data = load_leaderboard_table_csv(leaderboard_table_file)
model_table_df = pd.DataFrame(data)
with gr.Tabs() as tabs:
# arena table
arena_table_vals = get_full_table(model_table_df)
with gr.Tab("Arena Elo", id=0):
md = make_arena_leaderboard_md(arena_df)
leaderboard_markdown = gr.Markdown(md, elem_id="leaderboard_markdown")
with gr.Row():
with gr.Column(scale=2):
category_dropdown = gr.Dropdown(choices=list(arena_dfs.keys()), label="Category", value="Overall")
default_category_details = make_category_arena_leaderboard_md(arena_df, arena_df, name="Overall")
with gr.Column(scale=4, variant="panel"):
category_deets = gr.Markdown(default_category_details, elem_id="category_deets")
elo_display_df = gr.Dataframe(
headers=[
"Rank",
"🤖 Model",
"⭐ Task 1",
"📈 Task 2",
"📚 Task 3",
"Organization",
"License",
],
datatype=[
"number",
"markdown",
"number",
"number",
"number",
"str",
"str",
],
value=arena_table_vals,
elem_id="arena_leaderboard_dataframe",
height=700,
                    column_widths=[70, 190, 110, 110, 110, 160, 150],
wrap=True,
)
gr.Markdown(
f"""Note: .
""",
elem_id="leaderboard_markdown"
)
leader_component_values[:] = [default_md]
# with gr.Tab("Full Leaderboard", id=0):
# md = make_full_leaderboard_md(elo_results)
# gr.Markdown(md, elem_id="leaderboard_markdown")
# with gr.Row():
# with gr.Column(scale=2):
# category_dropdown = gr.Dropdown(choices=list(arena_dfs.keys()), label="Category", value="Overall")
# default_category_details = make_category_arena_leaderboard_md(arena_df, arena_df, name="Overall")
# with gr.Column(scale=4, variant="panel"):
# category_deets = gr.Markdown(default_category_details, elem_id="category_deets")
# full_table_vals = get_full_table(model_table_df)
# display_df = gr.Dataframe(
# headers=[
# "🤖 Model",
# "⭐ Task 1",
# "📈 Task 2",
# "📚 Task 3",
# "Organization",
# "License",
# ],
# datatype=["markdown", "number", "number", "number", "str", "str"],
# value=full_table_vals,
# elem_id="full_leaderboard_dataframe",
# column_widths=[200, 100, 100, 100, 150, 150],
# height=700,
# wrap=True,
# )
# gr.Markdown(
# f"""Note: .
# """,
# elem_id="leaderboard_markdown"
# )
# leader_component_values[:] = [default_md]
        if not show_plot:
            gr.Markdown(
                """ ## Submit your model [here]().
                """,
                elem_id="leaderboard_markdown",
            )
        def update_leaderboard_df(arena_table_vals):
            # Subset rows carry a rank-change value after the rank, so a
            # "Delta" column is needed to match get_arena_table's row layout.
            elo_dataframe = pd.DataFrame(
                arena_table_vals,
                columns=["Rank", "Delta", "🤖 Model", "⭐ Arena Elo", "Organization", "License"],
            )
            # Color ranks by movement: up arrows green, down arrows red.
            def highlight_max(s):
                return ["color: green; font-weight: bold" if "\u2191" in v else "color: red; font-weight: bold" if "\u2193" in v else "" for v in s]
            def highlight_rank_max(s):
                return ["color: green; font-weight: bold" if v > 0 else "color: red; font-weight: bold" if v < 0 else "" for v in s]
            return elo_dataframe.style.apply(highlight_max, subset=["Rank"])
        def update_leaderboard_and_plots(category):
            arena_subset_df = arena_dfs[category]
            arena_subset_df = arena_subset_df[arena_subset_df["num_battles"] > 500]
            elo_subset_results = category_elo_results[category]
            arena_df = arena_dfs["Overall"]
            arena_values = get_arena_table(
                arena_df,
                model_table_df,
                arena_subset_df=arena_subset_df if category != "Overall" else None,
            )
            if category != "Overall":
                # Category view: string ranks (with movement arrows) plus a
                # Delta column showing the change versus the overall ranking.
                arena_values = update_leaderboard_df(arena_values)
                arena_values = gr.Dataframe(
                    headers=[
                        "Rank",
                        "Delta",
                        "🤖 Model",
                        "⭐ Arena Elo",
                        "Organization",
                        "License",
                    ],
                    datatype=[
                        "str",
                        "number",
                        "markdown",
                        "number",
                        "str",
                        "str",
                    ],
                    value=arena_values,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[60, 70, 190, 110, 160, 150],
                    wrap=True,
                )
            else:
                arena_values = gr.Dataframe(
                    headers=[
                        "Rank",
                        "🤖 Model",
                        "⭐ Arena Elo",
                        "Organization",
                        "License",
                    ],
                    datatype=[
                        "number",
                        "markdown",
                        "number",
                        "str",
                        "str",
                    ],
                    value=arena_values,
                    elem_id="arena_leaderboard_dataframe",
                    height=700,
                    column_widths=[70, 190, 110, 160, 150],
                    wrap=True,
                )
            leaderboard_md = make_category_arena_leaderboard_md(arena_df, arena_subset_df, name=category)
            return arena_values, leaderboard_md
        category_dropdown.change(update_leaderboard_and_plots, inputs=[category_dropdown], outputs=[elo_display_df, category_deets])
with gr.Accordion(
"📝 Citation",
open=True,
):
citation_md = """
### Citation
Please cite the following paper.
"""
gr.Markdown(citation_md, elem_id="leaderboard_markdown")
gr.Markdown(acknowledgment_md)
    return [md_1]
block_css = """
#notice_markdown {
font-size: 104%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#category_deets {
text-align: center;
padding: 0px;
padding-left: 5px;
}
#leaderboard_markdown {
font-size: 104%
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_header_markdown {
font-size: 104%;
text-align: center;
display:block;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
#plot-title {
text-align: center;
display:block;
}
#non-interactive-button {
display: inline-block;
padding: 10px 10px;
background-color: #f7f7f7; /* Super light grey background */
text-align: center;
font-size: 26px; /* Larger text */
border-radius: 0; /* Straight edges, no border radius */
    border: none; /* no visible border */
user-select: none; /* The text inside the button is not selectable */
pointer-events: none; /* The button is non-interactive */
}
footer {
display:none !important
}
.sponsor-image-about img {
margin: 0 20px;
margin-top: 20px;
height: 40px;
max-height: 100%;
width: auto;
float: left;
}
"""
acknowledgment_md = """
### Acknowledgment
We thank []() for their generous [sponsorship]().
<div class="sponsor-image-about">
</div>
"""
def build_demo(elo_results_file, leaderboard_table_file):
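    """Assemble the Gradio Blocks app from the Elo results and leaderboard CSV."""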
text_size = gr.themes.sizes.text_lg
theme = gr.themes.Base(text_size=text_size)
theme.set(button_secondary_background_fill_hover="*primary_300",
button_secondary_background_fill_hover_dark="*primary_700")
with gr.Blocks(
title="LLM Merging Leaderboard",
theme=theme,
# theme = gr.themes.Base.load("theme.json"), # uncomment to use new cool theme
css=block_css,
) as demo:
leader_components = build_leaderboard_tab(
elo_results_file, leaderboard_table_file, show_plot=True
)
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--share", action="store_true")
parser.add_argument("--host", default="0.0.0.0")
parser.add_argument("--port", type=int, default=7860)
args = parser.parse_args()
    # Pick the most recent files by the numeric date suffix in the filename,
    # e.g. elo_results_20240601.pkl and leaderboard_table_20240601.csv.
    elo_result_files = glob.glob("elo_results_*.pkl")
    elo_result_files.sort(key=lambda x: int(x[12:-4]))
    elo_result_file = elo_result_files[-1]
    leaderboard_table_files = glob.glob("leaderboard_table_*.csv")
    leaderboard_table_files.sort(key=lambda x: int(x[18:-4]))
    leaderboard_table_file = leaderboard_table_files[-1]
demo = build_demo(elo_result_file, leaderboard_table_file)
demo.launch(share=args.share, server_name=args.host, server_port=args.port)
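    # Example usage (assuming a results .pkl and leaderboard .csv are present):
    #   python app.py                 # serve locally on 0.0.0.0:7860
    #   python app.py --share         # also create a public Gradio link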