import copy

import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
from gradio.components.checkboxgroup import CheckboxGroup
from huggingface_hub import snapshot_download

from src.about import (
    CITATION_BUTTON_TEXT,
    INTRODUCTION_TEXT,
    TITLE,
    LINKS,
)
from src.display.css_html_js import (
    custom_css,
    CSS_EXTERNAL,
    JS_EXTERNAL,
)
from src.envs import (
API,
EVAL_DETAILED_RESULTS_PATH,
EVAL_RESULTS_PATH,
EVAL_DETAILED_RESULTS_REPO,
REPO_ID,
RESULTS_REPO,
TOKEN,
NEWEST_VERSION,
)
from src.populate import get_leaderboard_df
def restart_space():
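    """Restart the Space; used both to retry failed downloads and on the hourly schedule."""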
API.restart_space(repo_id=REPO_ID)
### Space initialisation: download the results snapshots, restarting the Space on failure
try:
print(EVAL_DETAILED_RESULTS_REPO)
snapshot_download(
repo_id=EVAL_DETAILED_RESULTS_REPO,
local_dir=EVAL_DETAILED_RESULTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN,
)
except Exception:
    # If the download fails, restart the Space, which retries initialisation from scratch.
    restart_space()
try:
    print(RESULTS_REPO)
snapshot_download(
repo_id=RESULTS_REPO,
local_dir=EVAL_RESULTS_PATH,
repo_type="dataset",
tqdm_class=None,
etag_timeout=30,
token=TOKEN,
)
except Exception:
restart_space()
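
# Mapping from dataset version (subset name) to its results DataFrame.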
LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
def get_default_textbox():
    """Return a fresh, empty model-name search box."""
    return gr.Textbox("", placeholder="🔍 Search Models... [press enter]", label="Filter Models by Name")


def get_default_checkbox(subset):
    """Return a column selector for `subset` with every column ticked."""
    choices = list(LEADERBOARD_DF[subset].columns)
    # "Model Name" is always displayed, so it is not offered as a toggle.
    choices.remove("Model Name")
    return gr.CheckboxGroup(
        choices=choices,
        label="Select Columns to Display",
        value=choices,
    )
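
# The subset most recently rendered; update_data() uses it to detect subset switches.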
old_version = NEWEST_VERSION
def init_leaderboard(dataframes):
    """Build the leaderboard tab: subset dropdown, search box, column selector, and results table."""
    # Reverse the key order so the newest subset is listed first in the dropdown.
    subsets = list(reversed(list(dataframes.keys())))
    with gr.Row():
        selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=NEWEST_VERSION)
        research_textbox = get_default_textbox()
        selected_columns = get_default_checkbox(NEWEST_VERSION)
    data = dataframes[NEWEST_VERSION]
    with gr.Row():
        df = gr.Dataframe(data, type="pandas")
    def refresh(subset):
        """Re-fetch the leaderboard data and rebuild the view for `subset` with default filters."""
        global LEADERBOARD_DF
        LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
        default_columns = list(LEADERBOARD_DF[subset].columns)
        return update_data(subset, None, default_columns, force_refresh=True)
    def update_data(subset, search_term, selected_columns, force_refresh=False):
        """Filter, sort, and column-select the table; reset the filters when the subset changes."""
        global old_version
        if old_version != subset or force_refresh:
            # Switching subsets (or refreshing) discards the search term and
            # restores the default column selection for the new subset.
            search_term = None
            selected_columns = get_default_checkbox(subset)
        # Keep a copy to send back to the CheckboxGroup output unchanged.
        bak_selected_columns = copy.deepcopy(selected_columns)
        old_version = subset
        filtered_data = dataframes[subset]
        if search_term:
            filtered_data = filtered_data[filtered_data["Model Name"].str.contains(search_term, case=False)]
        # Sort a copy rather than in place, so the cached dataframe is never mutated.
        filtered_data = filtered_data.sort_values(by="Total", ascending=False)
        if isinstance(selected_columns, CheckboxGroup):
            # After a reset, selected_columns is a fresh component; unwrap its choices.
            selected_columns = selected_columns.choices
            if selected_columns and isinstance(selected_columns[0], tuple):
                # Gradio may store choices as (label, value) pairs; keep the values.
                selected_columns = [c[1] for c in selected_columns]
        # Preserve the dataframe's column order and always include "Model Name".
        selected_columns = [
            c for c in filtered_data.columns if c in selected_columns or c == "Model Name"
        ]
        selected_data = filtered_data[selected_columns]
        return gr.Dataframe(selected_data, type="pandas"), research_textbox, bak_selected_columns
    with gr.Row():
        refresh_button = gr.Button("Refresh")
    refresh_button.click(
        refresh,
        inputs=[selected_subset],
        outputs=[df, research_textbox, selected_columns],
        concurrency_limit=20,
    )
    # Any control change re-renders the table (and resets the filters on a subset switch).
    control_inputs = [selected_subset, research_textbox, selected_columns]
    control_outputs = [df, research_textbox, selected_columns]
    selected_subset.change(update_data, inputs=control_inputs, outputs=control_outputs)
    research_textbox.submit(update_data, inputs=control_inputs, outputs=control_outputs)
    selected_columns.change(update_data, inputs=control_inputs, outputs=control_outputs)
def init_detailed_results():
    """Build the detailed-results tab (raw HTML embed)."""
    with gr.Row():
        gr.HTML(
            """\
            """
        )
# Inject the external stylesheets and scripts into the page <head>.
HEAD = "".join(
    [f'<link rel="stylesheet" href="{css}">' for css in CSS_EXTERNAL]
    + [f'<script src="{js}"></script>' for js in JS_EXTERNAL]
)
demo = gr.Blocks(css=custom_css, head=HEAD)
with demo:
gr.HTML(TITLE)
gr.HTML(LINKS)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("🏅 LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
init_leaderboard(LEADERBOARD_DF)
with gr.TabItem("📝 Detailed Results", elem_id="llm-benchmark-tab-table", id=2):
init_detailed_results()
with gr.Row():
with gr.Accordion("📙 Citation", open=False):
gr.Markdown("```bib\n" + CITATION_BUTTON_TEXT + "\n```")
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()