# TempCompass leaderboard Space (Gradio app).
__all__ = ['block', 'make_clickable_model', 'make_clickable_user', 'get_submissions']

import gradio as gr
import pandas as pd
import re
import pdb
import tempfile
import os
from constants import *
from src.compute import compute_scores

# NOTE(review): `global` at module scope is a no-op — module-level names are
# already global. Left in place to keep this change comment-only.
global data_component, filter_component

from huggingface_hub import HfApi

# HF_TOKEN is expected in the environment (Space secret) so that updated
# leaderboard results can be pushed back to the hosting repo.
hf_token = os.getenv('HF_TOKEN')
api = HfApi(token=hf_token)
def validate_model_size(s):
    """Normalize a user-supplied model-size string.

    Returns *s* unchanged when it is '<digits>B' (e.g. '7B') or the
    placeholder '-'; any other input is coerced to '-'.
    """
    pattern = r'^\d+B$|^-$'
    return s if re.match(pattern, s) else '-'
def upload_file(files):
    """Return the on-disk path (``.name``) of each uploaded file object."""
    return [uploaded.name for uploaded in files]
def add_new_eval(
    input_file,
    model_name_textbox: str,
    revision_name_textbox: str,
    model_link: str,
    model_type: str,
    model_size: str,
    notes: str,
):
    """Score an uploaded submission and insert/overwrite its leaderboard row.

    Computes per-task scores from the uploaded file, appends a new row (or
    overwrites the row matching the revision name), rewrites the leaderboard
    CSV, and pushes it back to the hosting Space.

    Returns 0 on success, or an "Error! ..." string describing the problem.
    """
    if input_file is None:
        return "Error! Empty file!"

    model_size = validate_model_size(model_size)
    # compute_scores returns a sequence whose second item is the list of
    # per-task scores — TODO confirm against src.compute.compute_scores.
    scores = compute_scores(input_file)[1]
    scores = [float(s) for s in scores]
    # The leaderboard schema expects exactly 25 score columns; fail cleanly
    # instead of raising IndexError inside the Gradio handler.
    if len(scores) < 25:
        return "Error! Incomplete score file!"

    csv_data = pd.read_csv(CSV_DIR)
    # Existing entries may be markdown links '[name](url)'; strip down to
    # the bare model name for comparison.
    name_list = [
        name.split(']')[0][1:] if name.endswith(')') else name
        for name in csv_data['Model']
    ]

    if revision_name_textbox == '':
        # New submission: the model name must not already be on the board.
        # (Was an `assert`, which is stripped under -O and crashes the UI.)
        model_name = model_name_textbox
        if model_name in name_list:
            return "Error! Model name already exists!"
        row = csv_data.shape[0]
    else:
        # Revision: overwrite the existing row if present, else append.
        model_name = revision_name_textbox
        row = name_list.index(model_name) if model_name in name_list else csv_data.shape[0]

    if model_link != '':
        # Render the model name as a markdown link.
        model_name = '[' + model_name + '](' + model_link + ')'

    csv_data.loc[row] = [model_name, model_type, model_size, *scores[:25], notes]
    csv_data.to_csv(CSV_DIR, index=False)

    # Push the newly added result back to the hosting Space.
    api.upload_file(
        path_or_fileobj=CSV_DIR,
        path_in_repo=CSV_DIR,
        repo_id="lyx97/TempCompass",
        repo_type="space",
    )
    return 0
def get_baseline_df():
    """Load the leaderboard, sorted by "Avg. All" (desc), restricted to the
    model-info columns plus whatever score columns are currently selected."""
    board = pd.read_csv(CSV_DIR)
    board = board.sort_values(by="Avg. All", ascending=False)
    visible = MODEL_INFO + checkbox_group.value
    return board[visible]
def get_all_df():
    """Load the full leaderboard (all columns), sorted by "Avg. All" descending."""
    return pd.read_csv(CSV_DIR).sort_values(by="Avg. All", ascending=False)
# ---------------------------------------------------------------------------
# Gradio UI: a leaderboard tab and a submission tab. Built at module level so
# `checkbox_group` / `data_component` are in scope for the callbacks above.
# ---------------------------------------------------------------------------
block = gr.Blocks()
with block:
    gr.Markdown(
        LEADERBORAD_INTRODUCTION
    )
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # Tab 1: the leaderboard table with selectable score columns.
        with gr.TabItem("🏅 TempCompass Benchmark", elem_id="video-benchmark-tab-table", id=0):
            gr.Markdown(
                TABLE_INTRODUCTION
            )
            # selection for column part:
            checkbox_group = gr.CheckboxGroup(
                choices=TASK_INFO,
                value=AVG_INFO,
                label="Select options",
                interactive=True,
            )
            # Create the dataframe component (the main leaderboard table).
            data_component = gr.components.Dataframe(
                value=get_baseline_df,
                headers=COLUMN_NAMES,
                type="pandas",
                datatype=DATA_TITILE_TYPE,
                interactive=False,
                visible=True,
            )

            def on_checkbox_group_change(selected_columns):
                """Rebuild the leaderboard table with only the selected score columns."""
                # pdb.set_trace()
                # Keep TASK_INFO's canonical column order regardless of click order.
                selected_columns = [item for item in TASK_INFO if item in selected_columns]
                present_columns = MODEL_INFO + selected_columns
                updated_data = get_all_df()[present_columns]
                # Sort by the first score column (first entry after the model-info columns).
                updated_data = updated_data.sort_values(by=present_columns[1], ascending=False)
                updated_headers = present_columns
                print(updated_headers)
                print([COLUMN_NAMES.index(x) for x in updated_headers])
                update_datatype = [DATA_TITILE_TYPE[COLUMN_NAMES.index(x)] for x in updated_headers]
                filter_component = gr.components.Dataframe(
                    value=updated_data,
                    headers=updated_headers,
                    type="pandas",
                    datatype=update_datatype,
                    interactive=False,
                    visible=True,
                )
                # pdb.set_trace()
                # Only the value is returned; gradio applies it to `data_component`.
                return filter_component.constructor_args['value']

            # Wire the checkbox group to the handler so the table refreshes on change.
            checkbox_group.change(fn=on_checkbox_group_change, inputs=checkbox_group, outputs=data_component)
            '''
            # table 2
            with gr.TabItem("📝 About", elem_id="seed-benchmark-tab-table", id=2):
                gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")
            '''
        # table 3
        # Tab 2 (id=3): submission form for new model evaluation results.
        with gr.TabItem("🚀 Submit here! ", elem_id="seed-benchmark-tab-table", id=3):
            # gr.Markdown(LEADERBORAD_INTRODUCTION, elem_classes="markdown-text")
            with gr.Row():
                gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
            with gr.Row():
                gr.Markdown("# ✉️✨ Submit your model evaluation json file here!", elem_classes="markdown-text")
            with gr.Row():
                # Left column: model metadata fields.
                with gr.Column():
                    model_name_textbox = gr.Textbox(
                        label="Model name", placeholder="Video-LLaVA-7B"
                    )
                    revision_name_textbox = gr.Textbox(
                        label="Revision Model Name", placeholder="Video-LLaVA-7B"
                    )
                    model_link = gr.Textbox(
                        label="Model Link", placeholder="https://huggingface.co/LanguageBind/Video-LLaVA-7B"
                    )
                    model_type = gr.Dropdown(
                        choices=[
                            "LLM",
                            "ImageLLM",
                            "VideoLLM",
                            "Other",
                        ],
                        label="Model type",
                        multiselect=False,
                        value=None,
                        interactive=True,
                    )
                    model_size = gr.Textbox(
                        label="Model size", placeholder="7B(Input content format must be 'number+B' or '-', default is '-')"
                    )
                    notes = gr.Textbox(
                        label="Notes", placeholder="Other details of the model or evaluation, e.g., which answer prompt is used."
                    )
                # Right column: file upload and submit button.
                with gr.Column():
                    input_file = gr.File(label="Click to Upload a json File", type='binary')
                    submit_button = gr.Button("Submit Eval")
                    submission_result = gr.Markdown()
                    submit_button.click(
                        add_new_eval,
                        inputs=[
                            input_file,
                            model_name_textbox,
                            revision_name_textbox,
                            model_link,
                            model_type,
                            model_size,
                            notes,
                        ],
                        # outputs = submission_result,
                    )
            # Manual refresh of the leaderboard table after a submission.
            with gr.Row():
                data_run = gr.Button("Refresh")
                data_run.click(
                    get_baseline_df, outputs=data_component
                )
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=20,
                elem_id="citation-button",
                show_copy_button=True,
            )
# block.load(get_baseline_df, outputs=data_title)
block.launch()