ahmedheakl committed
Commit • b0ee7b4
1 Parent(s): 27b66d8
Update app.py
app.py CHANGED
@@ -1,204 +1,75 @@
-import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
 import pandas as pd
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-
-
-LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
-def init_leaderboard(dataframe):
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
-        bool_checkboxgroup_label="Hide models",
-        interactive=False,
+import gradio as gr
+import plotly.graph_objects as go
+
+# Create the DataFrame
+data = {
+    'Method': ['GPT-4o', 'GPT-4o-mini', 'Gemini-1.5-Pro', 'Gemini-1.5-Flash', 'Qwen2-VL-2B'],
+    'MM Understanding & Reasoning': [57.90, 48.82, 46.67, 45.58, 40.59],
+    'OCR & Document Understanding': [59.11, 42.89, 36.59, 33.59, 25.68],
+    'Charts & Diagram Understanding': [73.57, 64.98, 47.06, 48.25, 27.83],
+    'Video Understanding': [74.27, 68.11, 42.94, 53.31, 38.90],
+    'Cultural Specific Understanding': [80.86, 65.92, 56.24, 46.54, 34.27],
+    'Medical Imaging': [49.90, 47.37, 33.77, 42.86, 29.12],
+    'Agro Specific': [80.75, 79.58, 72.12, 76.06, 52.02],
+    'Remote Sensing Understanding': [22.85, 16.93, 17.07, 14.95, 12.56]
+}
+
+df = pd.DataFrame(data)
+
+def plot_performance():
+    categories = df.columns[1:]
+    fig = go.Figure()
+
+    for method in df['Method']:
+        values = df[df['Method'] == method].iloc[0, 1:].tolist()
+        fig.add_trace(go.Scatterpolar(
+            r=values,
+            theta=categories,
+            fill='toself',
+            name=method
+        ))
+
+    fig.update_layout(
+        polar=dict(
+            radialaxis=dict(
+                visible=True,
+                range=[0, 100]
+            )),
+        showlegend=True,
+        title="Performance Comparison across Categories"
     )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✍️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
-
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=1800)
-scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+    return fig
+
+def create_leaderboard():
+    return df
+
+# Define the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Multimodal Understanding Leaderboard")
+
+    with gr.Tabs():
+        with gr.TabItem("📊 Performance Plot"):
+            gr.Plot(plot_performance)
+
+        with gr.TabItem("🏅 Leaderboard Table"):
+            gr.DataFrame(create_leaderboard)
+
+        with gr.TabItem("📖 About"):
+            gr.Markdown("""
+            This leaderboard compares the performance of various models across different categories of multimodal understanding tasks. The scores represent the accuracy or performance metric for each model in the respective category.
+
+            **Categories:**
+            - MM Understanding & Reasoning
+            - OCR & Document Understanding
+            - Charts & Diagram Understanding
+            - Video Understanding
+            - Cultural Specific Understanding
+            - Medical Imaging
+            - Agro Specific
+            - Remote Sensing Understanding
+
+            The data is presented both as a radar chart for visual comparison and as a table for detailed viewing.
+            """)
+
+demo.launch()
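
To sanity-check the radar-chart logic introduced in this commit outside of Gradio, the minimal standalone sketch below reuses the Scatterpolar loop from the new plot_performance(); the three data rows are copied from the commit's table, and radar_check.html is an arbitrary output path chosen for illustration, not part of the commit. Note also that gr.Plot(plot_performance) and gr.DataFrame(create_leaderboard) pass callables as the component value, which Gradio invokes to compute the displayed figure and table.

# Minimal sketch: exercise the Scatterpolar radar logic from the new app.py
# outside Gradio. Assumes pandas and plotly are installed; the rows below are
# copied from the commit's table, and radar_check.html is a hypothetical path.
import pandas as pd
import plotly.graph_objects as go

df = pd.DataFrame({
    'Method': ['GPT-4o', 'Qwen2-VL-2B'],
    'MM Understanding & Reasoning': [57.90, 40.59],
    'OCR & Document Understanding': [59.11, 25.68],
    'Charts & Diagram Understanding': [73.57, 27.83],
})

fig = go.Figure()
for method in df['Method']:
    # One closed polygon per model, as in plot_performance()
    values = df[df['Method'] == method].iloc[0, 1:].tolist()
    fig.add_trace(go.Scatterpolar(r=values, theta=list(df.columns[1:]),
                                  fill='toself', name=method))
fig.update_layout(polar=dict(radialaxis=dict(visible=True, range=[0, 100])))
fig.write_html("radar_check.html")  # open in a browser to inspect the chart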