ahmedheakl commited on
Commit
20f0a61
1 Parent(s): aae65ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -9
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import pandas as pd
2
  import gradio as gr
3
 
4
- # Create the dataset based on the table you provided
5
  data = {
6
  "Method": ["GPT-4o", "GPT-4o-mini", "Gemini-1.5-Pro", "Gemini-1.5-Flash", "Qwen2-VL-2B"],
7
  "MM Understanding & Reasoning": [57.90, 48.82, 46.67, 45.58, 40.59],
@@ -14,20 +13,18 @@ data = {
14
  "Remote Sensing Understanding": [22.85, 16.93, 17.07, 14.95, 12.56]
15
  }
16
 
17
- # Convert the dataset into a DataFrame
18
  df = pd.DataFrame(data)
19
-
20
- # Calculate the average score for each model across the different tasks
21
  df['Average Score'] = df.iloc[:, 1:].mean(axis=1)
22
 
23
- # Function to display the data in a Gradio interface
24
  def display_data():
25
  return df
26
 
27
- # Create the Gradio interface
28
  with gr.Blocks() as demo:
29
- gr.Markdown("# Model Performance Across Various Understanding Tasks")
30
- gr.Markdown("This table shows the performance of different models across various tasks including OCR, chart understanding, video, medical imaging, and more. An average score is also calculated for each model.")
31
- gr.Dataframe(value=df, label="Model Performance with Average Scores", interactive=False)
 
 
 
32
 
33
  demo.launch()
 
1
  import pandas as pd
2
  import gradio as gr
3
 
 
4
  data = {
5
  "Method": ["GPT-4o", "GPT-4o-mini", "Gemini-1.5-Pro", "Gemini-1.5-Flash", "Qwen2-VL-2B"],
6
  "MM Understanding & Reasoning": [57.90, 48.82, 46.67, 45.58, 40.59],
 
13
  "Remote Sensing Understanding": [22.85, 16.93, 17.07, 14.95, 12.56]
14
  }
15
 
 
16
# Assemble the leaderboard table from the raw score dict, then append a
# per-model mean taken over every task column (all columns after "Method").
df = pd.DataFrame(data)
df['Average Score'] = df.iloc[:, 1:].mean(axis=1)
18
 
 
19
def display_data():
    """Return the module-level leaderboard DataFrame (scores + averages)."""
    return df
21
 
 
22
# Build the CAMEL-Bench leaderboard UI: icon banner, title, blurb, and the
# read-only results table, then serve it.
with gr.Blocks() as demo:
    # NOTE(review): placeholder asset — replace with the final camel icon URL.
    gr.Markdown(
        "![camel icon](https://cdn-uploads.huggingface.co/production/uploads/656864e12d73834278a8dea7/n-XfVKd1xVywH_vgPyJyQ.png)",
        elem_id="camel-icon",
    )
    gr.Markdown("# **CAMEL-Bench: Model Performance Across Vision Understanding Tasks**")
    gr.Markdown("""
    This table shows the performance of different models across various tasks including OCR, chart understanding, video, medical imaging, and more.
    """)
    gr.Dataframe(value=df, label="CAMEL-Bench Model Performance", interactive=False)

demo.launch()