clefourrier (HF staff) and chriscanal committed
Commit f2bc0a5 · 1 parent: 36bf18d

Creating functions for plotting results over time (#295)


- Creating functions for plotting results over time (319b0b7936fb504f9017c3c4ce9f10466ad55202)
- Added graphs tab (1d6addaf6050a163efb58af4eb6bc6346adfeaac)
- Changed to Plotly for interactive graphs! (65fc294da6b2789e87fd20d916732b3f91391843)
- Updated main to include title in the graph function parameters (e872e8a162076990e64ef65a05611bc0d042848a)
- Added y-axis range to make graph more aesthetically pleasing (02700b60517a1e28b27cdc57ffea040f9e6cf830)
- Fixing bug that messes up the order of models (75297e78c74b787229e69009b2ab9dfd3a339e20)
- Updated app.py to fix conflict and changed name of tab per Clémentine Fourrier's request (8e47868563c084edcd00b0f8cb696872404003b1)
- Updated plotted models to exclude flagged models (36bf409eccd16b3db35bd48882cf4b27cb73c832)
- Merge branch 'main' into pr/295 (81c331307b066857e513829a0ab9372421e315ca)


Co-authored-by: Christopher Canal <[email protected]>
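
The pieces added in this PR chain together as follows: leaderboard results are stamped with dates, reduced to the running best score per metric, reshaped into long format, and rendered as an interactive Plotly figure. Below is a minimal sketch of that pipeline with hypothetical toy data; the column names mirror those the leaderboard DataFrame uses, and `join_model_info_with_results` is skipped here because it reads the on-disk `model_info_cache.pkl`:

```python
import pandas as pd

from src.display_models.plot_results import (
    HUMAN_BASELINES,
    create_metric_plot_obj,
    create_plot_df,
    create_scores_df,
)

# Toy leaderboard slice (hypothetical models and scores); the real app
# dates each row via join_model_info_with_results instead.
toy_results = pd.DataFrame(
    {
        "model_name_for_query": ["model-a", "model-b", "model-c"],
        "Results Date": pd.to_datetime(["2023-06-01", "2023-07-01", "2023-08-01"], utc=True),
        "Average ⬆️": [55.0, 62.5, 60.0],
        "ARC": [50.0, 61.0, 58.0],
        "HellaSwag": [70.0, 75.0, 72.0],
        "MMLU": [45.0, 52.0, 51.0],
        "TruthfulQA": [55.0, 62.0, 59.0],
    }
)

# Running best scores per date -> long format -> interactive Plotly figure.
plot_df = create_plot_df(create_scores_df(toy_results))
fig = create_metric_plot_obj(
    plot_df, ["Average ⬆️"], HUMAN_BASELINES, title="Average of Top Scores and Human Baseline Over Time"
)
fig.show()
```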

Files changed (2)
  1. app.py +27 -0
  2. src/display_models/plot_results.py +223 -0
app.py CHANGED

@@ -17,6 +17,13 @@ from src.assets.text_content import (
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
+from src.display_models.plot_results import (
+    create_metric_plot_obj,
+    create_scores_df,
+    create_plot_df,
+    join_model_info_with_results,
+    HUMAN_BASELINES,
+)
 from src.display_models.get_model_metadata import DO_NOT_SUBMIT_MODELS, ModelType
 from src.display_models.modelcard_filter import check_model_card
 from src.display_models.utils import (
@@ -93,6 +100,7 @@ update_collections(original_df.copy())
 leaderboard_df = original_df.copy()
 
 models = original_df["model_name_for_query"].tolist()  # needed for model backlinks in their model cards to the leaderboard
+plot_df = create_plot_df(create_scores_df(join_model_info_with_results(original_df)))
 to_be_dumped = f"models = {repr(models)}\n"
 
 (
@@ -515,6 +523,25 @@ with demo:
                 leaderboard_table,
                 queue=True,
             )
+
+        with gr.TabItem("📈 Metrics evolution through time", elem_id="llm-benchmark-tab-table", id=4):
+            with gr.Row():
+                with gr.Column():
+                    chart = create_metric_plot_obj(
+                        plot_df,
+                        ["Average ⬆️"],
+                        HUMAN_BASELINES,
+                        title="Average of Top Scores and Human Baseline Over Time",
+                    )
+                    gr.Plot(value=chart, interactive=False, width=500, height=500)
+                with gr.Column():
+                    chart = create_metric_plot_obj(
+                        plot_df,
+                        ["ARC", "HellaSwag", "MMLU", "TruthfulQA"],
+                        HUMAN_BASELINES,
+                        title="Top Scores and Human Baseline Over Time",
+                    )
+                    gr.Plot(value=chart, interactive=False, width=500, height=500)
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
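For reference, a stripped-down sketch of the new tab in isolation, using a hand-built frame in the shape `create_plot_df` returns (values are hypothetical); it mirrors the `gr.Plot` wiring added above:

```python
import gradio as gr
import pandas as pd

from src.display_models.plot_results import HUMAN_BASELINES, create_metric_plot_obj

# Hand-built frame in the shape create_plot_df returns (hypothetical values).
plot_df = pd.DataFrame(
    {
        "Result Date": pd.to_datetime(["2023-06-01", "2023-08-01"], utc=True),
        "Metric Value": [50.0, 61.0],
        "Metric Name": ["ARC", "ARC"],
        "Model Name": ["model-a", "model-b"],
    }
)

with gr.Blocks() as demo:
    with gr.TabItem("📈 Metrics evolution through time"):
        chart = create_metric_plot_obj(
            plot_df, ["ARC"], HUMAN_BASELINES, title="Top Scores and Human Baseline Over Time"
        )
        gr.Plot(value=chart, interactive=False, width=500, height=500)

demo.launch()
```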
src/display_models/plot_results.py ADDED
@@ -0,0 +1,223 @@
+import pandas as pd
+import plotly.express as px
+from plotly.graph_objs import Figure
+import pickle
+from datetime import datetime, timezone
+from typing import List, Dict, Tuple, Any
+from src.display_models.model_metadata_flags import FLAGGED_MODELS
+
+# Average ⬆️ human baseline is 0.897 (source: averaging human baselines below)
+# ARC human baseline is 0.80 (source: https://lab42.global/arc/)
+# HellaSwag human baseline is 0.95 (source: https://deepgram.com/learn/hellaswag-llm-benchmark-guide)
+# MMLU human baseline is 0.898 (source: https://openreview.net/forum?id=d7KBjmI3GmQ)
+# TruthfulQA human baseline is 0.94 (source: https://arxiv.org/pdf/2109.07958.pdf)
+# Define the human baselines
+HUMAN_BASELINES = {
+    "Average ⬆️": 0.897 * 100,
+    "ARC": 0.80 * 100,
+    "HellaSwag": 0.95 * 100,
+    "MMLU": 0.898 * 100,
+    "TruthfulQA": 0.94 * 100,
+}
+
+
+def to_datetime(model_info: Tuple[str, Any]) -> datetime:
+    """
+    Converts the lastModified attribute of the object to datetime.
+
+    :param model_info: A tuple containing the name and object.
+        The object must have a lastModified attribute
+        with a string representing the date and time.
+    :return: A datetime object converted from the lastModified attribute of the input object.
+    """
+    name, obj = model_info
+    return datetime.strptime(obj.lastModified, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=timezone.utc)
+
+
+def join_model_info_with_results(results_df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Integrates model information with the results DataFrame by matching 'Model sha'.
+
+    :param results_df: A DataFrame containing results information, including a 'Model sha' column.
+    :return: A DataFrame whose 'Results Date' column is synchronized with the model information.
+    """
+    # Copy the dataframe to avoid modifying the original
+    df = results_df.copy(deep=True)
+
+    # Filter out FLAGGED_MODELS to ensure the graph is not skewed by mistakes
+    df = df[~df["model_name_for_query"].isin(FLAGGED_MODELS.keys())].reset_index(drop=True)
+
+    # Load the model info cache from disk
+    try:
+        with open("model_info_cache.pkl", "rb") as f:
+            model_info_cache = pickle.load(f)
+    except (EOFError, FileNotFoundError):
+        model_info_cache = {}
+
+    # Sort the cached (name, info) pairs by lastModified, newest first
+    sorted_dates = sorted(list(model_info_cache.items()), key=to_datetime, reverse=True)
+    df["Results Date"] = datetime.now().replace(tzinfo=timezone.utc)
+
+    # Define the date format string
+    date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
+
+    # Iterate over sorted_dates and update the dataframe
+    for name, obj in sorted_dates:
+        # Convert the lastModified string to a datetime object
+        last_modified_datetime = datetime.strptime(obj.lastModified, date_format).replace(tzinfo=timezone.utc)
+
+        # Update the "Results Date" column where "Model sha" equals obj.sha
+        df.loc[df["Model sha"] == obj.sha, "Results Date"] = last_modified_datetime
+    return df
+
+
+def create_scores_df(results_df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Generates a DataFrame containing the maximum scores until each result date.
+
+    :param results_df: A DataFrame containing result information including metric scores and result dates.
+    :return: A new DataFrame containing the maximum scores until each result date for every metric.
+    """
+    # Step 1: Ensure 'Results Date' is in datetime format and sort the DataFrame by it
+    results_df["Results Date"] = pd.to_datetime(results_df["Results Date"])
+    results_df.sort_values(by="Results Date", inplace=True)
+
+    # Step 2: Initialize the scores dictionary
+    scores = {
+        "Average ⬆️": [],
+        "ARC": [],
+        "HellaSwag": [],
+        "MMLU": [],
+        "TruthfulQA": [],
+        "Result Date": [],
+        "Model Name": [],
+    }
+
+    # Step 3: Iterate over the rows of the DataFrame and update the scores dictionary
+    for i, row in results_df.iterrows():
+        date = row["Results Date"]
+        for column in scores.keys():
+            if column == "Result Date":
+                if not scores[column] or scores[column][-1] <= date:
+                    scores[column].append(date)
+                continue
+            if column == "Model Name":
+                scores[column].append(row["model_name_for_query"])
+                continue
+            current_max = scores[column][-1] if scores[column] else float("-inf")
+            scores[column].append(max(current_max, row[column]))
+
+    # Step 4: Convert the dictionary to a DataFrame
+    return pd.DataFrame(scores)
+
+
+def create_plot_df(scores_df: pd.DataFrame) -> pd.DataFrame:
+    """
+    Transforms the scores DataFrame into a new format suitable for plotting.
+
+    :param scores_df: A DataFrame containing metric scores and result dates.
+    :return: A new DataFrame reshaped for plotting purposes.
+    """
+    # The metric columns to reshape into long format
+    cols = ["Average ⬆️", "ARC", "HellaSwag", "MMLU", "TruthfulQA"]
+
+    # Initialize the list to store DataFrames
+    dfs = []
+
+    # Iterate over the cols and create a new DataFrame for each column
+    for col in cols:
+        d = scores_df[[col, "Model Name", "Result Date"]].copy().reset_index(drop=True)
+        d["Metric Name"] = col
+        d.rename(columns={col: "Metric Value"}, inplace=True)
+        dfs.append(d)
+
+    # Concatenate all the created DataFrames
+    concat_df = pd.concat(dfs, ignore_index=True)
+
+    # Sort values by 'Result Date'
+    concat_df.sort_values(by="Result Date", inplace=True)
+    concat_df.reset_index(drop=True, inplace=True)
+
+    # Drop duplicates based on 'Metric Name' and 'Metric Value' and keep the first (earliest) occurrence
+    concat_df.drop_duplicates(subset=["Metric Name", "Metric Value"], keep="first", inplace=True)
+
+    concat_df.reset_index(drop=True, inplace=True)
+    return concat_df
+
+
+def create_metric_plot_obj(
+    df: pd.DataFrame, metrics: List[str], human_baselines: Dict[str, float], title: str
+) -> Figure:
+    """
+    Create a Plotly figure object with lines representing different metrics
+    and horizontal dotted lines representing human baselines.
+
+    :param df: The DataFrame containing the metric values, names, and dates.
+    :param metrics: A list of strings representing the names of the metrics
+        to be included in the plot.
+    :param human_baselines: A dictionary where keys are metric names
+        and values are human baseline values for the metrics.
+    :param title: A string representing the title of the plot.
+    :return: A Plotly figure object with lines representing metrics and
+        horizontal dotted lines representing human baselines.
+    """
+
+    # Filter the DataFrame based on the specified metrics
+    df = df[df["Metric Name"].isin(metrics)]
+
+    # Filter the human baselines based on the specified metrics
+    filtered_human_baselines = {k: v for k, v in human_baselines.items() if k in metrics}
+
+    # Create a line figure using plotly express with specified markers and custom data
+    fig = px.line(
+        df,
+        x="Result Date",
+        y="Metric Value",
+        color="Metric Name",
+        markers=True,
+        custom_data=["Metric Name", "Metric Value", "Model Name"],
+        title=title,
+    )
+
+    # Update hovertemplate for better hover interaction experience
+    fig.update_traces(
+        hovertemplate="<br>".join(
+            [
+                "Model Name: %{customdata[2]}",
+                "Metric Name: %{customdata[0]}",
+                "Date: %{x}",
+                "Metric Value: %{y}",
+            ]
+        )
+    )
+
+    # Update the range of the y-axis
+    fig.update_layout(yaxis_range=[0, 100])
+
+    # Create a dictionary to hold the color mapping for each metric
+    metric_color_mapping = {}
+
+    # Map each metric name to its color in the figure
+    for trace in fig.data:
+        metric_color_mapping[trace.name] = trace.line.color
+
+    # Iterate over filtered human baselines and add horizontal lines to the figure
+    for metric, value in filtered_human_baselines.items():
+        color = metric_color_mapping.get(metric, "blue")  # Retrieve color from mapping; default to blue if not found
+        location = "top left" if metric == "HellaSwag" else "bottom left"  # Set annotation position
+        # Add horizontal line with matched color and positioned annotation
+        fig.add_hline(
+            y=value,
+            line_dash="dot",
+            annotation_text=f"{metric} human baseline",
+            annotation_position=location,
+            annotation_font_size=10,
+            annotation_font_color=color,
+            line_color=color,
+        )
+
+    return fig
+
+
+# Example Usage:
+# human_baselines dictionary is defined.
+# chart = create_metric_plot_obj(scores_df, ["ARC", "HellaSwag", "MMLU", "TruthfulQA"], human_baselines, "Graph Title")
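To make the running-maximum behaviour of `create_scores_df` concrete, here is a small demonstration with toy models and scores, assuming the module above is importable:

```python
import pandas as pd

from src.display_models.plot_results import create_scores_df

# Two toy submissions: the later model (m2) scores worse everywhere.
toy = pd.DataFrame(
    {
        "model_name_for_query": ["m1", "m2"],
        "Results Date": pd.to_datetime(["2023-06-01", "2023-07-01"], utc=True),
        "Average ⬆️": [60.0, 55.0],
        "ARC": [58.0, 50.0],
        "HellaSwag": [72.0, 70.0],
        "MMLU": [50.0, 48.0],
        "TruthfulQA": [61.0, 52.0],
    }
)

print(create_scores_df(toy)[["Result Date", "Average ⬆️", "Model Name"]])
# The second row's "Average ⬆️" stays at 60.0: each column keeps the best
# score seen so far, so m2's 55.0 does not displace m1's 60.0.
```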