Spaces: Runtime error

sherzod-hakimov committed • Commit 250513d • Parent(s): b03204d

adapt short model names

Browse files:
- src/assets/text_content.py +36 -10
- src/utils.py +38 -17
- versions/v1.0.csv +26 -0
src/assets/text_content.py CHANGED

@@ -1,18 +1,44 @@
 TITLE = """<h1 align="center" id="space-title"> 🏆 CLEM Leaderboard</h1>"""
 
 INTRODUCTION_TEXT = """
-
+The CLEM Leaderboard aims to track, rank and evaluate current cLLMs (chat-optimized Large Language Models) with the suggested pronunciation “clems”.
+
+The benchmarking approach is described in [Clembench: Using Game Play to Evaluate Chat-Optimized Language Models as Conversational Agents](https://arxiv.org/abs/2305.13455).
+
+[Github repo](https://github.com/clembench/clembench)
 """
 
 SHORT_NAMES = {
     "t0.0": "",
-    "claude-v1.3
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "claude-v1.3": "cl-1.3",
+    "claude-2": "cl-2",
+    "claude-2.1": "cl-2.1",
+    "claude-instant-1.2": "cl-ins-1.2",
+    "gpt-3.5-turbo-0613": "3.5-0613",
+    "gpt-3.5-turbo-1106": "3.5-1106",
+    "gpt-4-0613": "4-0613",
+    "gpt-4-1106-preview": "4-1106",
+    "gpt-4-0314": "4-0314",
+    "gpt-4": "4",
+    "text-davinci-003": "3",
+    "luminous-supreme": "lm",
+    "koala-13b": "ko",
+    "falcon-40b": "flc",
+    "falcon-7b-instruct": "fal-7b",
+    "falcon-40b-instruct": "flc-i-40b",
+    "oasst-12b": "oas-12b",
+    "oasst-sft-4-pythia-12b-epoch-3.5": "ost-12b",
+    "vicuna-13b": "vic-13b",
+    "vicuna-33b-v1.3": "vic-33b",
+    "sheep-duck-llama-2-70b-v1.1": "sd-l2-70b",
+    "sheep-duck-llama-2-13b": "sd-l2-13b",
+    "WizardLM-70b-v1.0": "w-70b",
+    "CodeLlama-34b-Instruct-hf": "cl-34b",
+    "command": "com",
+    "Mistral-7B-Instruct-v0.1": "m-i-7b",
+    "Wizard-Vicuna-13B-Uncensored-HF": "vcn-13b",
+    "llama-2-13b-chat-hf": "l2-13b",
+    "llama-2-70b-chat-hf": "l2-70b",
+    "llama-2-7b-chat-hf": "l2-7b",
+    "koala-13B-HF": "k-13b"
 }
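For illustration only (not part of the commit): SHORT_NAMES is a plain dict, so rendering code can look labels up directly; the "t0.0" entry maps to an empty string, presumably so the temperature suffix disappears from plot labels.

```python
# Illustrative lookups against the table above (not part of the commit).
from src.assets.text_content import SHORT_NAMES

print(SHORT_NAMES["gpt-4-1106-preview"])  # -> 4-1106
print(SHORT_NAMES["t0.0"])                # -> "" (temperature suffix drops out of labels)
```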
src/utils.py CHANGED

@@ -140,17 +140,17 @@ def compare_plots(df: pd.DataFrame, LIST: list):
     X = df[list_columns[2]]
     fig, ax = plt.subplots()
     for model in LIST:
-        short = short_names[model]
-        same_flag = short_names[model][1]
+        short = short_names[model]
+        # same_flag = short_names[model][1]
         model_df = df[df[list_columns[0]] == model]
         x = model_df[list_columns[2]]
         y = model_df[list_columns[3]]
         color = plt.cm.rainbow(x / max(X))  # Use a colormap for different colors
         plt.scatter(x, y, color=color)
-        if same_flag:
-
-        else:
-
+        # if same_flag:
+        plt.annotate(f'{short}', (x, y), textcoords="offset points", xytext=(0, -15), ha='center', rotation=0)
+        # else:
+        # plt.annotate(f'{short}', (x, y), textcoords="offset points", xytext=(20, -3), ha='center', rotation=0)
         ax.grid(which='both', color='grey', linewidth=1, linestyle='-', alpha=0.2)
         ax.set_xticks(np.arange(0,110,10))
         plt.xlim(-10, 110)
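The hunk above replaces the flag-dependent branches with a single call that always places each model's short label 15 points below its marker. A minimal standalone sketch of that plt.annotate pattern, with made-up data:

```python
# Illustrative sketch of the annotation style used above (made-up point).
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter([50.0], [60.0])
# "offset points" positions the text relative to the data point:
# centered horizontally, 15 points below the marker.
ax.annotate("4-0613", (50.0, 60.0), textcoords="offset points",
            xytext=(0, -15), ha="center")
plt.show()
```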
@@ -162,6 +162,23 @@ def compare_plots(df: pd.DataFrame, LIST: list):
 
     return fig
 
+def shorten_model_name(full_name):
+    # Split the name into parts
+    parts = full_name.split('-')
+
+    # Process the name parts to keep only the parts with digits (model sizes and versions)
+    short_name_parts = [part for part in parts if any(char.isdigit() for char in part)]
+
+    if len(parts) == 1:
+        short_name = ''.join(full_name[0:min(3, len(full_name))])
+    else:
+        # Join the parts to form the short name
+        short_name = '-'.join(short_name_parts)
+
+        # Remove any leading or trailing hyphens
+        short_name = full_name[0] + '-' + short_name.strip('-')
+
+    return short_name
 
 def label_map(model_list: list) -> dict:
     '''
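A quick illustrative trace of the new helper (not part of the commit). The expected outputs assume the final hyphen-normalization lines sit inside the else branch, which is how the hunk above is read here:

```python
# Hypothetical sanity check for shorten_model_name (assumed else-branch indentation).
print(shorten_model_name("Mistral-7B-Instruct-v0.1"))     # M-7B-v0.1: digit-bearing parts, prefixed with the first letter
print(shorten_model_name("sheep-duck-llama-2-70b-v1.1"))  # s-2-70b-v1.1
print(shorten_model_name("command"))                      # com: single-part names keep their first three characters
```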
@@ -172,20 +189,24 @@ def label_map(model_list: list) -> dict:
     Returns:
     short_name: A map from long to list of short name + indication if models are same or different
     '''
-
+    short_names = {}
     for model_name in model_list:
-        splits = model_name.split('--')
-        if len(splits) != 1:
-
-
-
-
+        # splits = model_name.split('--')
+        # if len(splits) != 1:
+        #     splits[0] = SHORT_NAMES[splits[0] + '-']
+        #     splits[1] = SHORT_NAMES[splits[1] + '-']
+        #     # Define the short name and indicate there are two different models
+        #     short_names[model_name] = [splits[0] + '--' + splits[1], 0]
+        # else:
+        if model_name in SHORT_NAMES:
+            short_name = SHORT_NAMES[model_name]
         else:
-
-        # Define the short name and indicate both models are same
-        short_name[model_name] = [splits[0], 1]
+            short_name = shorten_model_name(model_name)
 
-
+        # Define the short name and indicate both models are same
+        short_names[model_name] = short_name
+
+    return short_names
 
 def filter_search(df: pd.DataFrame, query: str) -> pd.DataFrame:
     '''
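Net effect: label_map now prefers the curated SHORT_NAMES table and falls back to shorten_model_name for anything unknown (note the docstring above is now stale, since plain strings are returned rather than name/flag pairs). An illustrative call, where the second model name is invented:

```python
# Illustrative: curated lookup first, derived fallback second.
labels = label_map(["gpt-4-0613", "brand-new-model-7b"])  # second name is hypothetical
print(labels)  # {'gpt-4-0613': '4-0613', 'brand-new-model-7b': 'b-7b'}
```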
versions/v1.0.csv ADDED

@@ -0,0 +1,26 @@
+,-,all,all,imagegame,imagegame,imagegame,privateshared,privateshared,privateshared,referencegame,referencegame,referencegame,taboo,taboo,taboo,wordle,wordle,wordle,wordle_withclue,wordle_withclue,wordle_withclue,wordle_withcritic,wordle_withcritic,wordle_withcritic
+,clemscore,Average % Played,Average Quality Score,% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std),% Played,Quality Score,Quality Score (std)
+model,,,,,,,,,,,,,,,,,,,,,,,,
+CodeLlama-34b-Instruct-hf-t0.0--CodeLlama-34b-Instruct-hf-t0.0,10.34,23.96,43.15,7.5,37.67,36.83,0.0,,,15.0,83.33,40.82,74.58,29.55,44.87,53.33,0.0,0.0,13.33,8.33,16.66,4.0,100.0,
+Mistral-7B-Instruct-v0.1-t0.0--Mistral-7B-Instruct-v0.1-t0.0,2.72,17.5,15.56,,,,20.0,0.0,0.0,,,,50.85,46.67,49.01,0.0,,,16.67,0.0,0.0,0.0,,
+Wizard-Vicuna-13B-Uncensored-HF-t0.0--Wizard-Vicuna-13B-Uncensored-HF-t0.0,2.06,9.49,21.71,0.0,,,0.0,,,7.5,66.67,57.74,22.03,30.77,48.04,3.33,0.0,,30.0,11.11,33.33,3.57,0.0,
+WizardLM-70b-v1.0-t0.0--WizardLM-70b-v1.0-t0.0,16.7,51.65,32.34,0.0,,,24.0,62.3,19.84,100.0,32.5,47.43,62.71,67.57,47.46,60.0,0.0,0.0,70.0,21.43,40.53,44.83,10.26,28.49
+claude-2-t0.0--claude-2-t0.0,33.71,82.12,41.05,0.0,,,100.0,75.89,18.57,100.0,45.0,50.38,91.53,73.15,39.71,100.0,0.0,0.0,90.0,21.6,36.56,93.33,30.65,43.22
+claude-2.1-t0.0--claude-2.1-t0.0,36.38,83.08,43.79,0.0,,,100.0,73.01,23.8,100.0,55.0,50.38,94.92,69.35,41.41,100.0,0.67,3.65,93.33,29.29,40.95,93.33,35.42,41.74
+claude-instant-1.2-t0.0--claude-instant-1.2-t0.0,15.44,59.61,25.91,0.0,,,96.0,34.37,30.79,0.0,,,77.97,60.51,44.77,100.0,0.0,0.0,90.0,18.52,37.08,53.33,16.15,28.94
+claude-v1.3-t0.0--claude-v1.3-t0.0,37.64,74.24,50.7,0.0,,,100.0,84.99,18.63,100.0,85.0,36.16,79.66,72.34,37.31,96.67,0.0,0.0,100.0,31.11,39.81,43.33,30.77,48.04
+command-t0.0--command-t0.0,3.12,10.01,31.13,0.0,,,0.0,,,2.5,0.0,,47.46,17.86,39.0,0.0,,,16.67,6.67,14.91,3.45,100.0,
+falcon-7b-instruct-t0.0--falcon-7b-instruct-t0.0,0.0,14.29,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,0.0,,,0.0,,
+gpt-3.5-turbo-0613-t0.0--gpt-3.5-turbo-0613-t0.0,32.53,91.96,35.37,97.5,62.49,30.09,91.84,21.17,25.65,100.0,55.0,50.38,64.41,63.16,47.48,100.0,0.0,0.0,100.0,21.38,38.8,90.0,24.38,35.8
+gpt-3.5-turbo-1106-t0.0--gpt-3.5-turbo-1106-t0.0,30.45,77.12,39.49,40.0,51.25,34.69,46.94,27.41,33.38,100.0,52.5,50.57,72.88,76.74,39.86,100.0,0.0,0.0,86.67,30.9,44.36,93.33,37.62,45.9
+gpt-4-0314-t0.0--gpt-4-0314-t0.0,58.81,93.79,62.7,65.0,88.92,16.24,100.0,91.12,7.48,100.0,77.5,42.29,91.53,79.63,32.48,100.0,2.78,7.37,100.0,50.78,41.69,100.0,48.17,41.42
+gpt-4-0613-t0.0--gpt-4-0613-t0.0,60.9,97.22,62.64,97.5,97.28,10.38,100.0,97.34,5.02,100.0,80.0,40.51,83.05,81.97,29.03,100.0,4.25,8.62,100.0,46.11,41.85,100.0,31.5,38.1
+gpt-4-1106-preview-t0.0--gpt-4-1106-preview-t0.0,60.33,97.95,61.59,97.5,94.15,14.85,100.0,83.25,12.51,100.0,90.0,30.38,88.14,83.97,29.88,100.0,7.5,13.01,100.0,49.11,42.93,100.0,23.17,37.74
+koala-13B-HF-t0.0--koala-13B-HF-t0.0,1.25,23.22,5.38,0.0,,,0.0,,,0.0,,,52.54,16.13,37.39,100.0,0.0,0.0,10.0,0.0,0.0,0.0,,
+llama-2-13b-chat-hf-t0.0--llama-2-13b-chat-hf-t0.0,1.89,3.43,55.09,0.0,,,24.0,55.09,33.68,0.0,,,0.0,,,0.0,,,0.0,,,0.0,,
+llama-2-70b-chat-hf-t0.0--llama-2-70b-chat-hf-t0.0,1.39,3.79,36.74,0.0,,,14.0,13.48,11.98,12.5,60.0,54.77,0.0,,,0.0,,,0.0,,,0.0,,
+llama-2-7b-chat-hf-t0.0--llama-2-7b-chat-hf-t0.0,0.24,6.05,4.0,0.0,,,0.0,,,0.0,,,42.37,4.0,20.0,0.0,,,0.0,,,0.0,,
+oasst-sft-4-pythia-12b-epoch-3.5-t0.0--oasst-sft-4-pythia-12b-epoch-3.5-t0.0,0.0,14.76,0.0,0.0,,,0.0,,,0.0,,,0.0,,,100.0,0.0,0.0,3.33,0.0,,0.0,,
+sheep-duck-llama-2-13b-t0.0--sheep-duck-llama-2-13b-t0.0,6.74,34.86,19.34,0.0,,,0.0,,,97.5,33.33,47.76,89.83,0.0,0.0,0.0,,,23.33,19.05,37.8,33.33,25.0,42.49
+sheep-duck-llama-2-70b-v1.1-t0.0--sheep-duck-llama-2-70b-v1.1-t0.0,17.12,40.82,41.93,40.0,23.19,28.06,0.0,,,35.0,57.14,51.36,59.32,74.29,44.34,34.78,0.0,0.0,63.33,43.86,43.82,53.33,53.12,49.9
+vicuna-33b-v1.3-t0.0--vicuna-33b-v1.3-t0.0,9.15,17.47,52.36,15.0,23.67,24.34,40.0,34.58,26.47,0.0,,,37.29,50.0,51.18,0.0,,,23.33,53.57,46.61,6.67,100.0,0.0
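For illustration (not part of the commit), this CSV round-trips cleanly through pandas: the first two rows form a (game, metric) column MultiIndex, the third row carries the index name "model", and the leading column holds the paired model identifiers.

```python
# Illustrative: load the version file with its two-row header.
import pandas as pd

df = pd.read_csv("versions/v1.0.csv", header=[0, 1], index_col=0)

# Top-level "-" holds the aggregate clemscore; per-game metrics nest below each game.
print(df[("-", "clemscore")].sort_values(ascending=False).head())
print(df[("taboo", "% Played")].head())
```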