add models
- .gitignore +2 -0
- .pre-commit-config.yaml +0 -53
- Makefile +0 -13
- STRUCT_RES.json +222 -0
- app.py +120 -187
- src/display/css_html_js.py → css_html.py +12 -53
- pyproject.toml +0 -13
- src/about.py +0 -72
- src/display/formatting.py +0 -27
- src/display/utils.py +0 -110
- src/envs.py +0 -25
- src/leaderboard/read_evals.py +0 -196
- src/populate.py +0 -58
- src/submission/check_validity.py +0 -99
- src/submission/submit.py +0 -119
- text_content.py +41 -0
- utils.py +32 -0
.gitignore
CHANGED
@@ -11,3 +11,5 @@ eval-results/
 eval-queue-bk/
 eval-results-bk/
 logs/
+
+.history
.pre-commit-config.yaml
DELETED
@@ -1,53 +0,0 @@
-# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-default_language_version:
-  python: python3
-
-ci:
-  autofix_prs: true
-  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
-  autoupdate_schedule: quarterly
-
-repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
-    hooks:
-      - id: check-yaml
-      - id: check-case-conflict
-      - id: detect-private-key
-      - id: check-added-large-files
-        args: ['--maxkb=1000']
-      - id: requirements-txt-fixer
-      - id: end-of-file-fixer
-      - id: trailing-whitespace
-
-  - repo: https://github.com/PyCQA/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-        name: Format imports
-
-  - repo: https://github.com/psf/black
-    rev: 22.12.0
-    hooks:
-      - id: black
-        name: Format code
-        additional_dependencies: ['click==8.0.2']
-
-  - repo: https://github.com/charliermarsh/ruff-pre-commit
-    # Ruff version.
-    rev: 'v0.0.267'
-    hooks:
-      - id: ruff
Makefile
DELETED
@@ -1,13 +0,0 @@
-.PHONY: style format
-
-
-style:
-	python -m black --line-length 119 .
-	python -m isort .
-	ruff check --fix .
-
-
-quality:
-	python -m black --check --line-length 119 .
-	python -m isort --check-only .
-	ruff check .
STRUCT_RES.json
ADDED
@@ -0,0 +1,222 @@
+{
+    "deepseek-v2-lite-chat": {
+        "BaseARC": 69.01287553648069,
+        "BaseOpenbook": 64.60000000000001,
+        "📝StructARC": 40.155538694992416,
+        "📝StructOpenbook": 41.98437130002368,
+        "📝StructMMLU": 40.95659182440028,
+        "📝StructAverage": 41.03216727313879,
+        "BaseMMLU": 55.7,
+        "BaseAverage": 63.10429184549357
+    },
+    "baichuan2-7b-chat": {
+        "BaseARC": 57.596566523605155,
+        "BaseOpenbook": 60.8,
+        "📝StructARC": 31.39226100151745,
+        "📝StructOpenbook": 33.838503433578026,
+        "📝StructMMLU": 32.019719285283976,
+        "📝StructAverage": 32.41682790679315,
+        "BaseMMLU": 52.49,
+        "BaseAverage": 56.962188841201716
+    },
+    "mistral-7b-instruct-v0.3": {
+        "BaseARC": 76.39484978540773,
+        "BaseOpenbook": 74.4,
+        "📝StructARC": 42.640364188163886,
+        "📝StructOpenbook": 43.6182808430026,
+        "📝StructMMLU": 41.756990621302315,
+        "📝StructAverage": 42.67187855082293,
+        "BaseMMLU": 61.63,
+        "BaseAverage": 70.80828326180257
+    },
+    "yi-6b-chat": {
+        "BaseARC": 59.227467811158796,
+        "BaseOpenbook": 55.60000000000001,
+        "📝StructARC": 38.39150227617603,
+        "📝StructOpenbook": 38.977030547004496,
+        "📝StructMMLU": 36.34025031348594,
+        "📝StructAverage": 37.90292771222215,
+        "BaseMMLU": 58.24,
+        "BaseAverage": 57.68915593705293
+    },
+    "yi-1.5-9b-chat": {
+        "BaseARC": 85.75107296137338,
+        "BaseOpenbook": 83.0,
+        "📝StructARC": 57.01820940819423,
+        "📝StructOpenbook": 54.53469097797774,
+        "📝StructMMLU": 53.431308242228596,
+        "📝StructAverage": 54.99473620946685,
+        "BaseMMLU": 69.5,
+        "BaseAverage": 79.4170243204578
+    },
+    "llama-2-7b-chat": {
+        "BaseARC": 53.648068669527895,
+        "BaseOpenbook": 57.99999999999999,
+        "📝StructARC": 29.429059180576633,
+        "📝StructOpenbook": 33.24650722235378,
+        "📝StructMMLU": 27.884038239302765,
+        "📝StructAverage": 30.186534880744393,
+        "BaseMMLU": 34.1,
+        "BaseAverage": 48.5826895565093
+    },
+    "llama-3-8b-instruct": {
+        "BaseARC": 80.68669527896995,
+        "BaseOpenbook": 79.60000000000001,
+        "📝StructARC": 56.74317147192717,
+        "📝StructOpenbook": 57.06843476201753,
+        "📝StructMMLU": 54.63728961686302,
+        "📝StructAverage": 56.149631950269246,
+        "BaseMMLU": 68.4,
+        "BaseAverage": 76.22889842632333
+    },
+    "qwen1.5-7b-chat": {
+        "BaseARC": 70.21459227467811,
+        "BaseOpenbook": 68.60000000000001,
+        "📝StructARC": 40.92374810318665,
+        "📝StructOpenbook": 41.439734785697375,
+        "📝StructMMLU": 39.75951206275273,
+        "📝StructAverage": 40.707664983878914,
+        "BaseMMLU": 61.70508772,
+        "BaseAverage": 66.83989333155938
+    },
+    "qwen2-7b-instruct": {
+        "BaseARC": 84.29184549356223,
+        "BaseOpenbook": 82.39999999999999,
+        "📝StructARC": 58.34597875569044,
+        "📝StructOpenbook": 57.28155339805825,
+        "📝StructMMLU": 56.78738087582798,
+        "📝StructAverage": 57.47163767652555,
+        "BaseMMLU": 70.5,
+        "BaseAverage": 79.06394849785407
+    },
+    "mistral-7b-v0.3": {
+        "BaseARC": 76.65236051502146,
+        "BaseOpenbook": 72.8,
+        "📝StructARC": 46.936646433990894,
+        "📝StructOpenbook": 48.23585129055174,
+        "📝StructMMLU": 46.05517926584278,
+        "📝StructAverage": 47.075892330128475,
+        "BaseMMLU": 60.1,
+        "BaseAverage": 69.85078683834048
+    },
+    "yi-1.5-9b": {
+        "BaseARC": 88.41201716738198,
+        "BaseOpenbook": 83.0,
+        "📝StructARC": 60.60318664643399,
+        "📝StructOpenbook": 59.2943405162207,
+        "📝StructMMLU": 57.41526232409641,
+        "📝StructAverage": 59.10426316225036,
+        "BaseMMLU": 69.5,
+        "BaseAverage": 80.30400572246066
+    },
+    "yi-6b": {
+        "BaseARC": 77.6824034334764,
+        "BaseOpenbook": 73.6,
+        "📝StructARC": 46.936646433990894,
+        "📝StructOpenbook": 49.51456310679612,
+        "📝StructMMLU": 46.21888085492341,
+        "📝StructAverage": 47.55669679857014,
+        "BaseMMLU": 63.2,
+        "BaseAverage": 71.49413447782545
+    },
+    "deepseek-v2-lite": {
+        "BaseARC": 68.24034334763948,
+        "BaseOpenbook": 66.4,
+        "📝StructARC": 40.73406676783004,
+        "📝StructOpenbook": 42.97892493488042,
+        "📝StructMMLU": 41.024161265238256,
+        "📝StructAverage": 41.579050989316244,
+        "BaseMMLU": 58.3,
+        "BaseAverage": 64.3134477825465
+    },
+    "llama-3-8b": {
+        "BaseARC": 76.22317596566523,
+        "BaseOpenbook": 75.0,
+        "📝StructARC": 52.78831562974203,
+        "📝StructOpenbook": 53.04286052569264,
+        "📝StructMMLU": 51.37044261097523,
+        "📝StructAverage": 52.4005395888033,
+        "BaseMMLU": 66.6,
+        "BaseAverage": 72.60772532188841
+    },
+    "llama-2-7b": {
+        "BaseARC": 48.66952789699571,
+        "BaseOpenbook": 43.6,
+        "📝StructARC": 29.040212443095598,
+        "📝StructOpenbook": 31.730996921619703,
+        "📝StructMMLU": 29.187423497033315,
+        "📝StructAverage": 29.986210953916203,
+        "BaseMMLU": 45.7,
+        "BaseAverage": 45.98984263233191
+    },
+    "qwen2-7b": {
+        "BaseARC": 86.18025751072962,
+        "BaseOpenbook": 84.2,
+        "📝StructARC": 66.17033383915023,
+        "📝StructOpenbook": 66.13781671797301,
+        "📝StructMMLU": 65.24702121505192,
+        "📝StructAverage": 65.85172392405839,
+        "BaseMMLU": 70.3,
+        "BaseAverage": 80.22675250357655
+    },
+    "qwen1.5-7b": {
+        "BaseARC": 78.54077253218884,
+        "BaseOpenbook": 78.8,
+        "📝StructARC": 48.501517450682854,
+        "📝StructOpenbook": 50.248638408714186,
+        "📝StructMMLU": 46.709001366105916,
+        "📝StructAverage": 48.48638574183432,
+        "BaseMMLU": 61.0,
+        "BaseAverage": 72.78025751072961
+    },
+    "baichuan2-7b-base": {
+        "BaseARC": 61.630901287553655,
+        "BaseOpenbook": 62.6,
+        "📝StructARC": 32.42602427921092,
+        "📝StructOpenbook": 37.08264267108691,
+        "📝StructMMLU": 34.59571358681649,
+        "📝StructAverage": 34.7014601790381,
+        "BaseMMLU": 54.16,
+        "BaseAverage": 59.46363376251788
+    },
+    "llama-3-70b": {
+        "BaseARC": 92.1030042918455,
+        "BaseOpenbook": 89.60000000000001,
+        "📝StructARC": 73.12215477996965,
+        "📝StructOpenbook": 72.6024153445418,
+        "📝StructMMLU": 70.85211858440155,
+        "📝StructAverage": 72.19222956963766,
+        "BaseMMLU": 79.5,
+        "BaseAverage": 87.06766809728184
+    },
+    "qwen2-72b": {
+        "BaseARC": 95.79399141630901,
+        "BaseOpenbook": 96.39999999999999,
+        "📝StructARC": 79.10660091047042,
+        "📝StructOpenbook": 78.4986976083353,
+        "📝StructMMLU": 78.57972263190929,
+        "📝StructAverage": 78.72834038357168,
+        "BaseMMLU": 84.2,
+        "BaseAverage": 92.131330472103
+    },
+    "yi-1.5-34b": {
+        "BaseARC": 92.1030042918455,
+        "BaseOpenbook": 83.6,
+        "📝StructARC": 64.37784522003035,
+        "📝StructOpenbook": 63.76983187307601,
+        "📝StructMMLU": 63.296635991674755,
+        "📝StructAverage": 63.81477102826037,
+        "BaseMMLU": 77.1,
+        "BaseAverage": 84.26766809728183
+    },
+    "mixtral-8x7b-v0.1": {
+        "BaseARC": 84.0343347639485,
+        "BaseOpenbook": 77.8,
+        "📝StructARC": 62.3103186646434,
+        "📝StructOpenbook": 62.32536111768885,
+        "📝StructMMLU": 60.46643846946602,
+        "📝StructAverage": 61.70070608393275,
+        "BaseMMLU": 70.6,
+        "BaseAverage": 77.47811158798282
+    }
+}
app.py
CHANGED
@@ -1,204 +1,137 @@
-import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
-import pandas as pd
-from apscheduler.schedulers.background import BackgroundScheduler
-from huggingface_hub import snapshot_download
-
-from src.about import (
-    CITATION_BUTTON_LABEL,
-    CITATION_BUTTON_TEXT,
-    EVALUATION_QUEUE_TEXT,
-    INTRODUCTION_TEXT,
-    LLM_BENCHMARKS_TEXT,
-    TITLE,
-)
-from src.display.css_html_js import custom_css
-from src.display.utils import (
-    BENCHMARK_COLS,
-    COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
-    AutoEvalColumn,
-    ModelType,
-    fields,
-    WeightType,
-    Precision
-)
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
-from src.populate import get_evaluation_queue_df, get_leaderboard_df
-from src.submission.submit import add_new_eval
-
-
-def restart_space():
-    API.restart_space(repo_id=REPO_ID)
-
-### Space initialisation
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-
-
-LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
-def init_leaderboard(dataframe):
-    if dataframe is None or dataframe.empty:
-        raise ValueError("Leaderboard DataFrame is empty or None.")
-    return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
-        select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-            label="Select Columns to Display:",
-        ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-        filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
-        ],
-        bool_checkboxgroup_label="Hide models",
-        interactive=False,
-    )
-
-
-demo = gr.Blocks(css=custom_css)
-with demo:
-    gr.HTML(TITLE)
-    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
-    with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-            leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Model type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=20,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
-
-scheduler = BackgroundScheduler()
-scheduler.add_job(restart_space, "interval", seconds=1800)
-scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+import json
+
+import gradio as gr
+import pandas as pd
+
+from css_html import custom_css
+from text_content import ABOUT_TEXT, CITATION_BUTTON_TEXT, CITATION_BUTTON_LABEL, ACKNOWLEDGEMENT_TEXT, NOTES_TEXT, HEAD_TEXT
+from utils import (
+    AutoEvalColumn,
+    fields,
+)
+
+result_path = './STRUCT_RES.json'
+with open(result_path, 'r') as f:
+    data = json.load(f)
+
+rows = []
+for model in data:
+    row = {"model": model}
+    for key in data[model]:
+        print(key)
+        row[key] = round(data[model][key], 2)
+    rows.append(row)
+
+df = pd.DataFrame(rows)
+df = df.sort_values(by='📝StructAverage', ascending=False)
+
+COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden]
+
+COLS_LITE = [
+    c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden
+]
+
+TYPES_LITE = [
+    c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden
+]
+
+def select_columns(df, columns):
+    always_here_cols = [
+        AutoEvalColumn.model.name,
+    ]
+    # We use COLS to maintain sorting
+    filtered_df = df[
+        always_here_cols + [c for c in COLS if c in df.columns and c in columns]
+    ]
+    return filtered_df
+
+demo = gr.Blocks(css=custom_css)
+with demo:
+    with gr.Column():
+        gr.Markdown(
+            """<div style="text-align: center;"><h1> 🏅 StructEval Leaderboard</h1></div>\
+            <br>\
+            """,
+            elem_classes="markdown-text",
+        )
+
+        gr.Markdown(HEAD_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.Column():
+            with gr.Tabs(elem_classes="A100-tabs") as A100_tabs:
+                with gr.TabItem("🔍 Evaluation Table", id=0):
+                    with gr.Column():
+                        with gr.Accordion("⏬ Hidden Columns", open=False):
+                            shown_columns = gr.CheckboxGroup(
+                                choices=[
+                                    c
+                                    for c in COLS
+                                    if c
+                                    not in [
+                                        AutoEvalColumn.model.name,
+                                    ]
+                                ],
+                                value=[
+                                    c
+                                    for c in COLS_LITE
+                                    if c
+                                    not in [
+                                        AutoEvalColumn.model.name,
+                                    ]
+                                ],
+                                label="",
+                                elem_id="column-select",
+                                interactive=True,
+                            )
+
+                        leaderboard_df = gr.components.Dataframe(
+                            value=df[
+                                [
+                                    AutoEvalColumn.model.name,
+                                ]
+                                + shown_columns.value
+                            ],
+                            headers=COLS,
+                            datatype=TYPES,
+                            elem_id="leaderboard-table",
+                            interactive=False,
+                        )
+
+                        hidden_leaderboard_df = gr.components.Dataframe(
+                            value=df,
+                            headers=COLS,
+                            datatype=["str" for _ in range(len(COLS))],
+                            visible=False,
+                        )
+
+                        shown_columns.change(
+                            select_columns,
+                            [hidden_leaderboard_df, shown_columns],
+                            leaderboard_df,
+                        )
+
+
+                with gr.TabItem("📝 About", id=1):
+                    gr.Markdown(ABOUT_TEXT, elem_classes="markdown-text")
+
+                    with gr.Row():
+                        with gr.Accordion("📒 Notes"):
+                            gr.Markdown(NOTES_TEXT, elem_classes="markdown-text")
+
+                    with gr.Row():
+                        with gr.Accordion("📜 Citation", open=False):
+                            citation_button = gr.Textbox(
+                                value=CITATION_BUTTON_TEXT,
+                                label=CITATION_BUTTON_LABEL,
+                                lines=10,
+                                elem_id="citation-button",
+                                show_copy_button=True,
+                            )
+
+                    with gr.Row():
+                        with gr.Accordion("🙏 Acknowledgement", open=False):
+                            gr.Markdown(ACKNOWLEDGEMENT_TEXT)
+
+    demo.launch()
src/display/css_html_js.py → css_html.py
RENAMED
@@ -1,34 +1,32 @@
 custom_css = """
-
+#changelog-text {
+    font-size: 16px !important;
+}
+#changelog-text h2 {
+    font-size: 18px !important;
+}
 .markdown-text {
     font-size: 16px !important;
 }
-
 #models-to-add-text {
     font-size: 18px !important;
 }
-
 #citation-button span {
     font-size: 16px !important;
 }
-
 #citation-button textarea {
     font-size: 16px !important;
 }
-
 #citation-button > label > button {
     margin: 6px;
     transform: scale(1.3);
 }
-
 #leaderboard-table {
     margin-top: 15px
 }
-
 #leaderboard-table-lite {
     margin-top: 15px
 }
-
 #search-bar-table-box > div:first-child {
     background: none;
     border: none;
@@ -37,7 +35,11 @@ custom_css = """
 #search-bar {
     padding: 0px;
 }
-
+/* Hides the final AutoEvalColumn */
+#llm-benchmark-tab-table table td:last-child,
+#llm-benchmark-tab-table table th:last-child {
+    display: none;
+}
 /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
 table td:first-child,
 table th:first-child {
@@ -45,11 +47,9 @@ table th:first-child {
     overflow: auto;
     white-space: nowrap;
 }
-
 .tab-buttons button {
     font-size: 20px;
 }
-
 #scale-logo {
     border-style: none !important;
     box-shadow: none;
@@ -58,48 +58,7 @@ table th:first-child {
     margin-right: auto;
     max-width: 600px;
 }
-
 #scale-logo .download {
     display: none;
 }
-#filter_type{
-    border: 0;
-    padding-left: 0;
-    padding-top: 0;
-}
-#filter_type label {
-    display: flex;
-}
-#filter_type label > span{
-    margin-top: var(--spacing-lg);
-    margin-right: 0.5em;
-}
-#filter_type label > .wrap{
-    width: 103px;
-}
-#filter_type label > .wrap .wrap-inner{
-    padding: 2px;
-}
-#filter_type label > .wrap .wrap-inner input{
-    width: 1px
-}
-#filter-columns-type{
-    border:0;
-    padding:0.5;
-}
-#filter-columns-size{
-    border:0;
-    padding:0.5;
-}
-#box-filter > .form{
-    border: 0
-}
-"""
-
-get_window_url_params = """
-function(url_params) {
-    const params = new URLSearchParams(window.location.search);
-    url_params = Object.fromEntries(params);
-    return url_params;
-}
-"""
+"""
pyproject.toml
DELETED
@@ -1,13 +0,0 @@
-[tool.ruff]
-# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
-select = ["E", "F"]
-ignore = ["E501"] # line too long (black is taking care of this)
-line-length = 119
-fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
-
-[tool.isort]
-profile = "black"
-line_length = 119
-
-[tool.black]
-line-length = 119
src/about.py
DELETED
@@ -1,72 +0,0 @@
-from dataclasses import dataclass
-from enum import Enum
-
-@dataclass
-class Task:
-    benchmark: str
-    metric: str
-    col_name: str
-
-
-# Select your tasks here
-# ---------------------------------------------------
-class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
-
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
-
-# What does your leaderboard evaluate?
-INTRODUCTION_TEXT = """
-Intro text
-"""
-
-# Which evaluations are you running? how can people reproduce what you have?
-LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here is the commands you can run:
-
-"""
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
-"""
src/display/formatting.py
DELETED
@@ -1,27 +0,0 @@
-def model_hyperlink(link, model_name):
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
-
-
-def make_clickable_model(model_name):
-    link = f"https://huggingface.co/{model_name}"
-    return model_hyperlink(link, model_name)
-
-
-def styled_error(error):
-    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
-
-
-def styled_warning(warn):
-    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
-
-
-def styled_message(message):
-    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
-
-
-def has_no_nan_values(df, columns):
-    return df[columns].notna().all(axis=1)
-
-
-def has_nan_values(df, columns):
-    return df[columns].isna().any(axis=1)
src/display/utils.py
DELETED
@@ -1,110 +0,0 @@
-from dataclasses import dataclass, make_dataclass
-from enum import Enum
-
-import pandas as pd
-
-from src.about import Tasks
-
-def fields(raw_class):
-    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
-
-
-# These classes are for user facing column names,
-# to avoid having to change them all around the code
-# when a modif is needed
-@dataclass
-class ColumnContent:
-    name: str
-    type: str
-    displayed_by_default: bool
-    hidden: bool = False
-    never_hidden: bool = False
-
-## Leaderboard columns
-auto_eval_column_dict = []
-# Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-#Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
-for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-# Model information
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
-
-# We use make dataclass to dynamically fill the scores from Tasks
-AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
-
-## For the queue columns in the submission tab
-@dataclass(frozen=True)
-class EvalQueueColumn: # Queue column
-    model = ColumnContent("model", "markdown", True)
-    revision = ColumnContent("revision", "str", True)
-    private = ColumnContent("private", "bool", True)
-    precision = ColumnContent("precision", "str", True)
-    weight_type = ColumnContent("weight_type", "str", "Original")
-    status = ColumnContent("status", "str", True)
-
-## All the model information that we might need
-@dataclass
-class ModelDetails:
-    name: str
-    display_name: str = ""
-    symbol: str = "" # emoji
-
-
-class ModelType(Enum):
-    PT = ModelDetails(name="pretrained", symbol="🟢")
-    FT = ModelDetails(name="fine-tuned", symbol="🔶")
-    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
-    RL = ModelDetails(name="RL-tuned", symbol="🟦")
-    Unknown = ModelDetails(name="", symbol="?")
-
-    def to_str(self, separator=" "):
-        return f"{self.value.symbol}{separator}{self.value.name}"
-
-    @staticmethod
-    def from_str(type):
-        if "fine-tuned" in type or "🔶" in type:
-            return ModelType.FT
-        if "pretrained" in type or "🟢" in type:
-            return ModelType.PT
-        if "RL-tuned" in type or "🟦" in type:
-            return ModelType.RL
-        if "instruction-tuned" in type or "⭕" in type:
-            return ModelType.IFT
-        return ModelType.Unknown
-
-class WeightType(Enum):
-    Adapter = ModelDetails("Adapter")
-    Original = ModelDetails("Original")
-    Delta = ModelDetails("Delta")
-
-class Precision(Enum):
-    float16 = ModelDetails("float16")
-    bfloat16 = ModelDetails("bfloat16")
-    Unknown = ModelDetails("?")
-
-    def from_str(precision):
-        if precision in ["torch.float16", "float16"]:
-            return Precision.float16
-        if precision in ["torch.bfloat16", "bfloat16"]:
-            return Precision.bfloat16
-        return Precision.Unknown
-
-# Column selection
-COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
-
-EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
-EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
-
-BENCHMARK_COLS = [t.value.col_name for t in Tasks]
-
src/envs.py
DELETED
@@ -1,25 +0,0 @@
-import os
-
-from huggingface_hub import HfApi
-
-# Info to change for your repository
-# ----------------------------------
-TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
-
-OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
-# ----------------------------------
-
-REPO_ID = f"{OWNER}/leaderboard"
-QUEUE_REPO = f"{OWNER}/requests"
-RESULTS_REPO = f"{OWNER}/results"
-
-# If you setup a cache later, just change HF_HOME
-CACHE_PATH=os.getenv("HF_HOME", ".")
-
-# Local caches
-EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
-EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
-
-API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py
DELETED
@@ -1,196 +0,0 @@
-import glob
-import json
-import math
-import os
-from dataclasses import dataclass
-
-import dateutil
-import numpy as np
-
-from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
-from src.submission.check_validity import is_model_on_hub
-
-
-@dataclass
-class EvalResult:
-    """Represents one full evaluation. Built from a combination of the result and request file for a given run.
-    """
-    eval_name: str # org_model_precision (uid)
-    full_model: str # org/model (path on hub)
-    org: str
-    model: str
-    revision: str # commit hash, "" if main
-    results: dict
-    precision: Precision = Precision.Unknown
-    model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
-    weight_type: WeightType = WeightType.Original # Original or Adapter
-    architecture: str = "Unknown"
-    license: str = "?"
-    likes: int = 0
-    num_params: int = 0
-    date: str = "" # submission date of request file
-    still_on_hub: bool = False
-
-    @classmethod
-    def init_from_json_file(self, json_filepath):
-        """Inits the result from the specific model result file"""
-        with open(json_filepath) as fp:
-            data = json.load(fp)
-
-        config = data.get("config")
-
-        # Precision
-        precision = Precision.from_str(config.get("model_dtype"))
-
-        # Get model and org
-        org_and_model = config.get("model_name", config.get("model_args", None))
-        org_and_model = org_and_model.split("/", 1)
-
-        if len(org_and_model) == 1:
-            org = None
-            model = org_and_model[0]
-            result_key = f"{model}_{precision.value.name}"
-        else:
-            org = org_and_model[0]
-            model = org_and_model[1]
-            result_key = f"{org}_{model}_{precision.value.name}"
-        full_model = "/".join(org_and_model)
-
-        still_on_hub, _, model_config = is_model_on_hub(
-            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-        )
-        architecture = "?"
-        if model_config is not None:
-            architectures = getattr(model_config, "architectures", None)
-            if architectures:
-                architecture = ";".join(architectures)
-
-        # Extract results available in this file (some results are split in several files)
-        results = {}
-        for task in Tasks:
-            task = task.value
-
-            # We average all scores of a given metric (not all metrics are present in all files)
-            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
-            if accs.size == 0 or any([acc is None for acc in accs]):
-                continue
-
-            mean_acc = np.mean(accs) * 100.0
-            results[task.benchmark] = mean_acc
-
-        return self(
-            eval_name=result_key,
-            full_model=full_model,
-            org=org,
-            model=model,
-            results=results,
-            precision=precision,
-            revision= config.get("model_sha", ""),
-            still_on_hub=still_on_hub,
-            architecture=architecture
-        )
-
-    def update_with_request_file(self, requests_path):
-        """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-
-        try:
-            with open(request_file, "r") as f:
-                request = json.load(f)
-            self.model_type = ModelType.from_str(request.get("model_type", ""))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
-            self.license = request.get("license", "?")
-            self.likes = request.get("likes", 0)
-            self.num_params = request.get("params", 0)
-            self.date = request.get("submitted_time", "")
-        except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
-
-    def to_dict(self):
-        """Converts the Eval Result to a dict compatible with our dataframe display"""
-        average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
-        data_dict = {
-            "eval_name": self.eval_name, # not a column, just a save name,
-            AutoEvalColumn.precision.name: self.precision.value.name,
-            AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            AutoEvalColumn.weight_type.name: self.weight_type.value.name,
-            AutoEvalColumn.architecture.name: self.architecture,
-            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-            AutoEvalColumn.revision.name: self.revision,
-            AutoEvalColumn.average.name: average,
-            AutoEvalColumn.license.name: self.license,
-            AutoEvalColumn.likes.name: self.likes,
-            AutoEvalColumn.params.name: self.num_params,
-            AutoEvalColumn.still_on_hub.name: self.still_on_hub,
-        }
-
-        for task in Tasks:
-            data_dict[task.value.col_name] = self.results[task.value.benchmark]
-
-        return data_dict
-
-
-def get_request_file_for_model(requests_path, model_name, precision):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-    request_files = os.path.join(
-        requests_path,
-        f"{model_name}_eval_request_*.json",
-    )
-    request_files = glob.glob(request_files)
-
-    # Select correct request file (precision)
-    request_file = ""
-    request_files = sorted(request_files, reverse=True)
-    for tmp_request_file in request_files:
-        with open(tmp_request_file, "r") as f:
-            req_content = json.load(f)
-            if (
-                req_content["status"] in ["FINISHED"]
-                and req_content["precision"] == precision.split(".")[-1]
-            ):
-                request_file = tmp_request_file
-    return request_file
-
-
-def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
-    """From the path of the results folder root, extract all needed info for results"""
-    model_result_filepaths = []
-
-    for root, _, files in os.walk(results_path):
-        # We should only have json files in model results
-        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
-            continue
-
-        # Sort the files by date
-        try:
-            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
-        except dateutil.parser._parser.ParserError:
-            files = [files[-1]]
-
-        for file in files:
-            model_result_filepaths.append(os.path.join(root, file))
-
-    eval_results = {}
-    for model_result_filepath in model_result_filepaths:
-        # Creation of result
-        eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        eval_result.update_with_request_file(requests_path)
-
-        # Store results of same eval together
-        eval_name = eval_result.eval_name
-        if eval_name in eval_results.keys():
-            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
-        else:
-            eval_results[eval_name] = eval_result
-
-    results = []
-    for v in eval_results.values():
-        try:
-            v.to_dict() # we test if the dict version is complete
-            results.append(v)
-        except KeyError: # not all eval values present
-            continue
-
-    return results
src/populate.py
DELETED
@@ -1,58 +0,0 @@
-import json
-import os
-
-import pandas as pd
-
-from src.display.formatting import has_no_nan_values, make_clickable_model
-from src.display.utils import AutoEvalColumn, EvalQueueColumn
-from src.leaderboard.read_evals import get_raw_eval_results
-
-
-def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
-    """Creates a dataframe from all the individual experiment results"""
-    raw_data = get_raw_eval_results(results_path, requests_path)
-    all_data_json = [v.to_dict() for v in raw_data]
-
-    df = pd.DataFrame.from_records(all_data_json)
-    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
-    df = df[cols].round(decimals=2)
-
-    # filter out if any of the benchmarks have not been produced
-    df = df[has_no_nan_values(df, benchmark_cols)]
-    return df
-
-
-def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
-    """Creates the different dataframes for the evaluation queues requestes"""
-    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
-    all_evals = []
-
-    for entry in entries:
-        if ".json" in entry:
-            file_path = os.path.join(save_path, entry)
-            with open(file_path) as fp:
-                data = json.load(fp)
-
-            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-            data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
-            all_evals.append(data)
-        elif ".md" not in entry:
-            # this is a folder
-            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
-            for sub_entry in sub_entries:
-                file_path = os.path.join(save_path, entry, sub_entry)
-                with open(file_path) as fp:
-                    data = json.load(fp)
-
-                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
-                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-                all_evals.append(data)
-
-    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
-    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
-    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-    df_running = pd.DataFrame.from_records(running_list, columns=cols)
-    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py
DELETED
@@ -1,99 +0,0 @@
-import json
-import os
-import re
-from collections import defaultdict
-from datetime import datetime, timedelta, timezone
-
-import huggingface_hub
-from huggingface_hub import ModelCard
-from huggingface_hub.hf_api import ModelInfo
-from transformers import AutoConfig
-from transformers.models.auto.tokenization_auto import AutoTokenizer
-
-def check_model_card(repo_id: str) -> tuple[bool, str]:
-    """Checks if the model card and license exist and have been filled"""
-    try:
-        card = ModelCard.load(repo_id)
-    except huggingface_hub.utils.EntryNotFoundError:
-        return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
-
-    # Enforce license metadata
-    if card.data.license is None:
-        if not ("license_name" in card.data and "license_link" in card.data):
-            return False, (
-                "License not found. Please add a license to your model card using the `license` metadata or a"
-                " `license_name`/`license_link` pair."
-            )
-
-    # Enforce card content
-    if len(card.text) < 200:
-        return False, "Please add a description to your model card, it is too short."
-
-    return True, ""
-
-def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
-    """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
-    try:
-        config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-        if test_tokenizer:
-            try:
-                tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
-            except ValueError as e:
-                return (
-                    False,
-                    f"uses a tokenizer which is not in a transformers release: {e}",
-                    None
-                )
-            except Exception as e:
-                return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
-        return True, None, config
-
-    except ValueError:
-        return (
-            False,
-            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
-            None
-        )
-
-    except Exception as e:
-        return False, "was not found on hub!", None
-
-
-def get_model_size(model_info: ModelInfo, precision: str):
-    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
-    try:
-        model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError):
-        return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
-    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
-    model_size = size_factor * model_size
-    return model_size
-
-def get_model_arch(model_info: ModelInfo):
-    """Gets the model architecture from the configuration"""
-    return model_info.config.get("architectures", "Unknown")
-
-def already_submitted_models(requested_models_dir: str) -> set[str]:
-    """Gather a list of already submitted models to avoid duplicates"""
-    depth = 1
-    file_names = []
-    users_to_submission_dates = defaultdict(list)
-
-    for root, _, files in os.walk(requested_models_dir):
-        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
-        if current_depth == depth:
-            for file in files:
-                if not file.endswith(".json"):
-                    continue
-                with open(os.path.join(root, file), "r") as f:
-                    info = json.load(f)
-                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
-                    # Select organisation
-                    if info["model"].count("/") == 0 or "submitted_time" not in info:
-                        continue
-                    organisation, _ = info["model"].split("/")
-                    users_to_submission_dates[organisation].append(info["submitted_time"])
-
-    return set(file_names), users_to_submission_dates
src/submission/submit.py
DELETED
@@ -1,119 +0,0 @@
-import json
-import os
-from datetime import datetime, timezone
-
-from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
-from src.submission.check_validity import (
-    already_submitted_models,
-    check_model_card,
-    get_model_size,
-    is_model_on_hub,
-)
-
-REQUESTED_MODELS = None
-USERS_TO_SUBMISSION_DATES = None
-
-def add_new_eval(
-    model: str,
-    base_model: str,
-    revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
-):
-    global REQUESTED_MODELS
-    global USERS_TO_SUBMISSION_DATES
-    if not REQUESTED_MODELS:
-        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
-
-    user_name = ""
-    model_path = model
-    if "/" in model:
-        user_name = model.split("/")[0]
-        model_path = model.split("/")[1]
-
-    precision = precision.split(" ")[0]
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
-    # Does the model actually exist?
-    if revision == "":
-        revision = "main"
-
-    # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model}" {error}')
-
-    # Is the model info correctly filled?
-    try:
-        model_info = API.model_info(repo_id=model, revision=revision)
-    except Exception:
-        return styled_error("Could not get your model information. Please fill it up properly.")
-
-    model_size = get_model_size(model_info=model_info, precision=precision)
-
-    # Were the model card and license filled?
-    try:
-        license = model_info.cardData["license"]
-    except Exception:
-        return styled_error("Please select a license for your model")
-
-    modelcard_OK, error_msg = check_model_card(model)
-    if not modelcard_OK:
-        return styled_error(error_msg)
-
-    # Seems good, creating the eval
-    print("Adding new eval")
-
-    eval_entry = {
-        "model": model,
-        "base_model": base_model,
-        "revision": revision,
-        "precision": precision,
-        "weight_type": weight_type,
-        "status": "PENDING",
-        "submitted_time": current_time,
-        "model_type": model_type,
-        "likes": model_info.likes,
-        "params": model_size,
-        "license": license,
-        "private": False,
-    }
-
-    # Check for duplicate submission
-    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
-        return styled_warning("This model has been already submitted.")
-
-    print("Creating eval file")
-    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
-    os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
-
-    with open(out_path, "w") as f:
-        f.write(json.dumps(eval_entry))
-
-    print("Uploading eval file")
-    API.upload_file(
-        path_or_fileobj=out_path,
-        path_in_repo=out_path.split("eval-queue/")[1],
-        repo_id=QUEUE_REPO,
-        repo_type="dataset",
-        commit_message=f"Add {model} to eval queue",
-    )
-
-    # Remove the local file
-    os.remove(out_path)
-
-    return styled_message(
-        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
-    )
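For readers tracing what this removal drops: `add_new_eval` validated a submission, built an `eval_entry` dict, wrote it to disk, and uploaded it to the queue dataset repo, using `f"{model}_{revision}_{precision}"` as the duplicate-detection key. A sketch of the request file it produced; the keys mirror the dict above, while `my-org/my-model` and all concrete values are illustrative:

```python
import json

# Illustrative values only; field names match the deleted eval_entry dict.
eval_entry = {
    "model": "my-org/my-model",   # hypothetical Hub repo id
    "base_model": "",
    "revision": "main",
    "precision": "float16",
    "weight_type": "Original",
    "status": "PENDING",
    "submitted_time": "2024-08-02T00:00:00Z",
    "model_type": "pretrained",
    "likes": 0,
    "params": 6.738,
    "license": "apache-2.0",
    "private": False,
}

# The file landed at
# <EVAL_REQUESTS_PATH>/my-org/my-model_eval_request_False_float16_Original.json
# before being uploaded and then removed locally.
print(json.dumps(eval_entry, indent=2))
```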
text_content.py
ADDED
@@ -0,0 +1,41 @@
+HEAD_TEXT = """
+This is the official leaderboard for the 🏅StructEval benchmark. Starting from an atomic test objective, StructEval deepens and broadens the evaluation by conducting a **structured assessment across multiple cognitive levels and critical concepts**, and therefore offers a comprehensive, robust and consistent evaluation for LLMs.
+
+Please refer to the 🐱[StructEval repository](https://github.com/c-box/StructEval) for model evaluation and 📖[our paper]() for experimental analysis.
+
+🚀 **_Latest News_**
+* [2024.8.2] We released the first version of the StructEval leaderboard, covering 21 open-source language models; more datasets and models are coming soon🔥🔥🔥.
+
+* [2024.7.31] We regenerated the StructEval benchmark from the latest [Wikipedia](https://www.wikipedia.org/) pages (20240601) using the [GPT-4o-mini](https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/) model, which minimizes the impact of data contamination🔥🔥🔥.
+"""
+
+ABOUT_TEXT = """# What is StructEval?
+Evaluation is the baton for the development of large language models.
+Current evaluations typically employ *a single-item assessment paradigm* for each atomic test objective, which struggles to discern whether a model genuinely possesses the required capabilities or merely memorizes/guesses the answers to specific questions.
+To this end, we propose a novel evaluation framework referred to as ***StructEval***.
+Starting from an atomic test objective, StructEval deepens and broadens the evaluation by conducting a **structured assessment across multiple cognitive levels and critical concepts**, and therefore offers a comprehensive, robust and consistent evaluation for LLMs.
+Experiments demonstrate that **StructEval serves as a reliable tool for resisting the risk of data contamination and reducing the interference of potential biases**, thereby providing more reliable and consistent conclusions regarding model capabilities.
+Our framework also sheds light on the design of future principled and trustworthy LLM evaluation protocols.
+
+# How to evaluate?
+Our 🐱[repo](https://github.com/c-box/StructEval) provides easy-to-use scripts both for evaluating LLMs on existing StructEval benchmarks and for generating new benchmarks with the StructEval framework.
+
+# Contact
+If you have any questions, feel free to reach out to us at [[email protected]](mailto:[email protected]).
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+
+CITATION_BUTTON_TEXT = r"""
+coming soon
+"""
+
+ACKNOWLEDGEMENT_TEXT = """
+Inspired by the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
+"""
+
+
+NOTES_TEXT = """
+* For most models, base MMLU results are taken from their official technical reports; for models without reported results, we evaluate with OpenCompass.
+* For the other 2 base benchmarks and all 3 structured benchmarks: chat models are evaluated in the 0-shot setting, and completion models are evaluated in the 0-shot setting with ppl (perplexity-based scoring).
+"""
utils.py
ADDED
@@ -0,0 +1,32 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class ColumnContent:
+    name: str
+    type: str
+    displayed_by_default: bool
+    hidden: bool = False
+
+
+def fields(raw_class):
+    return [
+        v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"
+    ]
+
+
+@dataclass(frozen=True)
+class AutoEvalColumn:  # Auto evals column
+    model = ColumnContent("model", "markdown", True)
+    base_average = ColumnContent("BaseAverage", "number", True)
+    struct_average = ColumnContent("📝StructAverage", "number", True)
+
+    base_mmlu = ColumnContent("BaseMMLU", "number", True)
+    struct_mmlu = ColumnContent("📝StructMMLU", "number", True)
+
+    base_arc = ColumnContent("BaseARC", "number", True)
+    struct_arc = ColumnContent("📝StructARC", "number", True)
+
+    base_openbook = ColumnContent("BaseOpenbook", "number", True)
+    struct_openbook = ColumnContent("📝StructOpenbook", "number", True)
+
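A note on the new helper: `fields` works because `AutoEvalColumn` keeps its `ColumnContent` values as plain, unannotated class attributes (so the dataclass machinery ignores them), and filtering dunder keys out of `__dict__` leaves exactly the column descriptors, in declaration order. A minimal sketch of how app.py presumably consumes it; the variable names below are illustrative, not taken from this diff:

```python
from utils import AutoEvalColumn, fields

# Every ColumnContent declared on AutoEvalColumn, in declaration order.
cols = fields(AutoEvalColumn)

# Column names shown by default, e.g. to configure a Gradio dataframe.
default_cols = [c.name for c in cols if c.displayed_by_default and not c.hidden]
col_types = [c.type for c in cols]

print(default_cols)  # ['model', 'BaseAverage', '📝StructAverage', ...]
print(col_types)     # ['markdown', 'number', 'number', ...]
```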