AlyxTeam commited on
Commit
2bc5a2c
1 Parent(s): 836c21f

feat: init

Browse files
README.md CHANGED
@@ -4,41 +4,16 @@ emoji: 🥇
4
  colorFrom: green
5
  colorTo: indigo
6
  sdk: gradio
 
7
  app_file: app.py
8
  pinned: true
9
  license: apache-2.0
10
  ---
11
 
12
- # Start the configuration
 
13
 
14
- Most of the variables to change for a default leaderboard are in `src/env.py` (replace the path for your leaderboard) and `src/about.py` (for tasks).
 
15
 
16
- Results files should have the following format and be stored as json files:
17
- ```json
18
- {
19
- "config": {
20
- "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
21
- "model_name": "path of the model on the hub: org/model",
22
- "model_sha": "revision on the hub",
23
- },
24
- "results": {
25
- "task_name": {
26
- "metric_name": score,
27
- },
28
- "task_name2": {
29
- "metric_name": score,
30
- }
31
- }
32
- }
33
- ```
34
-
35
- Request files are created automatically by this tool.
36
-
37
- If you encounter problem on the space, don't hesitate to restart it to remove the create eval-queue, eval-queue-bk, eval-results and eval-results-bk created folder.
38
-
39
- # Code logic for more complex edits
40
-
41
- You'll find
42
- - the main table' columns names and properties in `src/display/utils.py`
43
- - the logic to read all results and request files, then convert them in dataframe lines, in `src/leaderboard/read_evals.py`, and `src/populate.py`
44
- - the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
 
4
  colorFrom: green
5
  colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 4.26.0
8
  app_file: app.py
9
  pinned: true
10
  license: apache-2.0
11
  ---
12
 
13
+ Depending on whether you want to use lighteval or lm_eval for your evaluations, you might need to complete the
14
+ requirements.txt file with the relevant dependencies.
15
 
16
+ You'll also need to select, in app.py, whether you want to use lighteval or lm_eval, by keeping the correct
17
+ import and commenting out the other.
18
 
19
+ All the environment variables you should need to edit to launch the evaluations are in `src/envs.py`.
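
For reference, the backend switch in app.py is just an import swap; a minimal sketch using the two backend modules added in this commit:

```python
# In app.py: keep exactly one of these imports and comment out the other.
# from main_backend_lighteval import run_auto_eval   # lighteval backend
from main_backend_harness import run_auto_eval        # lm_eval (harness) backend
```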
app.py CHANGED
@@ -1,204 +1,71 @@
1
- import gradio as gr
2
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
- import pandas as pd
4
  from apscheduler.schedulers.background import BackgroundScheduler
5
- from huggingface_hub import snapshot_download
6
-
7
- from src.about import (
8
- CITATION_BUTTON_LABEL,
9
- CITATION_BUTTON_TEXT,
10
- EVALUATION_QUEUE_TEXT,
11
- INTRODUCTION_TEXT,
12
- LLM_BENCHMARKS_TEXT,
13
- TITLE,
14
- )
15
- from src.display.css_html_js import custom_css
16
- from src.display.utils import (
17
- BENCHMARK_COLS,
18
- COLS,
19
- EVAL_COLS,
20
- EVAL_TYPES,
21
- AutoEvalColumn,
22
- ModelType,
23
- fields,
24
- WeightType,
25
- Precision
26
- )
27
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
28
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
29
- from src.submission.submit import add_new_eval
30
-
31
-
32
- def restart_space():
33
- API.restart_space(repo_id=REPO_ID)
34
-
35
- ### Space initialisation
36
- try:
37
- print(EVAL_REQUESTS_PATH)
38
- snapshot_download(
39
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
40
- )
41
- except Exception:
42
- restart_space()
43
- try:
44
- print(EVAL_RESULTS_PATH)
45
- snapshot_download(
46
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
47
- )
48
- except Exception:
49
- restart_space()
50
-
51
-
52
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
53
-
54
- (
55
- finished_eval_queue_df,
56
- running_eval_queue_df,
57
- pending_eval_queue_df,
58
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
59
-
60
- def init_leaderboard(dataframe):
61
- if dataframe is None or dataframe.empty:
62
- raise ValueError("Leaderboard DataFrame is empty or None.")
63
- return Leaderboard(
64
- value=dataframe,
65
- datatype=[c.type for c in fields(AutoEvalColumn)],
66
- select_columns=SelectColumns(
67
- default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
68
- cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
69
- label="Select Columns to Display:",
70
- ),
71
- search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
72
- hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
73
- filter_columns=[
74
- ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
75
- ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
76
- ColumnFilter(
77
- AutoEvalColumn.params.name,
78
- type="slider",
79
- min=0.01,
80
- max=150,
81
- label="Select the number of parameters (B)",
82
- ),
83
- ColumnFilter(
84
- AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
85
- ),
86
- ],
87
- bool_checkboxgroup_label="Hide models",
88
- interactive=False,
89
- )
90
-
91
-
92
- demo = gr.Blocks(css=custom_css)
93
- with demo:
94
- gr.HTML(TITLE)
95
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
96
-
97
- with gr.Tabs(elem_classes="tab-buttons") as tabs:
98
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
99
- leaderboard = init_leaderboard(LEADERBOARD_DF)
100
-
101
- with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
102
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
103
-
104
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
105
- with gr.Column():
106
- with gr.Row():
107
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
108
-
109
- with gr.Column():
110
- with gr.Accordion(
111
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
112
- open=False,
113
- ):
114
- with gr.Row():
115
- finished_eval_table = gr.components.Dataframe(
116
- value=finished_eval_queue_df,
117
- headers=EVAL_COLS,
118
- datatype=EVAL_TYPES,
119
- row_count=5,
120
- )
121
- with gr.Accordion(
122
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
123
- open=False,
124
- ):
125
- with gr.Row():
126
- running_eval_table = gr.components.Dataframe(
127
- value=running_eval_queue_df,
128
- headers=EVAL_COLS,
129
- datatype=EVAL_TYPES,
130
- row_count=5,
131
- )
132
-
133
- with gr.Accordion(
134
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
135
- open=False,
136
- ):
137
- with gr.Row():
138
- pending_eval_table = gr.components.Dataframe(
139
- value=pending_eval_queue_df,
140
- headers=EVAL_COLS,
141
- datatype=EVAL_TYPES,
142
- row_count=5,
143
- )
144
- with gr.Row():
145
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
146
-
147
- with gr.Row():
148
- with gr.Column():
149
- model_name_textbox = gr.Textbox(label="Model name")
150
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
151
- model_type = gr.Dropdown(
152
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
153
- label="Model type",
154
- multiselect=False,
155
- value=None,
156
- interactive=True,
157
- )
158
-
159
- with gr.Column():
160
- precision = gr.Dropdown(
161
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
162
- label="Precision",
163
- multiselect=False,
164
- value="float16",
165
- interactive=True,
166
- )
167
- weight_type = gr.Dropdown(
168
- choices=[i.value.name for i in WeightType],
169
- label="Weights type",
170
- multiselect=False,
171
- value="Original",
172
- interactive=True,
173
- )
174
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
175
-
176
- submit_button = gr.Button("Submit Eval")
177
- submission_result = gr.Markdown()
178
- submit_button.click(
179
- add_new_eval,
180
- [
181
- model_name_textbox,
182
- base_model_name_textbox,
183
- revision_name_textbox,
184
- precision,
185
- weight_type,
186
- model_type,
187
- ],
188
- submission_result,
189
- )
190
-
191
- with gr.Row():
192
- with gr.Accordion("📙 Citation", open=False):
193
- citation_button = gr.Textbox(
194
- value=CITATION_BUTTON_TEXT,
195
- label=CITATION_BUTTON_LABEL,
196
- lines=20,
197
- elem_id="citation-button",
198
- show_copy_button=True,
199
- )
200
-
201
- scheduler = BackgroundScheduler()
202
- scheduler.add_job(restart_space, "interval", seconds=1800)
203
- scheduler.start()
204
- demo.queue(default_concurrency_limit=40).launch()
 
1
+ import logging
2
  from apscheduler.schedulers.background import BackgroundScheduler
3
+
4
+ from src.logging import configure_root_logger
5
+
6
+ logging.getLogger("httpx").setLevel(logging.WARNING)
7
+ logging.getLogger("numexpr").setLevel(logging.WARNING)
8
+ logging.getLogger("absl").setLevel(logging.WARNING)
9
+ configure_root_logger()
10
+
11
+ from functools import partial
12
+
13
+ import gradio as gr
14
+ # Choose the lighteval or harness backend: keep one import, comment out the other
15
+ # from main_backend_lighteval import run_auto_eval
16
+ from main_backend_harness import run_auto_eval
17
+
18
+ from src.display.log_visualizer import log_file_to_html_string
19
+ from src.display.css_html_js import dark_mode_gradio_js
20
+ from src.envs import REFRESH_RATE, REPO_ID, QUEUE_REPO, RESULTS_REPO
21
+ from src.logging import setup_logger, log_file
22
+
23
+ logging.basicConfig(level=logging.INFO)
24
+ logger = setup_logger(__name__)
25
+
26
+
27
+ intro_md = f"""
28
+ # Intro
29
+ This is a visualizer for the auto evaluator.
30
+ """
31
+
32
+ links_md = f"""
33
+ # Important links
34
+
35
+ | Description | Link |
36
+ |-----------------|------|
37
+ | Leaderboard | [{REPO_ID}](https://huggingface.co/spaces/{REPO_ID}) |
38
+ | Queue Repo | [{QUEUE_REPO}](https://huggingface.co/datasets/{QUEUE_REPO}) |
39
+ | Results Repo | [{RESULTS_REPO}](https://huggingface.co/datasets/{RESULTS_REPO}) |
40
+ """
41
+
42
+ def auto_eval():
43
+ logger.info("Triggering Auto Eval")
44
+ run_auto_eval()
45
+
46
+
47
+ reverse_order_checkbox = gr.Checkbox(label="Reverse Order", value=True)
48
+
49
+ with gr.Blocks(js=dark_mode_gradio_js) as demo:
50
+ gr.Markdown(intro_md)
51
+ with gr.Tab("Application"):
52
+ output_html = gr.HTML(partial(log_file_to_html_string, reverse=reverse_order_checkbox), every=1)
53
+ with gr.Row():
54
+ download_button = gr.DownloadButton("Download Log File", value=log_file)
55
+ with gr.Accordion('Log View Configuration', open=False):
56
+ reverse_order_checkbox.render()
57
+ # Add a button that, when pressed, triggers run_auto_eval
58
+ button = gr.Button("Manually Run Evaluation")
59
+ gr.Markdown(links_md)
60
+
61
+ #dummy = gr.Markdown(auto_eval, every=REFRESH_RATE, visible=False)
62
+
63
+ button.click(fn=auto_eval, inputs=[], outputs=[])
64
+
65
+ if __name__ == '__main__':
66
+ scheduler = BackgroundScheduler()
67
+ scheduler.add_job(auto_eval, "interval", seconds=REFRESH_RATE)
68
+ scheduler.start()
69
+ demo.queue(default_concurrency_limit=40).launch(server_name="0.0.0.0",
70
+ show_error=True,
71
+ server_port=7860)
custom_tasks.py ADDED
@@ -0,0 +1,90 @@
1
+ # ruff: noqa: F405, F403, F401
2
+ """
3
+ Custom evaluation tasks for lighteval. Complete this file with your own configuration if you want to use a custom lighteval task.
4
+
5
+ This file generally creates just a TASKS_TABLE and TASKS_GROUPS, which are then imported by LightEval.
6
+
7
+ Author:
8
+ """
9
+ from lighteval.tasks.lighteval_task import LightevalTaskConfig
10
+ from lighteval.tasks.requests import Doc
11
+ from lighteval.tasks.tasks_prompt_formatting import LETTER_INDICES
12
+
13
+
14
+ ## EVAL WITH NO SUBSET ##
15
+ # This is how you create a simple task (like hellaswag) which has one single subset
16
+ # attached to it, and one possible evaluation.
17
+ task = LightevalTaskConfig(
18
+ name="myothertask",
19
+ prompt_function="prompt_fn", # must be defined in the file or imported from src/lighteval/tasks/tasks_prompt_formatting.py
20
+ suite=["community"],
21
+ hf_repo="",
22
+ hf_subset="default",
23
+ hf_avail_splits=[],
24
+ evaluation_splits=[],
25
+ few_shots_split="",
26
+ few_shots_select="",
27
+ metric=[""],
28
+ )
29
+
30
+ ## EVALS WITH SUBSET
31
+ # This is how you create a subset task (like MMLU), which has several subsets,
32
+ # each being its own evaluation task.
33
+
34
+ # fmt: off
35
+ SAMPLE_SUBSETS = [] # list of all the subsets to use for this eval
36
+ # fmt: on
37
+
38
+
39
+ class CustomSubsetTask(LightevalTaskConfig):
40
+ def __init__(
41
+ self,
42
+ name,
43
+ hf_subset,
44
+ ):
45
+ super().__init__(
46
+ name=name,
47
+ hf_subset=hf_subset,
48
+ prompt_function="prompt_fn", # must be defined in the file
49
+ hf_repo="",
50
+ metric=[""],
51
+ hf_avail_splits=[],
52
+ evaluation_splits=[],
53
+ few_shots_split="",
54
+ few_shots_select="",
55
+ suite=["community"],
56
+ generation_size=-1,
57
+ stop_sequence=None,
58
+ output_regex=None,
59
+ frozen=False,
60
+ )
61
+
62
+
63
+ ## DEFINE YOUR PROMPT FUNCTIONS
64
+ # Define as many as you need for your different tasks
65
+ def prompt_fn(line, task_name: str = None):
66
+ """Defines how to go from a dataset line to a doc object.
67
+ Follow examples in src/lighteval/tasks/tasks_prompt_formatting.py, or get more info
68
+ about what this function should do in the README.
69
+ """
70
+ return Doc(
71
+ task_name=task_name,
72
+ query="",
73
+ choices="",
74
+ gold_index=0,
75
+ instruction="",
76
+ )
77
+
78
+
79
+ ## STORE YOUR EVALS
80
+ SUBSET_TASKS = [CustomSubsetTask(name=f"mytask:{subset}", hf_subset=subset) for subset in SAMPLE_SUBSETS]
81
+ _TASKS = SUBSET_TASKS + [task]
82
+
83
+ ## MODULE LOGIC
84
+ # You should not need to touch this
85
+ # Convert to dict for lighteval
86
+ TASKS_TABLE = [task.as_dict() for task in _TASKS]
87
+
88
+ if __name__ == "__main__":
89
+ print([t["name"] for t in TASKS_TABLE])
90
+ print(len(TASKS_TABLE))
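
As a complement to the `prompt_fn` stub above, here is a hedged sketch of what a filled-in prompt function could look like for a multiple-choice dataset; the column names (`question`, `choices`, `answer`) are hypothetical and must be adapted to your own dataset:

```python
from lighteval.tasks.requests import Doc

def mcq_prompt_fn(line, task_name: str = None):
    """Maps one dataset line to a Doc, assuming hypothetical columns
    `question` (str), `choices` (list of str) and `answer` (int index of the gold choice)."""
    return Doc(
        task_name=task_name,
        query=f"Question: {line['question']}\nAnswer:",
        choices=[f" {choice}" for choice in line["choices"]],
        gold_index=line["answer"],
        instruction="",
    )
```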
main_backend_harness.py ADDED
@@ -0,0 +1,76 @@
1
+ import logging
2
+ import pprint
3
+
4
+ from huggingface_hub import snapshot_download
5
+
6
+ logging.getLogger("openai").setLevel(logging.WARNING)
7
+
8
+ from src.backend.run_eval_suite_harness import run_evaluation
9
+ from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request, PENDING_STATUS, RUNNING_STATUS, FINISHED_STATUS, FAILED_STATUS
10
+ from src.backend.sort_queue import sort_models_by_priority
11
+
12
+ from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO, EVAL_RESULTS_PATH_BACKEND, DEVICE, API, LIMIT, TOKEN
13
+ from src.envs import TASKS_HARNESS, NUM_FEWSHOT
14
+ from src.logging import setup_logger
15
+
16
+
17
+
18
+ # logging.basicConfig(level=logging.ERROR)
19
+ logger = setup_logger(__name__)
20
+ pp = pprint.PrettyPrinter(width=80)
21
+
22
+
23
+ snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
24
+ snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
25
+
26
+ def run_auto_eval():
27
+ current_pending_status = [PENDING_STATUS]
28
+
29
+ # pull the eval dataset from the hub and parse any eval requests
30
+ # check completed evals and set them to finished
31
+ check_completed_evals(
32
+ api=API,
33
+ checked_status=RUNNING_STATUS,
34
+ completed_status=FINISHED_STATUS,
35
+ failed_status=FAILED_STATUS,
36
+ hf_repo=QUEUE_REPO,
37
+ local_dir=EVAL_REQUESTS_PATH_BACKEND,
38
+ hf_repo_results=RESULTS_REPO,
39
+ local_dir_results=EVAL_RESULTS_PATH_BACKEND
40
+ )
41
+
42
+ # Get all eval request that are PENDING, if you want to run other evals, change this parameter
43
+ eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
44
+ # Sort the evals by priority (first submitted first run)
45
+ eval_requests = sort_models_by_priority(api=API, models=eval_requests)
46
+
47
+ print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
48
+
49
+ if len(eval_requests) == 0:
50
+ return
51
+
52
+ eval_request = eval_requests[0]
53
+ logger.info(pp.pformat(eval_request))
54
+
55
+ set_eval_request(
56
+ api=API,
57
+ eval_request=eval_request,
58
+ set_to_status=RUNNING_STATUS,
59
+ hf_repo=QUEUE_REPO,
60
+ local_dir=EVAL_REQUESTS_PATH_BACKEND,
61
+ )
62
+
63
+ run_evaluation(
64
+ eval_request=eval_request,
65
+ task_names=TASKS_HARNESS,
66
+ num_fewshot=NUM_FEWSHOT,
67
+ local_dir=EVAL_RESULTS_PATH_BACKEND,
68
+ results_repo=RESULTS_REPO,
69
+ batch_size="auto",
70
+ device=DEVICE,
71
+ limit=LIMIT
72
+ )
73
+
74
+
75
+ if __name__ == "__main__":
76
+ run_auto_eval()
main_backend_lighteval.py ADDED
@@ -0,0 +1,87 @@
1
+ import logging
2
+ import pprint
3
+
4
+ from huggingface_hub import snapshot_download
5
+
6
+ logging.getLogger("openai").setLevel(logging.WARNING)
7
+
8
+ from src.backend.run_eval_suite_lighteval import run_evaluation
9
+ from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request, PENDING_STATUS, RUNNING_STATUS, FINISHED_STATUS, FAILED_STATUS
10
+ from src.backend.sort_queue import sort_models_by_priority
11
+
12
+ from src.envs import QUEUE_REPO, EVAL_REQUESTS_PATH_BACKEND, RESULTS_REPO, EVAL_RESULTS_PATH_BACKEND, API, LIMIT, TOKEN, ACCELERATOR, VENDOR, REGION, TASKS_LIGHTEVAL
13
+ from src.logging import setup_logger
14
+
15
+ logger = setup_logger(__name__)
16
+
17
+ # logging.basicConfig(level=logging.ERROR)
18
+ pp = pprint.PrettyPrinter(width=80)
19
+
20
+ snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
21
+ snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60, token=TOKEN)
22
+
23
+ def run_auto_eval():
24
+ current_pending_status = [PENDING_STATUS]
25
+
26
+ # pull the eval dataset from the hub and parse any eval requests
27
+ # check completed evals and set them to finished
28
+ check_completed_evals(
29
+ api=API,
30
+ checked_status=RUNNING_STATUS,
31
+ completed_status=FINISHED_STATUS,
32
+ failed_status=FAILED_STATUS,
33
+ hf_repo=QUEUE_REPO,
34
+ local_dir=EVAL_REQUESTS_PATH_BACKEND,
35
+ hf_repo_results=RESULTS_REPO,
36
+ local_dir_results=EVAL_RESULTS_PATH_BACKEND
37
+ )
38
+
39
+ # Get all eval request that are PENDING, if you want to run other evals, change this parameter
40
+ eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
41
+ # Sort the evals by priority (first submitted first run)
42
+ eval_requests = sort_models_by_priority(api=API, models=eval_requests)
43
+
44
+ logger.info(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
45
+
46
+ if len(eval_requests) == 0:
47
+ return
48
+
49
+ eval_request = eval_requests[0]
50
+ logger.info(pp.pformat(eval_request))
51
+
52
+
53
+ set_eval_request(
54
+ api=API,
55
+ eval_request=eval_request,
56
+ set_to_status=RUNNING_STATUS,
57
+ hf_repo=QUEUE_REPO,
58
+ local_dir=EVAL_REQUESTS_PATH_BACKEND,
59
+ )
60
+
61
+ # This still needs to be done: pick the instance automatically based on the model
62
+ #instance_size, instance_type = get_instance_for_model(eval_request)
63
+ # For GPU
64
+ # instance_size, instance_type = "small", "g4dn.xlarge"
65
+ # For CPU
66
+ # Updated naming available at https://huggingface.co/docs/inference-endpoints/pricing
67
+ instance_size, instance_type = "x4", "intel-icl"
68
+ logger.info(f'Starting Evaluation of {eval_request.json_filepath} on Inference endpoints: {instance_size} {instance_type}')
69
+
70
+ run_evaluation(
71
+ eval_request=eval_request,
72
+ task_names=TASKS_LIGHTEVAL,
73
+ local_dir=EVAL_RESULTS_PATH_BACKEND,
74
+ batch_size=1,
75
+ accelerator=ACCELERATOR,
76
+ region=REGION,
77
+ vendor=VENDOR,
78
+ instance_size=instance_size,
79
+ instance_type=instance_type,
80
+ limit=LIMIT
81
+ )
82
+
83
+ logger.info(f'Completed Evaluation of {eval_request.json_filepath} on Inference endpoints: {instance_size} {instance_type}')
84
+
85
+
86
+ if __name__ == "__main__":
87
+ run_auto_eval()
requirements.txt CHANGED
@@ -1,16 +1,18 @@
1
- APScheduler
2
- black
3
- datasets
4
- gradio
5
- gradio[oauth]
6
- gradio_leaderboard==0.0.9
7
- gradio_client
8
  huggingface-hub>=0.18.0
9
- matplotlib
10
- numpy
11
- pandas
12
- python-dateutil
13
- tqdm
14
- transformers
15
- tokenizers>=0.15.0
16
- sentencepiece
1
+ APScheduler==3.10.1
2
+ black==23.11.0
3
+ click==8.1.3
4
  huggingface-hub>=0.18.0
5
+ python-dateutil==2.8.2
6
+ requests==2.28.2
7
+ tqdm==4.65.0
8
+ accelerate>=0.26.0
9
+ sentencepiece
10
+
11
+ # Evaluation suites
12
+ lighteval
13
+ lm_eval==0.4.3
14
+
15
+ # Log Visualizer
16
+ BeautifulSoup4==4.12.2
17
+ lxml==4.9.3
18
+ rich==13.3.4
scripts/create_request_file.py ADDED
@@ -0,0 +1,105 @@
1
+ import json
2
+ import os
3
+ import pprint
4
+ import re
5
+ from datetime import datetime, timezone
6
+
7
+ import click
8
+ from colorama import Fore
9
+ from huggingface_hub import HfApi, snapshot_download
10
+ from src.envs import TOKEN, EVAL_REQUESTS_PATH, QUEUE_REPO
11
+
12
+ precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ", "float32")
13
+ model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
14
+ weight_types = ("Original", "Delta", "Adapter")
15
+
16
+
17
+ def get_model_size(model_info, precision: str):
18
+ size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
19
+ try:
20
+ model_size = round(model_info.safetensors["total"] / 1e9, 3)
21
+ except (AttributeError, TypeError):
22
+ try:
23
+ size_match = re.search(size_pattern, model_info.modelId.lower())
24
+ model_size = size_match.group(0)
25
+ model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
26
+ except AttributeError:
27
+ return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
28
+
29
+ size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
30
+ model_size = size_factor * model_size
31
+ return model_size
32
+
33
+
34
+ def main():
35
+ api = HfApi()
36
+ current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
37
+ snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", token=TOKEN)
38
+
39
+ model_name = click.prompt("Enter model name")
40
+ revision = click.prompt("Enter revision", default="main")
41
+ precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions))
42
+ model_type = click.prompt("Enter model type", type=click.Choice(model_types))
43
+ weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
44
+ base_model = click.prompt("Enter base model", default="")
45
+ status = click.prompt("Enter status", default="FINISHED")
46
+
47
+ try:
48
+ model_info = api.model_info(repo_id=model_name, revision=revision)
49
+ except Exception as e:
50
+ print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
51
+ return 1
52
+
53
+ model_size = get_model_size(model_info=model_info, precision=precision)
54
+
55
+ try:
56
+ license = model_info.cardData["license"]
57
+ except Exception:
58
+ license = "?"
59
+
60
+ eval_entry = {
61
+ "model": model_name,
62
+ "base_model": base_model,
63
+ "revision": revision,
64
+ "private": False,
65
+ "precision": precision,
66
+ "weight_type": weight_type,
67
+ "status": status,
68
+ "submitted_time": current_time,
69
+ "model_type": model_type,
70
+ "likes": model_info.likes,
71
+ "params": model_size,
72
+ "license": license,
73
+ }
74
+
75
+ user_name = ""
76
+ model_path = model_name
77
+ if "/" in model_name:
78
+ user_name = model_name.split("/")[0]
79
+ model_path = model_name.split("/")[1]
80
+
81
+ pprint.pprint(eval_entry)
82
+
83
+ if click.confirm("Do you want to continue? This request file will be pushed to the hub"):
84
+ click.echo("continuing...")
85
+
86
+ out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}"
87
+ os.makedirs(out_dir, exist_ok=True)
88
+ out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json"
89
+
90
+ with open(out_path, "w") as f:
91
+ f.write(json.dumps(eval_entry))
92
+
93
+ api.upload_file(
94
+ path_or_fileobj=out_path,
95
+ path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1],
96
+ repo_id=QUEUE_REPO,
97
+ repo_type="dataset",
98
+ commit_message=f"Add {model_name} to eval queue",
99
+ )
100
+ else:
101
+ click.echo("aborting...")
102
+
103
+
104
+ if __name__ == "__main__":
105
+ main()
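
For reference, a request file produced by this script is written under `eval-queue/<org>/` and pushed to the requests dataset as `<org>/<model>_eval_request_False_<precision>_<weight_type>.json`; it looks roughly like the following (all values below are hypothetical):

```json
{
  "model": "org/model",
  "base_model": "",
  "revision": "main",
  "private": false,
  "precision": "float16",
  "weight_type": "Original",
  "status": "PENDING",
  "submitted_time": "2024-01-01T00:00:00Z",
  "model_type": "pretrained",
  "likes": 0,
  "params": 7.0,
  "license": "apache-2.0"
}
```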
scripts/fix_harness_import.py ADDED
@@ -0,0 +1,11 @@
1
+ """This file should be used after pip install -r requirements.
2
+ It creates a folder not ported during harness package creation (as they don't use a Manifest file atm and it ignore `.json` files).
3
+ It will need to be updated if we want to use the harness' version of big bench to actually copy the json files.
4
+ """
5
+ import os
6
+
7
+ import lm_eval
8
+
9
+ if __name__ == "__main__":
10
+ lm_eval_path = lm_eval.__path__[0]
11
+ os.makedirs(os.path.join(lm_eval_path, "datasets", "bigbench_resources"), exist_ok=True)
src/about.py DELETED
@@ -1,72 +0,0 @@
1
- from dataclasses import dataclass
2
- from enum import Enum
3
-
4
- @dataclass
5
- class Task:
6
- benchmark: str
7
- metric: str
8
- col_name: str
9
-
10
-
11
- # Select your tasks here
12
- # ---------------------------------------------------
13
- class Tasks(Enum):
14
- # task_key in the json file, metric_key in the json file, name to display in the leaderboard
15
- task0 = Task("anli_r1", "acc", "ANLI")
16
- task1 = Task("logiqa", "acc_norm", "LogiQA")
17
-
18
- NUM_FEWSHOT = 0 # Change with your few shot
19
- # ---------------------------------------------------
20
-
21
-
22
-
23
- # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
25
-
26
- # What does your leaderboard evaluate?
27
- INTRODUCTION_TEXT = """
28
- Intro text
29
- """
30
-
31
- # Which evaluations are you running? how can people reproduce what you have?
32
- LLM_BENCHMARKS_TEXT = f"""
33
- ## How it works
34
-
35
- ## Reproducibility
36
- To reproduce our results, here is the commands you can run:
37
-
38
- """
39
-
40
- EVALUATION_QUEUE_TEXT = """
41
- ## Some good practices before submitting a model
42
-
43
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
44
- ```python
45
- from transformers import AutoConfig, AutoModel, AutoTokenizer
46
- config = AutoConfig.from_pretrained("your model name", revision=revision)
47
- model = AutoModel.from_pretrained("your model name", revision=revision)
48
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
49
- ```
50
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
51
-
52
- Note: make sure your model is public!
53
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
54
-
55
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
56
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
57
-
58
- ### 3) Make sure your model has an open license!
59
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
60
-
61
- ### 4) Fill up your model card
62
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
63
-
64
- ## In case of model failure
65
- If your model is displayed in the `FAILED` category, its execution stopped.
66
- Make sure you have followed the above steps first.
67
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
68
- """
69
-
70
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
71
- CITATION_BUTTON_TEXT = r"""
72
- """
src/backend/manage_requests.py ADDED
@@ -0,0 +1,146 @@
1
+ import glob
2
+ import json
3
+ from dataclasses import dataclass
4
+ from typing import Optional
5
+
6
+ from huggingface_hub import HfApi, snapshot_download
7
+ from src.envs import TOKEN
8
+ from src.logging import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ PENDING_STATUS = "PENDING"
13
+ RUNNING_STATUS = "RUNNING"
14
+ FINISHED_STATUS = "FINISHED"
15
+ FAILED_STATUS = "FAILED"
16
+
17
+ @dataclass
18
+ class EvalRequest:
19
+ """This class represents one evaluation request file.
20
+ """
21
+ model: str
22
+ status: str
23
+ json_filepath: str
24
+ weight_type: str = "Original"
25
+ model_type: str = "" # pretrained, finetuned, with RL
26
+ precision: str = "" # float16, bfloat16
27
+ revision: str = "main" # commit hash
28
+ submitted_time: Optional[str] = "2022-05-18T11:40:22.519222" # random date just so that we can still order requests by date
29
+ model_type: Optional[str] = None # pretrained, fine-tuned, etc - define your own categories in
30
+ likes: Optional[int] = 0
31
+ params: Optional[int] = None
32
+ license: Optional[str] = ""
33
+ base_model: Optional[str] = ""
34
+ private: Optional[bool] = False
35
+
36
+ def get_model_args(self):
37
+ """Edit this function if you want to manage more complex quantization issues. You'll need to map it to
38
+ the evaluation suite you chose.
39
+ """
40
+ model_args = f"pretrained={self.model},revision={self.revision}"
41
+
42
+ if self.precision in ["float16", "bfloat16"]:
43
+ model_args += f",dtype={self.precision}"
44
+
45
+ # Quantized models need some added config, the install of bits and bytes, etc
46
+ else:
47
+ raise Exception(f"Unknown precision {self.precision}.")
48
+
49
+ return model_args
50
+
51
+
52
+ def set_eval_request(api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str):
53
+ """Updates a given eval request with its new status on the hub (running, completed, failed, ...)"""
54
+ json_filepath = eval_request.json_filepath
55
+
56
+ with open(json_filepath) as fp:
57
+ data = json.load(fp)
58
+
59
+ data["status"] = set_to_status
60
+
61
+ with open(json_filepath, "w") as f:
62
+ f.write(json.dumps(data))
63
+
64
+ api.upload_file(
65
+ path_or_fileobj=json_filepath,
66
+ path_in_repo=json_filepath.replace(local_dir, ""),
67
+ repo_id=hf_repo,
68
+ repo_type="dataset",
69
+ )
70
+
71
+
72
+ def get_eval_requests(job_status: list, local_dir: str, hf_repo: str) -> list[EvalRequest]:
73
+ """Gets all pending evaluation requests and return a list in which private
74
+ models appearing first, followed by public models sorted by the number of
75
+ likes.
76
+
77
+ Returns:
78
+ `list[EvalRequest]`: a list of model info dicts.
79
+ """
80
+ snapshot_download(repo_id=hf_repo, revision="main", local_dir=local_dir, repo_type="dataset", max_workers=60, token=TOKEN)
81
+ json_files = glob.glob(f"{local_dir}/**/*.json", recursive=True)
82
+
83
+ eval_requests = []
84
+ for json_filepath in json_files:
85
+ with open(json_filepath) as fp:
86
+ data = json.load(fp)
87
+ if data["status"] in job_status:
88
+ data["json_filepath"] = json_filepath
89
+ eval_request = EvalRequest(**data)
90
+ eval_requests.append(eval_request)
91
+
92
+ return eval_requests
93
+
94
+
95
+ def eval_was_running(eval_request: EvalRequest):
96
+ """Checks whether a file says it's RUNNING to determine whether to FAIL"""
97
+ json_filepath = eval_request.json_filepath
98
+
99
+ with open(json_filepath) as fp:
100
+ data = json.load(fp)
101
+
102
+ status = data["status"]
103
+ return status == RUNNING_STATUS
104
+
105
+ def check_completed_evals(
106
+ api: HfApi,
107
+ hf_repo: str,
108
+ local_dir: str,
109
+ checked_status: str,
110
+ completed_status: str,
111
+ failed_status: str,
112
+ hf_repo_results: str,
113
+ local_dir_results: str,
114
+ ):
115
+ """Checks if the currently running evals are completed, if yes, update their status on the hub."""
116
+ snapshot_download(
117
+ repo_id=hf_repo_results,
118
+ revision="main",
119
+ local_dir=local_dir_results,
120
+ repo_type="dataset",
121
+ max_workers=60,
122
+ token=TOKEN
123
+ )
124
+
125
+ running_evals = get_eval_requests(checked_status, hf_repo=hf_repo, local_dir=local_dir)
126
+
127
+ for eval_request in running_evals:
128
+ model = eval_request.model
129
+ logger.info("====================================")
130
+ logger.info(f"Checking {model}")
131
+
132
+ output_path = model
133
+ output_file = f"{local_dir_results}/{output_path}/results*.json"
134
+ output_file_exists = len(glob.glob(output_file)) > 0
135
+
136
+ if output_file_exists:
137
+ logger.info(
138
+ f"EXISTS output file exists for {model} setting it to {completed_status}"
139
+ )
140
+ set_eval_request(api, eval_request, completed_status, hf_repo, local_dir)
141
+ else:
142
+ if eval_was_running(eval_request=eval_request):
143
+ logger.info(
144
+ f"No result file found for {model} setting it to {failed_status}"
145
+ )
146
+ set_eval_request(api, eval_request, failed_status, hf_repo, local_dir)
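
As a quick sanity check of how a parsed request maps to lm_eval model arguments, here is a hedged sketch using `EvalRequest.get_model_args()` (the repo id and file path are hypothetical):

```python
from src.backend.manage_requests import EvalRequest

req = EvalRequest(
    model="org/model",   # hypothetical hub repo id
    status="PENDING",
    json_filepath="eval-queue-bk/org/model_eval_request_False_float16_Original.json",
    precision="float16",
    revision="main",
)
print(req.get_model_args())
# -> pretrained=org/model,revision=main,dtype=float16
```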
src/backend/run_eval_suite_harness.py ADDED
@@ -0,0 +1,78 @@
1
+ import json
2
+ import os
3
+ import logging
4
+ from datetime import datetime
5
+
6
+ from lm_eval import tasks, evaluator, utils
7
+ from lm_eval.tasks import TaskManager
8
+
9
+ from src.envs import RESULTS_REPO, API
10
+ from src.backend.manage_requests import EvalRequest
11
+ from src.logging import setup_logger
12
+
13
+ from typing import Union
14
+
15
+ logging.getLogger("openai").setLevel(logging.WARNING)
16
+ logger = setup_logger(__name__)
17
+
18
+ def run_evaluation(eval_request: EvalRequest, task_names: list, num_fewshot: int, batch_size: Union[int, str], device: str, local_dir: str, results_repo: str, no_cache: bool =True, limit: int =None):
19
+ """Runs one evaluation for the current evaluation request file, then pushes the results to the hub.
20
+
21
+ Args:
22
+ eval_request (EvalRequest): Input evaluation request file representation
23
+ task_names (list): Tasks to launch
24
+ num_fewshot (int): Number of few shots to use
25
+ batch_size (int or str): Selected batch size or 'auto'
26
+ device (str): "cpu" or "cuda:0", depending on what you assigned to the space
27
+ local_dir (str): Where to save the results locally
28
+ results_repo (str): To which repository to upload the results
29
+ no_cache (bool, optional): Whether to use a cache or not
30
+ limit (int, optional): Whether to use a number of samples only for the evaluation - only for debugging
31
+
32
+ Returns:
33
+ dict: the results dictionary returned by lm_eval's `simple_evaluate`
34
+ """
35
+ if limit:
36
+ logger.info(
37
+ "WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT."
38
+ )
39
+
40
+ task_manager = TaskManager()
41
+ all_tasks = task_manager.all_tasks
42
+ task_names = utils.pattern_match(task_names, all_tasks)
43
+
44
+ logger.info(f"Selected Tasks: {task_names}")
45
+
46
+ results = evaluator.simple_evaluate(
47
+ model="hf",
48
+ model_args=eval_request.get_model_args(),
49
+ tasks=task_names,
50
+ num_fewshot=num_fewshot,
51
+ batch_size=batch_size,
52
+ device=device,
53
+ limit=limit,
54
+ write_out=True # Whether to write out an example document and model input, for checking task integrity
55
+ )
56
+
57
+ results["config"]["model_dtype"] = eval_request.precision
58
+ results["config"]["model_name"] = eval_request.model
59
+ results["config"]["model_sha"] = eval_request.revision
60
+
61
+ dumped = json.dumps(results, indent=2)
62
+ logger.info(dumped)
63
+
64
+ output_path = os.path.join(local_dir, *eval_request.model.split("/"), f"results_{datetime.now()}.json")
65
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
66
+ with open(output_path, "w") as f:
67
+ f.write(dumped)
68
+
69
+ logger.info(evaluator.make_table(results))
70
+
71
+ API.upload_file(
72
+ path_or_fileobj=output_path,
73
+ path_in_repo=f"{eval_request.model}/results_{datetime.now()}.json",
74
+ repo_id=results_repo,
75
+ repo_type="dataset",
76
+ )
77
+
78
+ return results
src/backend/run_eval_suite_lighteval.py ADDED
@@ -0,0 +1,88 @@
1
+ import json
2
+ import argparse
3
+ import logging
4
+ from datetime import datetime
5
+
6
+ from lighteval.main_accelerate import main, EnvConfig, create_model_config, load_model
7
+
8
+ from src.envs import RESULTS_REPO, CACHE_PATH, TOKEN
9
+ from src.backend.manage_requests import EvalRequest
10
+ from src.logging import setup_logger
11
+
12
+ logging.getLogger("openai").setLevel(logging.WARNING)
13
+ logger = setup_logger(__name__)
14
+
15
+ def run_evaluation(eval_request: EvalRequest, task_names: str, batch_size: int, local_dir: str, accelerator: str, region: str, vendor: str, instance_size: str, instance_type: str, limit=None):
16
+ """Runs one evaluation for the current evaluation request file using lighteval, then pushes the results to the hub.
17
+
18
+ Args:
19
+ eval_request (EvalRequest): Input evaluation request file representation
20
+ task_names (str): Comma-separated string of the tasks to launch
21
+ batch_size (int): Selected batch size
22
+ accelerator (str): Inference endpoint parameter for running the evaluation
23
+ region (str): Inference endpoint parameter for running the evaluation
24
+ vendor (str): Inference endpoint parameter for running the evaluation
25
+ instance_size (str): Inference endpoint parameter for running the evaluation
26
+ instance_type (str): Inference endpoint parameter for running the evaluation
27
+ local_dir (str): Where to save the results locally
29
+ limit (int, optional): Whether to use a number of samples only for the evaluation - only for debugging
30
+ """
31
+
32
+ if limit:
33
+ logger.info("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.")
34
+
35
+ args_dict = {
36
+ # Endpoint parameters
37
+ "endpoint_model_name":eval_request.model,
38
+ "accelerator": accelerator,
39
+ "vendor": vendor,
40
+ "region": region,
41
+ "instance_size": instance_size,
42
+ "instance_type": instance_type,
43
+ "reuse_existing": False,
44
+ "model_dtype": eval_request.precision,
45
+ "revision": eval_request.revision,
46
+ # Save parameters
47
+ "push_results_to_hub": True,
48
+ "save_details": True,
49
+ "push_details_to_hub": True,
50
+ "public_run": False,
51
+ "cache_dir": CACHE_PATH,
52
+ "results_org": RESULTS_REPO,
53
+ "output_dir": local_dir,
54
+ "job_id": str(datetime.now()),
55
+ # Experiment parameters
56
+ "override_batch_size": batch_size,
57
+ "custom_tasks": "custom_tasks.py",
58
+ "tasks": task_names,
59
+ "max_samples": limit,
60
+ "use_chat_template": False,
61
+ "system_prompt": None,
62
+ # Parameters which would be set to things by the kwargs if actually using argparse
63
+ "inference_server_address": None,
64
+ "model_args": None,
65
+ "num_fewshot_seeds": None,
66
+ "delta_weights": False,
67
+ "adapter_weights": False
68
+ }
69
+ args = argparse.Namespace(**args_dict)
70
+
71
+ try:
72
+ results = main(args)
73
+
74
+ results["config"]["model_dtype"] = eval_request.precision
75
+ results["config"]["model_name"] = eval_request.model
76
+ results["config"]["model_sha"] = eval_request.revision
77
+
78
+ dumped = json.dumps(results, indent=2)
79
+ logger.info(dumped)
80
+ except Exception as e: # if eval failed, we force a cleanup
81
+ env_config = EnvConfig(token=TOKEN, cache_dir=args.cache_dir)
82
+
83
+ model_config = create_model_config(args=args, accelerator=accelerator)
84
+ model, _ = load_model(config=model_config, env_config=env_config)
85
+ model.cleanup()
+ raise  # re-raise after cleanup; otherwise `results` is undefined at the return below
86
+
87
+
88
+ return results
src/backend/sort_queue.py ADDED
@@ -0,0 +1,28 @@
1
+ import re
2
+ from dataclasses import dataclass
3
+
4
+ from huggingface_hub import HfApi
5
+
6
+ from src.backend.manage_requests import EvalRequest
7
+
8
+
9
+ @dataclass
10
+ class ModelMetadata:
11
+ likes: int = 0
12
+ size: int = 15
13
+
14
+ # All the functions below sort the models in the queue based on different parameters
15
+ def sort_models_by_priority(api: HfApi, models: list[EvalRequest]) -> list[EvalRequest]:
16
+ private_models = [model for model in models if model.private]
17
+ public_models = [model for model in models if not model.private]
18
+
19
+ return sort_by_submit_date(private_models) + sort_by_submit_date(public_models)
20
+
21
+ def sort_by_submit_date(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
22
+ return sorted(eval_requests, key=lambda x: x.submitted_time, reverse=False)
23
+
24
+ def sort_by_size(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
25
+ return sorted(eval_requests, key=lambda x: x.params, reverse=False)
26
+
27
+ def sort_by_likes(eval_requests: list[EvalRequest]) -> list[EvalRequest]:
28
+ return sorted(eval_requests, key=lambda x: x.likes, reverse=False)
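
A tiny illustration of the priority ordering implemented above, with hypothetical requests: private models come first, and each group is ordered by submission date:

```python
from src.backend.manage_requests import EvalRequest
from src.backend.sort_queue import sort_models_by_priority

reqs = [
    EvalRequest(model="org/public-new", status="PENDING", json_filepath="", submitted_time="2024-02-01T00:00:00"),
    EvalRequest(model="org/private-old", status="PENDING", json_filepath="", submitted_time="2024-01-01T00:00:00", private=True),
    EvalRequest(model="org/public-old", status="PENDING", json_filepath="", submitted_time="2024-01-15T00:00:00"),
]
print([r.model for r in sort_models_by_priority(api=None, models=reqs)])
# -> ['org/private-old', 'org/public-old', 'org/public-new']
```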
src/display/css_html_js.py CHANGED
@@ -1,105 +1,20 @@
1
- custom_css = """
2
-
3
- .markdown-text {
4
- font-size: 16px !important;
5
- }
6
-
7
- #models-to-add-text {
8
- font-size: 18px !important;
9
- }
10
-
11
- #citation-button span {
12
- font-size: 16px !important;
13
- }
14
-
15
- #citation-button textarea {
16
- font-size: 16px !important;
17
- }
18
-
19
- #citation-button > label > button {
20
- margin: 6px;
21
- transform: scale(1.3);
22
- }
23
-
24
- #leaderboard-table {
25
- margin-top: 15px
26
- }
27
-
28
- #leaderboard-table-lite {
29
- margin-top: 15px
30
- }
31
-
32
- #search-bar-table-box > div:first-child {
33
- background: none;
34
- border: none;
35
- }
36
-
37
- #search-bar {
38
- padding: 0px;
39
- }
40
-
41
- /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
42
- table td:first-child,
43
- table th:first-child {
44
- max-width: 400px;
45
- overflow: auto;
46
- white-space: nowrap;
47
- }
48
-
49
- .tab-buttons button {
50
- font-size: 20px;
51
- }
52
-
53
- #scale-logo {
54
- border-style: none !important;
55
- box-shadow: none;
56
- display: block;
57
- margin-left: auto;
58
- margin-right: auto;
59
- max-width: 600px;
60
- }
61
 
62
- #scale-logo .download {
63
- display: none;
64
- }
65
- #filter_type{
66
- border: 0;
67
- padding-left: 0;
68
- padding-top: 0;
69
- }
70
- #filter_type label {
71
- display: flex;
72
- }
73
- #filter_type label > span{
74
- margin-top: var(--spacing-lg);
75
- margin-right: 0.5em;
76
- }
77
- #filter_type label > .wrap{
78
- width: 103px;
79
- }
80
- #filter_type label > .wrap .wrap-inner{
81
- padding: 2px;
82
- }
83
- #filter_type label > .wrap .wrap-inner input{
84
- width: 1px
85
- }
86
- #filter-columns-type{
87
- border:0;
88
- padding:0.5;
89
- }
90
- #filter-columns-size{
91
- border:0;
92
- padding:0.5;
93
- }
94
- #box-filter > .form{
95
- border: 0
96
  }
97
  """
98
-
99
- get_window_url_params = """
100
- function(url_params) {
101
- const params = new URLSearchParams(window.location.search);
102
- url_params = Object.fromEntries(params);
103
- return url_params;
104
- }
105
- """
 
1
+ style_content = """
2
+ pre, code {
3
+ background-color: #272822;
4
+ }
5
+ .scrollable {
6
+ font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace;
7
+ height: 500px;
8
+ overflow: auto;
9
+ }
10
+ """
11
+ dark_mode_gradio_js = """
12
+ function refresh() {
13
+ const url = new URL(window.location);
14
 
15
+ if (url.searchParams.get('__theme') !== 'dark') {
16
+ url.searchParams.set('__theme', 'dark');
17
+ window.location.href = url.href;
18
+ }
 
19
  }
20
  """
src/display/formatting.py DELETED
@@ -1,27 +0,0 @@
1
- def model_hyperlink(link, model_name):
2
- return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
3
-
4
-
5
- def make_clickable_model(model_name):
6
- link = f"https://huggingface.co/{model_name}"
7
- return model_hyperlink(link, model_name)
8
-
9
-
10
- def styled_error(error):
11
- return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
12
-
13
-
14
- def styled_warning(warn):
15
- return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
16
-
17
-
18
- def styled_message(message):
19
- return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
20
-
21
-
22
- def has_no_nan_values(df, columns):
23
- return df[columns].notna().all(axis=1)
24
-
25
-
26
- def has_nan_values(df, columns):
27
- return df[columns].isna().any(axis=1)
src/display/log_visualizer.py ADDED
@@ -0,0 +1,40 @@
1
+ from io import StringIO
2
+ from pathlib import Path
3
+
4
+ from bs4 import BeautifulSoup
5
+ from rich.console import Console
6
+ from rich.syntax import Syntax
7
+
8
+ from src.display.css_html_js import style_content
9
+ from src.envs import NUM_LINES_VISUALIZE
10
+ from src.logging import log_file
11
+
12
+
13
+ def log_file_to_html_string(reverse=True):
14
+ with open(log_file, "rt") as f:
15
+ lines = f.readlines()
16
+ lines = lines[-NUM_LINES_VISUALIZE:]
17
+
18
+ if reverse:
19
+ lines = reversed(lines)
20
+
21
+ output = "".join(lines)
22
+ syntax = Syntax(output, "python", theme="monokai", word_wrap=True)
23
+
24
+ console = Console(record=True, width=150, style="#272822", file=StringIO())
25
+ console.print(syntax)
26
+ html_content = console.export_html(inline_styles=True)
27
+
28
+ # Parse the HTML content using BeautifulSoup
29
+ soup = BeautifulSoup(html_content, 'lxml')
30
+
31
+ # Modify the <pre> tag and add custom styles
32
+ pre_tag = soup.pre
33
+ pre_tag['class'] = 'scrollable'
34
+ del pre_tag['style']
35
+
36
+ # Add your custom styles and the .scrollable CSS to the <style> tag
37
+ style_tag = soup.style
38
+ style_tag.append(style_content)
39
+
40
+ return soup.prettify()
src/display/utils.py DELETED
@@ -1,110 +0,0 @@
1
- from dataclasses import dataclass, make_dataclass
2
- from enum import Enum
3
-
4
- import pandas as pd
5
-
6
- from src.about import Tasks
7
-
8
- def fields(raw_class):
9
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
-
11
-
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
- @dataclass
16
- class ColumnContent:
17
- name: str
18
- type: str
19
- displayed_by_default: bool
20
- hidden: bool = False
21
- never_hidden: bool = False
22
-
23
- ## Leaderboard columns
24
- auto_eval_column_dict = []
25
- # Init
26
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
27
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
28
- #Scores
29
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
30
- for task in Tasks:
31
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
32
- # Model information
33
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
34
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
35
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
36
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
37
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
38
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
39
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
40
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
41
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
42
-
43
- # We use make dataclass to dynamically fill the scores from Tasks
44
- AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
45
-
46
- ## For the queue columns in the submission tab
47
- @dataclass(frozen=True)
48
- class EvalQueueColumn: # Queue column
49
- model = ColumnContent("model", "markdown", True)
50
- revision = ColumnContent("revision", "str", True)
51
- private = ColumnContent("private", "bool", True)
52
- precision = ColumnContent("precision", "str", True)
53
- weight_type = ColumnContent("weight_type", "str", "Original")
54
- status = ColumnContent("status", "str", True)
55
-
56
- ## All the model information that we might need
57
- @dataclass
58
- class ModelDetails:
59
- name: str
60
- display_name: str = ""
61
- symbol: str = "" # emoji
62
-
63
-
64
- class ModelType(Enum):
65
- PT = ModelDetails(name="pretrained", symbol="🟢")
66
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
67
- IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
68
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
69
- Unknown = ModelDetails(name="", symbol="?")
70
-
71
- def to_str(self, separator=" "):
72
- return f"{self.value.symbol}{separator}{self.value.name}"
73
-
74
- @staticmethod
75
- def from_str(type):
76
- if "fine-tuned" in type or "🔶" in type:
77
- return ModelType.FT
78
- if "pretrained" in type or "🟢" in type:
79
- return ModelType.PT
80
- if "RL-tuned" in type or "🟦" in type:
81
- return ModelType.RL
82
- if "instruction-tuned" in type or "⭕" in type:
83
- return ModelType.IFT
84
- return ModelType.Unknown
85
-
86
- class WeightType(Enum):
87
- Adapter = ModelDetails("Adapter")
88
- Original = ModelDetails("Original")
89
- Delta = ModelDetails("Delta")
90
-
91
- class Precision(Enum):
92
- float16 = ModelDetails("float16")
93
- bfloat16 = ModelDetails("bfloat16")
94
- Unknown = ModelDetails("?")
95
-
96
- def from_str(precision):
97
- if precision in ["torch.float16", "float16"]:
98
- return Precision.float16
99
- if precision in ["torch.bfloat16", "bfloat16"]:
100
- return Precision.bfloat16
101
- return Precision.Unknown
102
-
103
- # Column selection
104
- COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
105
-
106
- EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
107
- EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
108
-
109
- BENCHMARK_COLS = [t.value.col_name for t in Tasks]
110
-
src/envs.py CHANGED
@@ -6,10 +6,23 @@ from huggingface_hub import HfApi
6
  # ----------------------------------
7
  TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
8
 
9
- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
10
- # ----------------------------------
 
11
 
12
- REPO_ID = f"{OWNER}/leaderboard"
 
13
  QUEUE_REPO = f"{OWNER}/requests"
14
  RESULTS_REPO = f"{OWNER}/results"
15
 
@@ -22,4 +35,8 @@ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
22
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
23
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
24
 
25
  API = HfApi(token=TOKEN)
6
  # ----------------------------------
7
  TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
8
 
9
+ OWNER = "AlyxTeam" # Change to your org - don't forget to create a results and request dataset
10
+
11
+ # For harness evaluations
12
+ DEVICE = "cpu" # "cuda:0" if you add compute, for harness evaluations
13
+ LIMIT = 20 # !!!! For testing, should be None for actual evaluations!!!
14
+ NUM_FEWSHOT = 0 # Change with your few shot for the Harness evaluations
15
+ TASKS_HARNESS = ["anli_r1", "logiqa"]
16
+
17
+ # For lighteval evaluations
18
+ ACCELERATOR = "cpu"
19
+ REGION = "us-east-1"
20
+ VENDOR = "aws"
21
+ TASKS_LIGHTEVAL = "lighteval|anli:r1|0|0,lighteval|logiqa|0|0"
22
+ # To add your own tasks, edit the custom file and launch it with `custom|myothertask|0|0`
23
 
24
+ # ---------------------------------------------------
25
+ REPO_ID = f"{OWNER}/backend"
26
  QUEUE_REPO = f"{OWNER}/requests"
27
  RESULTS_REPO = f"{OWNER}/results"
28
 
 
35
  EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
36
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
37
 
38
+ REFRESH_RATE = 10 * 60 # 10 min
39
+ NUM_LINES_VISUALIZE = 300
40
+
41
  API = HfApi(token=TOKEN)
42
+
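
For example, to also run the custom task defined in custom_tasks.py through the lighteval backend, TASKS_LIGHTEVAL could be extended with the `custom|...` entry mentioned in the comment above (a sketch; the exact meaning of the pipe-separated fields is defined by lighteval):

```python
TASKS_LIGHTEVAL = "lighteval|anli:r1|0|0,custom|myothertask|0|0"
```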
src/leaderboard/read_evals.py DELETED
@@ -1,196 +0,0 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
- """
19
- eval_name: str # org_model_precision (uid)
20
- full_model: str # org/model (path on hub)
21
- org: str
22
- model: str
23
- revision: str # commit hash, "" if main
24
- results: dict
25
- precision: Precision = Precision.Unknown
26
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
- weight_type: WeightType = WeightType.Original # Original or Adapter
28
- architecture: str = "Unknown"
29
- license: str = "?"
30
- likes: int = 0
31
- num_params: int = 0
32
- date: str = "" # submission date of request file
33
- still_on_hub: bool = False
34
-
35
- @classmethod
36
- def init_from_json_file(self, json_filepath):
37
- """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- data = json.load(fp)
40
-
41
- config = data.get("config")
42
-
43
- # Precision
44
- precision = Precision.from_str(config.get("model_dtype"))
45
-
46
- # Get model and org
47
- org_and_model = config.get("model_name", config.get("model_args", None))
48
- org_and_model = org_and_model.split("/", 1)
49
-
50
- if len(org_and_model) == 1:
51
- org = None
52
- model = org_and_model[0]
53
- result_key = f"{model}_{precision.value.name}"
54
- else:
55
- org = org_and_model[0]
56
- model = org_and_model[1]
57
- result_key = f"{org}_{model}_{precision.value.name}"
58
- full_model = "/".join(org_and_model)
59
-
60
- still_on_hub, _, model_config = is_model_on_hub(
61
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
62
- )
63
- architecture = "?"
64
- if model_config is not None:
65
- architectures = getattr(model_config, "architectures", None)
66
- if architectures:
67
- architecture = ";".join(architectures)
68
-
69
- # Extract results available in this file (some results are split in several files)
70
- results = {}
71
- for task in Tasks:
72
- task = task.value
73
-
74
- # We average all scores of a given metric (not all metrics are present in all files)
75
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
76
- if accs.size == 0 or any([acc is None for acc in accs]):
77
- continue
78
-
79
- mean_acc = np.mean(accs) * 100.0
80
- results[task.benchmark] = mean_acc
81
-
82
- return self(
83
- eval_name=result_key,
84
- full_model=full_model,
85
- org=org,
86
- model=model,
87
- results=results,
88
- precision=precision,
89
- revision= config.get("model_sha", ""),
90
- still_on_hub=still_on_hub,
91
- architecture=architecture
92
- )
93
-
94
- def update_with_request_file(self, requests_path):
95
- """Finds the relevant request file for the current model and updates info with it"""
96
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
97
-
98
- try:
99
- with open(request_file, "r") as f:
100
- request = json.load(f)
101
- self.model_type = ModelType.from_str(request.get("model_type", ""))
102
- self.weight_type = WeightType[request.get("weight_type", "Original")]
103
- self.license = request.get("license", "?")
104
- self.likes = request.get("likes", 0)
105
- self.num_params = request.get("params", 0)
106
- self.date = request.get("submitted_time", "")
107
- except Exception:
108
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
109
-
110
- def to_dict(self):
111
- """Converts the Eval Result to a dict compatible with our dataframe display"""
112
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
113
- data_dict = {
114
- "eval_name": self.eval_name, # not a column, just a save name,
115
- AutoEvalColumn.precision.name: self.precision.value.name,
116
- AutoEvalColumn.model_type.name: self.model_type.value.name,
117
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
118
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
119
- AutoEvalColumn.architecture.name: self.architecture,
120
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
121
- AutoEvalColumn.revision.name: self.revision,
122
- AutoEvalColumn.average.name: average,
123
- AutoEvalColumn.license.name: self.license,
124
- AutoEvalColumn.likes.name: self.likes,
125
- AutoEvalColumn.params.name: self.num_params,
126
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
127
- }
128
-
129
- for task in Tasks:
130
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
131
-
132
- return data_dict
133
-
134
-
135
- def get_request_file_for_model(requests_path, model_name, precision):
136
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
137
- request_files = os.path.join(
138
- requests_path,
139
- f"{model_name}_eval_request_*.json",
140
- )
141
- request_files = glob.glob(request_files)
142
-
143
- # Select correct request file (precision)
144
- request_file = ""
145
- request_files = sorted(request_files, reverse=True)
146
- for tmp_request_file in request_files:
147
- with open(tmp_request_file, "r") as f:
148
- req_content = json.load(f)
149
- if (
150
- req_content["status"] in ["FINISHED"]
151
- and req_content["precision"] == precision.split(".")[-1]
152
- ):
153
- request_file = tmp_request_file
154
- return request_file
155
-
156
-
157
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
158
- """From the path of the results folder root, extract all needed info for results"""
159
- model_result_filepaths = []
160
-
161
- for root, _, files in os.walk(results_path):
162
- # We should only have json files in model results
163
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
164
- continue
165
-
166
- # Sort the files by date
167
- try:
168
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
169
- except dateutil.parser._parser.ParserError:
170
- files = [files[-1]]
171
-
172
- for file in files:
173
- model_result_filepaths.append(os.path.join(root, file))
174
-
175
- eval_results = {}
176
- for model_result_filepath in model_result_filepaths:
177
- # Creation of result
178
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
179
- eval_result.update_with_request_file(requests_path)
180
-
181
- # Store results of same eval together
182
- eval_name = eval_result.eval_name
183
- if eval_name in eval_results.keys():
184
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
185
- else:
186
- eval_results[eval_name] = eval_result
187
-
188
- results = []
189
- for v in eval_results.values():
190
- try:
191
- v.to_dict() # we test if the dict version is complete
192
- results.append(v)
193
- except KeyError: # not all eval values present
194
- continue
195
-
196
- return results
src/logging.py ADDED
@@ -0,0 +1,38 @@
1
+ import sys
2
+ from pathlib import Path
3
+
4
+ proj_dir = Path(__file__).parents[1]
5
+
6
+ log_file = proj_dir/"output.log"
7
+
8
+
9
+ import logging
10
+
11
+
12
+ def setup_logger(name: str):
13
+ logger = logging.getLogger(name)
14
+ logger.setLevel(logging.INFO)
15
+
16
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
17
+
18
+ # Create a file handler to write logs to a file
19
+ file_handler = logging.FileHandler(log_file)
20
+ file_handler.setLevel(logging.INFO)
21
+ file_handler.setFormatter(formatter)
22
+ logger.addHandler(file_handler)
23
+
24
+ return logger
25
+
26
+
27
+ def configure_root_logger():
28
+ # Configure the root logger
29
+ logging.basicConfig(level=logging.INFO)
30
+ root_logger = logging.getLogger()
31
+
32
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
33
+
34
+ file_handler = logging.FileHandler(log_file)
35
+ file_handler.setLevel(logging.INFO)
36
+ file_handler.setFormatter(formatter)
37
+
38
+ root_logger.addHandler(file_handler)
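A usage sketch for the helpers added in `src/logging.py` (not part of this commit; the `"backend-cli"` logger name is only an example): configure the root logger once at startup, then create named loggers per module, all writing to `output.log` at the project root.

```python
# Hypothetical usage of the helpers added in src/logging.py.
from src.logging import configure_root_logger, setup_logger

configure_root_logger()                # root logger: console via basicConfig plus the shared file handler
logger = setup_logger("backend-cli")   # named logger writing to output.log (name is an example)

logger.info("Starting evaluation loop")
```

Note that `setup_logger` attaches a new `FileHandler` on every call, so calling it twice with the same name would duplicate log lines; each module should create its logger once.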
src/populate.py DELETED
@@ -1,58 +0,0 @@
1
- import json
2
- import os
3
-
4
- import pandas as pd
5
-
6
- from src.display.formatting import has_no_nan_values, make_clickable_model
7
- from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
- from src.leaderboard.read_evals import get_raw_eval_results
9
-
10
-
11
- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
- """Creates a dataframe from all the individual experiment results"""
13
- raw_data = get_raw_eval_results(results_path, requests_path)
14
- all_data_json = [v.to_dict() for v in raw_data]
15
-
16
- df = pd.DataFrame.from_records(all_data_json)
17
- df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
18
- df = df[cols].round(decimals=2)
19
-
20
- # filter out if any of the benchmarks have not been produced
21
- df = df[has_no_nan_values(df, benchmark_cols)]
22
- return df
23
-
24
-
25
- def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
26
- """Creates the different dataframes for the evaluation queues requestes"""
27
- entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
28
- all_evals = []
29
-
30
- for entry in entries:
31
- if ".json" in entry:
32
- file_path = os.path.join(save_path, entry)
33
- with open(file_path) as fp:
34
- data = json.load(fp)
35
-
36
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
37
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
38
-
39
- all_evals.append(data)
40
- elif ".md" not in entry:
41
- # this is a folder
42
- sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
43
- for sub_entry in sub_entries:
44
- file_path = os.path.join(save_path, entry, sub_entry)
45
- with open(file_path) as fp:
46
- data = json.load(fp)
47
-
48
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
49
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
50
- all_evals.append(data)
51
-
52
- pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
53
- running_list = [e for e in all_evals if e["status"] == "RUNNING"]
54
- finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
55
- df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
56
- df_running = pd.DataFrame.from_records(running_list, columns=cols)
57
- df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
58
- return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
1
- import json
2
- import os
3
- import re
4
- from collections import defaultdict
5
- from datetime import datetime, timedelta, timezone
6
-
7
- import huggingface_hub
8
- from huggingface_hub import ModelCard
9
- from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig
11
- from transformers.models.auto.tokenization_auto import AutoTokenizer
12
-
13
- def check_model_card(repo_id: str) -> tuple[bool, str]:
14
- """Checks if the model card and license exist and have been filled"""
15
- try:
16
- card = ModelCard.load(repo_id)
17
- except huggingface_hub.utils.EntryNotFoundError:
18
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
19
-
20
- # Enforce license metadata
21
- if card.data.license is None:
22
- if not ("license_name" in card.data and "license_link" in card.data):
23
- return False, (
24
- "License not found. Please add a license to your model card using the `license` metadata or a"
25
- " `license_name`/`license_link` pair."
26
- )
27
-
28
- # Enforce card content
29
- if len(card.text) < 200:
30
- return False, "Please add a description to your model card, it is too short."
31
-
32
- return True, ""
33
-
34
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
35
- """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
36
- try:
37
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
38
- if test_tokenizer:
39
- try:
40
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
41
- except ValueError as e:
42
- return (
43
- False,
44
- f"uses a tokenizer which is not in a transformers release: {e}",
45
- None
46
- )
47
- except Exception as e:
48
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
49
- return True, None, config
50
-
51
- except ValueError:
52
- return (
53
- False,
54
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
55
- None
56
- )
57
-
58
- except Exception as e:
59
- return False, "was not found on hub!", None
60
-
61
-
62
- def get_model_size(model_info: ModelInfo, precision: str):
63
- """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
64
- try:
65
- model_size = round(model_info.safetensors["total"] / 1e9, 3)
66
- except (AttributeError, TypeError):
67
- return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
68
-
69
- size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
70
- model_size = size_factor * model_size
71
- return model_size
72
-
73
- def get_model_arch(model_info: ModelInfo):
74
- """Gets the model architecture from the configuration"""
75
- return model_info.config.get("architectures", "Unknown")
76
-
77
- def already_submitted_models(requested_models_dir: str) -> set[str]:
78
- """Gather a list of already submitted models to avoid duplicates"""
79
- depth = 1
80
- file_names = []
81
- users_to_submission_dates = defaultdict(list)
82
-
83
- for root, _, files in os.walk(requested_models_dir):
84
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
85
- if current_depth == depth:
86
- for file in files:
87
- if not file.endswith(".json"):
88
- continue
89
- with open(os.path.join(root, file), "r") as f:
90
- info = json.load(f)
91
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
92
-
93
- # Select organisation
94
- if info["model"].count("/") == 0 or "submitted_time" not in info:
95
- continue
96
- organisation, _ = info["model"].split("/")
97
- users_to_submission_dates[organisation].append(info["submitted_time"])
98
-
99
- return set(file_names), users_to_submission_dates
src/submission/submit.py DELETED
@@ -1,119 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime, timezone
4
-
5
- from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
7
- from src.submission.check_validity import (
8
- already_submitted_models,
9
- check_model_card,
10
- get_model_size,
11
- is_model_on_hub,
12
- )
13
-
14
- REQUESTED_MODELS = None
15
- USERS_TO_SUBMISSION_DATES = None
16
-
17
- def add_new_eval(
18
- model: str,
19
- base_model: str,
20
- revision: str,
21
- precision: str,
22
- weight_type: str,
23
- model_type: str,
24
- ):
25
- global REQUESTED_MODELS
26
- global USERS_TO_SUBMISSION_DATES
27
- if not REQUESTED_MODELS:
28
- REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
29
-
30
- user_name = ""
31
- model_path = model
32
- if "/" in model:
33
- user_name = model.split("/")[0]
34
- model_path = model.split("/")[1]
35
-
36
- precision = precision.split(" ")[0]
37
- current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
-
39
- if model_type is None or model_type == "":
40
- return styled_error("Please select a model type.")
41
-
42
- # Does the model actually exist?
43
- if revision == "":
44
- revision = "main"
45
-
46
- # Is the model on the hub?
47
- if weight_type in ["Delta", "Adapter"]:
48
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
- if not base_model_on_hub:
50
- return styled_error(f'Base model "{base_model}" {error}')
51
-
52
- if not weight_type == "Adapter":
53
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
- if not model_on_hub:
55
- return styled_error(f'Model "{model}" {error}')
56
-
57
- # Is the model info correctly filled?
58
- try:
59
- model_info = API.model_info(repo_id=model, revision=revision)
60
- except Exception:
61
- return styled_error("Could not get your model information. Please fill it up properly.")
62
-
63
- model_size = get_model_size(model_info=model_info, precision=precision)
64
-
65
- # Were the model card and license filled?
66
- try:
67
- license = model_info.cardData["license"]
68
- except Exception:
69
- return styled_error("Please select a license for your model")
70
-
71
- modelcard_OK, error_msg = check_model_card(model)
72
- if not modelcard_OK:
73
- return styled_error(error_msg)
74
-
75
- # Seems good, creating the eval
76
- print("Adding new eval")
77
-
78
- eval_entry = {
79
- "model": model,
80
- "base_model": base_model,
81
- "revision": revision,
82
- "precision": precision,
83
- "weight_type": weight_type,
84
- "status": "PENDING",
85
- "submitted_time": current_time,
86
- "model_type": model_type,
87
- "likes": model_info.likes,
88
- "params": model_size,
89
- "license": license,
90
- "private": False,
91
- }
92
-
93
- # Check for duplicate submission
94
- if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
95
- return styled_warning("This model has been already submitted.")
96
-
97
- print("Creating eval file")
98
- OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
- os.makedirs(OUT_DIR, exist_ok=True)
100
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
-
102
- with open(out_path, "w") as f:
103
- f.write(json.dumps(eval_entry))
104
-
105
- print("Uploading eval file")
106
- API.upload_file(
107
- path_or_fileobj=out_path,
108
- path_in_repo=out_path.split("eval-queue/")[1],
109
- repo_id=QUEUE_REPO,
110
- repo_type="dataset",
111
- commit_message=f"Add {model} to eval queue",
112
- )
113
-
114
- # Remove the local file
115
- os.remove(out_path)
116
-
117
- return styled_message(
118
- "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
119
- )