Clémentine committed
Commit 23f614e · 1 parent: 459932d

simplified env vars

Files changed:
- README.md (+0, -1)
- app.py (+4, -3)
- src/envs.py (+2, -2)
- src/submission/submit.py (+2, -2)
README.md
@@ -15,7 +15,6 @@ space_ci:
   private: true
   secrets:
     - HF_TOKEN
-    - H4_TOKEN
 tags:
   - leaderboard
 short_description: Track, rank and evaluate open LLMs and chatbots
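With H4_TOKEN removed, HF_TOKEN is the only secret the Space declares. Secrets configured for a Space are exposed to the running app as environment variables, which is what the code changes below rely on. A minimal sketch of reading it (illustrative, not part of the diff):

```python
import os

# A Space secret named HF_TOKEN shows up as an environment variable at runtime;
# falling back to None keeps local runs without a token working.
HF_TOKEN = os.environ.get("HF_TOKEN", None)
```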
app.py
@@ -32,9 +32,10 @@ from src.envs import (
     API,
     EVAL_REQUESTS_PATH,
     AGGREGATED_REPO,
-
+    HF_TOKEN,
     QUEUE_REPO,
     REPO_ID,
+    HF_HOME,
 )
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
@@ -48,7 +49,7 @@ enable_space_ci()
 
 
 def restart_space():
-    API.restart_space(repo_id=REPO_ID, token=
+    API.restart_space(repo_id=REPO_ID, token=HF_TOKEN)
 
 
 def time_diff_wrapper(func):
@@ -98,7 +99,7 @@ def init_space(full_init: bool = True):
         restart_space()
 
     # Always retrieve the leaderboard DataFrame
-    leaderboard_dataset = datasets.load_dataset(AGGREGATED_REPO, "default", split="train")
+    leaderboard_dataset = datasets.load_dataset(AGGREGATED_REPO, "default", split="train", cache_dir=HF_HOME)
     leaderboard_df = get_leaderboard_df(
         leaderboard_dataset=leaderboard_dataset,
         cols=COLS,
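HF_HOME is imported so the leaderboard dataset cache lands in a known directory. A minimal sketch of that pattern, assuming HF_HOME points at a writable path; the repo id below is a placeholder, not the value of AGGREGATED_REPO:

```python
import os

import datasets

# HF_HOME is where the Hugging Face libraries keep their cache; reusing it as
# cache_dir keeps dataset downloads in one predictable, writable location.
cache_dir = os.environ.get("HF_HOME", ".")

leaderboard_dataset = datasets.load_dataset(
    "some-org/aggregated-results",  # placeholder repo id for illustration
    "default",
    split="train",
    cache_dir=cache_dir,
)
```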
src/envs.py
@@ -2,7 +2,7 @@ import os
 from huggingface_hub import HfApi
 
 # clone / pull the lmeh eval data
-
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
 REPO_ID = "HuggingFaceH4/open_llm_leaderboard"
 QUEUE_REPO = "open-llm-leaderboard/requests"
@@ -29,4 +29,4 @@ RATE_LIMIT_PERIOD = 7
 RATE_LIMIT_QUOTA = 5
 HAS_HIGHER_RATE_LIMIT = ["TheBloke"]
 
-API = HfApi(token=
+API = HfApi(token=HF_TOKEN)
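The simplified module reads one token and builds one shared client. Put together with the REPO_ID defined above, app.py's restart_space() reduces to a single call; a minimal sketch using only values visible in this diff:

```python
import os

from huggingface_hub import HfApi

# One env var, one shared client: the pattern this commit standardizes on.
HF_TOKEN = os.environ.get("HF_TOKEN", None)
REPO_ID = "HuggingFaceH4/open_llm_leaderboard"
API = HfApi(token=HF_TOKEN)

# Equivalent to app.py's restart_space(); passing the token again is redundant
# but harmless, since HfApi already carries it.
API.restart_space(repo_id=REPO_ID, token=HF_TOKEN)
```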
src/submission/submit.py
@@ -8,7 +8,7 @@ from src.display.formatting import styled_error, styled_message, styled_warning
 from src.envs import (
     API,
     EVAL_REQUESTS_PATH,
-
+    HF_TOKEN,
     QUEUE_REPO,
     RATE_LIMIT_PERIOD,
     RATE_LIMIT_QUOTA,
@@ -76,7 +76,7 @@ def add_new_eval(
     # Is the model on the hub?
     if weight_type in ["Delta", "Adapter"]:
         base_model_on_hub, error, _ = is_model_on_hub(
-            model_name=base_model, revision=revision, token=
+            model_name=base_model, revision=revision, token=HF_TOKEN, test_tokenizer=True
         )
         if not base_model_on_hub:
             return styled_error(f'Base model "{base_model}" {error}')
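is_model_on_hub is a helper defined elsewhere in this repository, so its exact implementation is not shown in this diff. As a rough, hedged sketch of what such a check typically does with public transformers APIs (verify the repo and revision are readable with the token and, when test_tokenizer=True, that the tokenizer loads), it might look like the following; the function name and return shape are assumptions for illustration:

```python
from typing import Optional, Tuple

from transformers import AutoConfig, AutoTokenizer


def is_model_on_hub_sketch(
    model_name: str,
    revision: str,
    token: Optional[str] = None,
    test_tokenizer: bool = False,
) -> Tuple[bool, str, Optional[object]]:
    """Rough approximation of a hub-availability check; not the project's helper."""
    try:
        # Loading the config proves the repo and revision exist and are readable
        # with the supplied token.
        config = AutoConfig.from_pretrained(model_name, revision=revision, token=token)
        if test_tokenizer:
            # Optionally confirm the tokenizer can be instantiated as well.
            AutoTokenizer.from_pretrained(model_name, revision=revision, token=token)
        return True, "", config
    except Exception as e:
        return False, f"could not be loaded: {e}", None
```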