Spaces: Running on CPU Upgrade
add read token
- src/envs.py +1 -0
- src/leaderboard/read_evals.py +2 -2
- src/submission/submit.py +5 -3
src/envs.py
CHANGED
@@ -4,6 +4,7 @@ from huggingface_hub import HfApi
 
 # clone / pull the lmeh eval data
 TOKEN = os.environ.get("TOKEN", None)
+READ_TOKEN = os.environ.get("READ_TOKEN", None)
 
 OWNER = "PatronusAI"
 REPO_ID = f"{OWNER}/leaderboard"
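
With this change, src/envs.py carries a read-scoped Hub token next to the existing one. For context, the relevant slice of the file after the commit looks roughly like the sketch below; the os import and the API client are assumptions based on the standard Hugging Face leaderboard template, not part of this diff.

import os

from huggingface_hub import HfApi

# clone / pull the lmeh eval data
TOKEN = os.environ.get("TOKEN", None)            # pre-existing token
READ_TOKEN = os.environ.get("READ_TOKEN", None)  # read-only token added by this commit

OWNER = "PatronusAI"
REPO_ID = f"{OWNER}/leaderboard"

# assumed from the template: a Hub client built from the existing token
API = HfApi(token=TOKEN)

Both values come from Space secrets via os.environ.get, so a missing secret degrades to None instead of failing at import time.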
src/leaderboard/read_evals.py
CHANGED
@@ -7,7 +7,7 @@ from dataclasses import dataclass
 import dateutil
 import numpy as np
 
-from src.envs import
+from src.envs import READ_TOKEN
 from src.display.formatting import make_clickable_model
 from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
 from src.submission.check_validity import is_model_on_hub
@@ -57,7 +57,7 @@ class EvalResult:
         full_model = "/".join(org_and_model)
 
         still_on_hub, _, model_config = is_model_on_hub(
-            full_model, config.get("model_sha", "main"),
+            full_model, config.get("model_sha", "main"), READ_TOKEN, trust_remote_code=True, test_tokenizer=False
         )
         architecture = "?"
         if model_config is not None:
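
The new positional call hands READ_TOKEN to is_model_on_hub as its third argument, with trust_remote_code=True and test_tokenizer=False. The helper lives in src/submission/check_validity.py and is not part of this commit; a minimal sketch of the shape the call sites imply, assuming a recent transformers release where from_pretrained accepts token=, is:

from transformers import AutoConfig, AutoTokenizer


def is_model_on_hub(model_name, revision, token=None, trust_remote_code=False, test_tokenizer=False):
    """Return (still_on_hub, error_message, config). A sketch, not the template's exact code."""
    try:
        config = AutoConfig.from_pretrained(
            model_name, revision=revision, token=token, trust_remote_code=trust_remote_code
        )
        if test_tokenizer:
            # The submit path sets test_tokenizer=True to reject repos whose tokenizer cannot load.
            AutoTokenizer.from_pretrained(
                model_name, revision=revision, token=token, trust_remote_code=trust_remote_code
            )
        return True, None, config
    except Exception as e:
        return False, f"was not found on hub ({e})", None

Skipping the tokenizer check here keeps leaderboard refreshes cheap: read_evals only needs the config to recover the architecture, as the lines right after the call show.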
src/submission/submit.py
CHANGED
@@ -3,7 +3,7 @@ import os
 from datetime import datetime, timezone
 
 from src.display.formatting import styled_error, styled_message, styled_warning
-from src.envs import API, EVAL_REQUESTS_PATH,
+from src.envs import API, EVAL_REQUESTS_PATH, READ_TOKEN, QUEUE_REPO
 from src.submission.check_validity import (
     already_submitted_models,
     check_model_card,
@@ -45,12 +45,14 @@ def add_new_eval(
 
     # Is the model on the hub?
     if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=
+        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=READ_TOKEN,
+                                                      test_tokenizer=True)
         if not base_model_on_hub:
             return styled_error(f'Base model "{base_model}" {error}')
 
     if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=
+        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=READ_TOKEN,
+                                                 test_tokenizer=True)
         if not model_on_hub:
             return styled_error(f'Model "{model}" {error}')
 
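
At submission time the same token is passed by keyword, now with test_tokenizer=True so a repo whose tokenizer cannot load is rejected before it is queued. A quick way to exercise that path outside the Space, assuming READ_TOKEN is exported locally and using a placeholder model id, might be:

import os

from src.submission.check_validity import is_model_on_hub

READ_TOKEN = os.environ.get("READ_TOKEN", None)  # same secret the Space reads in src/envs.py

# "org/model" and "main" are placeholders, not values from the diff
on_hub, error, _ = is_model_on_hub(
    model_name="org/model",
    revision="main",
    token=READ_TOKEN,
    test_tokenizer=True,
)
if not on_hub:
    print(f'Model "org/model" {error}')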