add n-shot param
- src/display/utils.py +1 -0
- src/leaderboard/read_evals.py +7 -2
- src/populate.py +0 -2
src/display/utils.py
CHANGED
@@ -40,6 +40,7 @@ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B
 auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
 auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
 auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+auto_eval_column_dict.append(["n_shot", ColumnContent, ColumnContent("n_shot", "number", False)])
 # Dummy column for the search bar (hidden by the custom CSS)
 auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
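For context, each entry appended to auto_eval_column_dict is later fed into dataclasses.make_dataclass elsewhere in src/display/utils.py, which is what makes the new column addressable as AutoEvalColumn.n_shot. The sketch below is a minimal, assumed reconstruction of that mechanism; the field names on ColumnContent are inferred from the positional arguments used above, not taken from this commit:

from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    # Inferred layout: ColumnContent("n_shot", "number", False)
    # maps to (name, type, displayed_by_default).
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    dummy: bool = False

auto_eval_column_dict = []
auto_eval_column_dict.append(["n_shot", ColumnContent, ColumnContent("n_shot", "number", False)])

# Each [attr_name, annotation, default] triple becomes a field of the generated
# AutoEvalColumn dataclass, so downstream code can read AutoEvalColumn.n_shot.name.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.n_shot.name)  # -> "n_shot"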
src/leaderboard/read_evals.py
CHANGED
@@ -31,6 +31,7 @@ class EvalResult:
     num_params: int = 0
     date: str = "" # submission date of request file
     still_on_hub: bool = False
+    n_shot: int = 0

     @classmethod
     def init_from_json_file(self, json_filepath, n_shot_num):
@@ -90,7 +91,8 @@ class EvalResult:
             precision=precision,
             revision= config.get("model_sha", ""),
             still_on_hub=still_on_hub,
-            architecture=architecture
+            architecture=architecture,
+            n_shot=n_shot_num
         )

     def update_with_request_file(self, requests_path):
@@ -200,7 +202,10 @@ class EvalResult:
         except KeyError:
             print(f"Could not find still on hub")

-
+        try:
+            data_dict[AutoEvalColumn.n_shot.name] = self.n_shot
+        except KeyError:
+            print(f"Could not find still on hub")

         for task in Tasks:
             try:
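Taken together, the read_evals.py changes thread the few-shot count through the result object: the caller passes n_shot_num into init_from_json_file, the constructor stores it on the new n_shot field, and to_dict writes it into the row dict under AutoEvalColumn.n_shot.name. Below is a stripped-down, illustrative sketch of that flow; the JSON parsing, the real AutoEvalColumn, and where n_shot_num originates are outside this diff, so the Mini/Fake names are stand-ins:

from dataclasses import dataclass

@dataclass(frozen=True)
class _Col:
    name: str

class FakeAutoEvalColumn:
    # Stand-in for the AutoEvalColumn generated in src/display/utils.py.
    n_shot = _Col("n_shot")

@dataclass
class MiniEvalResult:
    # Reduced EvalResult: only the fields touched by this commit.
    still_on_hub: bool = False
    n_shot: int = 0

    @classmethod
    def init_from_json_file(cls, json_filepath, n_shot_num):
        # The real method also reads config/results from the JSON file;
        # here we only show how n_shot_num lands on the instance.
        return cls(still_on_hub=True, n_shot=n_shot_num)

    def to_dict(self):
        data_dict = {}
        try:
            data_dict[FakeAutoEvalColumn.n_shot.name] = self.n_shot
        except KeyError:
            print("Could not find still on hub")  # message copied from the diff
        return data_dict

result = MiniEvalResult.init_from_json_file("results/model/results.json", n_shot_num=5)
print(result.to_dict())  # -> {'n_shot': 5}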
src/populate.py
CHANGED
@@ -17,9 +17,7 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
     df = df[cols].round(decimals=2)

     # filter out if any of the benchmarks have not been produced
-    print('X1', df)
     df2 = df[has_no_nan_values(df, benchmark_cols)]
-    print('X2', df2)
     return raw_data, df


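The populate.py change only removes two leftover debug prints around the NaN filter in get_leaderboard_df. For reference, here is a minimal sketch of the kind of row mask has_no_nan_values is assumed to compute; the real helper is defined elsewhere in the repo and is not part of this diff:

import pandas as pd

def has_no_nan_values(df: pd.DataFrame, columns: list) -> pd.Series:
    # Boolean mask: True for rows where every benchmark column has a value.
    # Assumed behaviour, inferred from how df2 is built in get_leaderboard_df.
    return df[columns].notna().all(axis=1)

df = pd.DataFrame({"model": ["a", "b"], "arc": [0.5, None], "hellaswag": [0.7, 0.6]})
benchmark_cols = ["arc", "hellaswag"]
df2 = df[has_no_nan_values(df, benchmark_cols)]  # keeps only the "a" row
print(df2)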