Spaces:
Running
Running
Mihail Yonchev
committed on
Commit
•
ba85782
1
Parent(s):
71a5d39
chore: silently add model_report
Browse files
- src/display/utils.py +3 -1
- src/leaderboard/read_evals.py +5 -0
src/display/utils.py
CHANGED
@@ -25,7 +25,9 @@ class ColumnContent:
|
|
25 |
|
26 |
## Leaderboard columns
|
27 |
auto_eval_column_dict = [["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)],
|
28 |
-
["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)]
|
|
|
|
|
29 |
# Init
|
30 |
# Scores
|
31 |
for task in Tasks:
|
|
|
25 |
|
26 |
## Leaderboard columns
|
27 |
auto_eval_column_dict = [["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)],
|
28 |
+
["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)],
|
29 |
+
["model_report", ColumnContent, ColumnContent("Full Report", "markdown", False, never_hidden=False)]
|
30 |
+
]
|
31 |
# Init
|
32 |
# Scores
|
33 |
for task in Tasks:
|
src/leaderboard/read_evals.py
CHANGED
@@ -11,6 +11,8 @@ from src.display.formatting import make_clickable_model
|
|
11 |
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
|
12 |
from src.submission.check_validity import is_model_on_hub
|
13 |
|
|
|
|
|
14 |
|
15 |
@dataclass
|
16 |
class EvalResult:
|
@@ -29,6 +31,7 @@ class EvalResult:
|
|
29 |
num_params: int = 0
|
30 |
date: str = "" # submission date of request file
|
31 |
still_on_hub: bool = False
|
|
|
32 |
|
33 |
@classmethod
|
34 |
def init_from_json_file(self, json_filepath):
|
@@ -89,6 +92,7 @@ class EvalResult:
|
|
89 |
org=org,
|
90 |
model=model,
|
91 |
results=results,
|
|
|
92 |
# precision=precision,
|
93 |
revision=config.get("model_sha", ""),
|
94 |
still_on_hub=still_on_hub,
|
@@ -126,6 +130,7 @@ class EvalResult:
|
|
126 |
"eval_name": self.eval_name, # not a column, just a save name,
|
127 |
# AutoEvalColumn.precision.name: self.precision.value.name,
|
128 |
AutoEvalColumn.model_type.name: self.model_type.value.name,
|
|
|
129 |
AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
|
130 |
# AutoEvalColumn.weight_type.name: self.weight_type.value.name,
|
131 |
# AutoEvalColumn.architecture.name: self.architecture,
|
|
|
11 |
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
|
12 |
from src.submission.check_validity import is_model_on_hub
|
13 |
|
14 |
+
def report_hyperlink(link):
|
15 |
+
return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">🔗 Read Report</a>' if link else "N/A"
|
16 |
|
17 |
@dataclass
|
18 |
class EvalResult:
|
|
|
31 |
num_params: int = 0
|
32 |
date: str = "" # submission date of request file
|
33 |
still_on_hub: bool = False
|
34 |
+
model_report: str = ""  # NOTE(review): original diff had a trailing comma here, which makes the default the tuple ("",) instead of the empty string
|
35 |
|
36 |
@classmethod
|
37 |
def init_from_json_file(self, json_filepath):
|
|
|
92 |
org=org,
|
93 |
model=model,
|
94 |
results=results,
|
95 |
+
model_report=config.get("model_report", ""),
|
96 |
# precision=precision,
|
97 |
revision=config.get("model_sha", ""),
|
98 |
still_on_hub=still_on_hub,
|
|
|
130 |
"eval_name": self.eval_name, # not a column, just a save name,
|
131 |
# AutoEvalColumn.precision.name: self.precision.value.name,
|
132 |
AutoEvalColumn.model_type.name: self.model_type.value.name,
|
133 |
+
AutoEvalColumn.model_report.name: report_hyperlink(self.model_report),
|
134 |
AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
|
135 |
# AutoEvalColumn.weight_type.name: self.weight_type.value.name,
|
136 |
# AutoEvalColumn.architecture.name: self.architecture,
|