bugfix

src/display/utils.py  (+7, -7)
@@ -28,6 +28,8 @@ auto_eval_column_dict = []
 # Init
 auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+auto_eval_column_dict.append(["training_codebase", ColumnContent, ColumnContent("Code", "str", True, False)])
+auto_eval_column_dict.append(["training_data", ColumnContent, ColumnContent("Data", "str", True, False)])
 
 # Scores
 auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
@@ -44,8 +46,6 @@ auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Arch
 auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
 auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
 auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
-auto_eval_column_dict.append(["training_codebase", ColumnContent, ColumnContent("Training code", "str", False, False)])
-auto_eval_column_dict.append(["training_data", ColumnContent, ColumnContent("Training data", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
@@ -94,8 +94,8 @@ class ModelType(Enum):
 
 
 class DisclosedType(Enum):
-    D = ModelDetails(name="disclosed", symbol="
-    UD = ModelDetails(name="undisclosed", symbol="
+    D = ModelDetails(name="disclosed", symbol="👁")
+    UD = ModelDetails(name="undisclosed", symbol="🙈")
     Unknown = ModelDetails(name="", symbol="?")
 
     def to_str(self, separator=" "):
@@ -103,10 +103,10 @@ class DisclosedType(Enum):
 
     @staticmethod
     def from_str(type):
-        if "
-            return DisclosedType.D
-        if "undisclosed" in type or "⭕" in type:
+        if "undisclosed" in type:
             return DisclosedType.UD
+        if "disclosed" in type:
+            return DisclosedType.D
         return DisclosedType.Unknown
 
 
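The heart of the bugfix is the from_str change. The old version tested the branch that returns DisclosedType.D before the "undisclosed" branch (the exact old condition is truncated in the diff view above), and since "disclosed" is a substring of "undisclosed", a match on "disclosed" also fires for undisclosed models. The fix checks the longer keyword first. Below is a minimal, runnable sketch of the fixed lookup; ModelDetails is a simplified stand-in here, and the real class may carry more fields.

from dataclasses import dataclass
from enum import Enum

# Simplified stand-in for the real ModelDetails class (illustrative only).
@dataclass(frozen=True)
class ModelDetails:
    name: str
    symbol: str = ""

class DisclosedType(Enum):
    D = ModelDetails(name="disclosed", symbol="👁")
    UD = ModelDetails(name="undisclosed", symbol="🙈")
    Unknown = ModelDetails(name="", symbol="?")

    @staticmethod
    def from_str(type):
        # "disclosed" is a substring of "undisclosed", so the longer keyword
        # must be tested first or undisclosed models get reported as disclosed.
        if "undisclosed" in type:
            return DisclosedType.UD
        if "disclosed" in type:
            return DisclosedType.D
        return DisclosedType.Unknown

assert "disclosed" in "undisclosed"                               # the pitfall
assert DisclosedType.from_str("undisclosed") is DisclosedType.UD  # now correct
assert DisclosedType.from_str("disclosed") is DisclosedType.D
assert DisclosedType.from_str("other") is DisclosedType.Unknown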
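The other two hunks move the training_codebase and training_data columns up next to the model name, rename their headers to "Code" and "Data", and flip the third constructor argument to True, which appears to be the displayed-by-default flag, so the new columns show up without being unhidden manually. Each auto_eval_column_dict entry is a [field_name, field_type, default] triple that make_dataclass turns into one field of the frozen AutoEvalColumn class. The sketch below illustrates that pattern; the ColumnContent definition is only inferred from the call sites in this file, and its field names are assumptions.

from dataclasses import dataclass, make_dataclass

# ColumnContent as inferred from the call sites above; the real definition
# may differ (these field names are assumptions).
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = []
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
auto_eval_column_dict.append(["training_codebase", ColumnContent, ColumnContent("Code", "str", True, False)])
auto_eval_column_dict.append(["training_data", ColumnContent, ColumnContent("Data", "str", True, False)])

# Each [field_name, field_type, default] triple becomes one field of the
# generated dataclass, with the ColumnContent instance as its default value.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.training_codebase.name)              # Code
print(AutoEvalColumn.training_data.displayed_by_default)  # True

Because the defaults are plain, hashable instances, they remain available as class attributes, which is presumably how the rest of the app looks up column metadata such as AutoEvalColumn.model.name.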