Spaces: Update classification_report

classification_report.py (CHANGED, +57 -16)
@@ -12,26 +12,67 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ classification_report metric. """
-from
-
-import sklearn
+from sklearn.metrics import classification_report
 import evaluate
 import datasets
 
 
+_DESCRIPTION = """
+Build a text report showing the main classification metrics: accuracy, precision, recall and F1.
+"""
+
+_KWARGS_DESCRIPTION = """
+Args:
+    predictions (`list` of `int`): Predicted labels.
+    references (`list` of `int`): Ground truth labels.
+    labels (`list` of `int`): Optional list of label indices to include in the report. Defaults to None.
+    target_names (`list` of `str`): Optional display names matching the labels (same order). Defaults to None.
+    sample_weight (`list` of `float`): Sample weights. Defaults to None.
+    digits (`int`): Number of digits for formatting output floating point values. Ignored here, because the report is returned as a dictionary (output_dict=True) and its values are not rounded. Defaults to 2.
+    zero_division ("warn", 0 or 1): Value to return when there is a zero division. If set to "warn", this acts as 0, but a warning is also raised. Defaults to "warn".
+Returns:
+    report (`dict`): Summary of the precision, recall and F1 score for each class. This module always calls classification_report with output_dict=True, so the report is a dictionary with the following structure:
+    ```
+    {'label 1': {'precision': 0.5,
+                 'recall': 1.0,
+                 'f1-score': 0.67,
+                 'support': 1},
+     'label 2': { ... },
+     ...
+    }
+    ```
+    The reported averages include macro average (the unweighted mean per label), weighted average (the support-weighted mean per label), and sample average (only for multilabel classification). Micro average (averaging the total true positives, false negatives and false positives) is only shown for multi-label input or multi-class input with a subset of classes, because otherwise it corresponds to accuracy and would be the same for all metrics. See also precision_recall_fscore_support for more details on averages.
+    Note that in binary classification, recall of the positive class is also known as "sensitivity"; recall of the negative class is "specificity".
+Examples:
+    Simple example
+        >>> classification_report_metric = evaluate.load("bstrai/classification_report")
+        >>> results = classification_report_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])
+        >>> print(results)
+        {'0': {'precision': 0.5, 'recall': 0.5, 'f1-score': 0.5, 'support': 2}, '1': {'precision': 0.6666666666666666, 'recall': 1.0, 'f1-score': 0.8, 'support': 2}, '2': {'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0, 'support': 2}, 'accuracy': 0.5, 'macro avg': {'precision': 0.38888888888888884, 'recall': 0.5, 'f1-score': 0.43333333333333335, 'support': 6}, 'weighted avg': {'precision': 0.38888888888888884, 'recall': 0.5, 'f1-score': 0.43333333333333335, 'support': 6}}
+"""
+
+
+_CITATION = """
+@article{scikit-learn,
+    title={Scikit-learn: Machine Learning in {P}ython},
+    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
+            and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
+            and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
+            Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
+    journal={Journal of Machine Learning Research},
+    volume={12},
+    pages={2825--2830},
+    year={2011}
+}
+"""
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class ClassificationReportModule(evaluate.Metric):
-    """
-    Local metric used for classification task based on sklearn classiication_report().
-    a classification report is a simple tool to compute multiple metrics such as:
-    - accuracy
-    - precision/recall/f1-score by class.
-    - mean/weighted average.
-    """
     def _info(self) -> evaluate.MetricInfo:
         return evaluate.MetricInfo(
-            description=
-            citation=
-            inputs_description=
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
             features=datasets.Features(
                 {
                     "predictions": datasets.Sequence(datasets.Value("int32")),
@@ -43,8 +84,8 @@ class ClassificationReportModule(evaluate.Metric):
                     "references": datasets.Value("int32"),
                 }
             ),
-            reference_urls=[""],
+            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html"],
        )
 
-    def _compute(self,
-        return
+    def _compute(self, predictions, references, labels=None, target_names=None, sample_weight=None, digits=2, zero_division="warn") -> dict:
+        return classification_report(y_true=references, y_pred=predictions, labels=labels, target_names=target_names, sample_weight=sample_weight, digits=digits, output_dict=True, zero_division=zero_division)
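After this change the metric should load and run end to end. Below is a minimal usage sketch, not part of the commit: it assumes `evaluate` and `scikit-learn` are installed, reuses the space ID from the docstring example, and the `target_names` values are made up for illustration.

# Minimal usage sketch (not part of the commit). Assumes `evaluate` and
# `scikit-learn` are installed; the space ID comes from the docstring example.
import evaluate

metric = evaluate.load("bstrai/classification_report")

report = metric.compute(
    references=[0, 1, 2, 0, 1, 2],        # ground-truth labels
    predictions=[0, 1, 1, 2, 1, 0],       # predicted labels
    target_names=["cat", "dog", "bird"],  # hypothetical display names, one per class
)

# _compute hard-codes output_dict=True, so the result is a nested dict with one
# entry per class name plus "accuracy", "macro avg" and "weighted avg".
print(report["dog"]["f1-score"])      # 0.8
print(report["macro avg"]["recall"])  # 0.5

Because `output_dict=True` is fixed inside `_compute`, the `digits` argument never affects the returned values; a caller who wants the plain-text table would need a separate entry point.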
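The averaging behavior described in the docstring can be sanity-checked against scikit-learn directly. A small sketch, assuming only `scikit-learn`:

# Sketch: verify macro vs. weighted averaging on the docstring example,
# using scikit-learn directly (no evaluate involved).
from sklearn.metrics import classification_report

report = classification_report(
    y_true=[0, 1, 2, 0, 1, 2],
    y_pred=[0, 1, 1, 2, 1, 0],
    output_dict=True,
)

classes = ["0", "1", "2"]  # output_dict keys are the stringified labels
f1 = [report[c]["f1-score"] for c in classes]
support = [report[c]["support"] for c in classes]

# Macro average: unweighted mean of the per-class scores.
assert abs(report["macro avg"]["f1-score"] - sum(f1) / len(f1)) < 1e-9

# Weighted average: per-class scores weighted by support. All supports are
# equal here (2 each), so the weighted and macro averages coincide.
weighted = sum(f * s for f, s in zip(f1, support)) / sum(support)
assert abs(report["weighted avg"]["f1-score"] - weighted) < 1e-9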
|