# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mean IoU (Intersection-over-Union) metric."""

from typing import Dict, Optional

import datasets
import numpy as np

import evaluate

_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is the background class,
        and the background itself is not included among the dataset's classes (e.g. ADE20k). The background label will be replaced by 255.
Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:
    >>> import numpy as np
    >>> mean_iou = evaluate.load("mean_iou")
    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]
    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\ | |
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, | |
author = {{MMSegmentation Contributors}}, | |
license = {Apache-2.0}, | |
month = {7}, | |
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, | |
url = {https://github.com/open-mmlab/mmsegmentation}, | |
year = {2020} | |
}""" | |


def intersect_and_union(
    pred_label,
    label,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
"""Calculate intersection and Union. | |
Args: | |
pred_label (`ndarray`): | |
Prediction segmentation map of shape (height, width). | |
label (`ndarray`): | |
Ground truth segmentation map of shape (height, width). | |
num_labels (`int`): | |
Number of categories. | |
ignore_index (`int`): | |
Index that will be ignored during evaluation. | |
label_map (`dict`, *optional*): | |
            Mapping old label indices to new label indices.
        reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is the background class,
            and the background itself is not included among the dataset's classes (e.g. ADE20k). The background label will be replaced by 255.

    Returns:
        area_intersect (`ndarray`):
            The intersection of prediction and ground truth histogram on all classes.
        area_union (`ndarray`):
            The union of prediction and ground truth histogram on all classes.
        area_pred_label (`ndarray`):
            The prediction histogram on all classes.
        area_label (`ndarray`):
            The ground truth histogram on all classes.
    """
    # turn into NumPy arrays first, so the boolean-mask assignment below is valid
    # even when the inputs arrive as lists or PIL images
    pred_label = np.array(pred_label)
    label = np.array(label)

    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
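    # e.g. a hypothetical label_map={5: 1} relabels every ground-truth pixel
    # with id 5 to id 1 before the metric is computed (illustration only)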
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
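    # with reduce_labels=True the ids shift as {0 (background) -> 255 (ignored),
    # 1 -> 0, 2 -> 1, ...}; the 254 -> 255 step keeps the original 0s ignored
    # after the subtraction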
    mask = label != ignore_index
    pred_label = pred_label[mask]
    label = label[mask]
    intersect = pred_label[pred_label == label]
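    # np.histogram with num_labels bins over [0, num_labels - 1] buckets the
    # integer class ids, yielding one pixel count per class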
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
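
# A minimal worked example (not part of the original module) of intersect_and_union
# on a hypothetical 2x2 pair, with num_labels=2 and ignore_index=255:
#
#     pred_label = np.array([[0, 1], [1, 1]])
#     label      = np.array([[0, 1], [0, 1]])
#     # correctly classified pixels hold values [0, 1, 1], so:
#     # area_intersect  = [1, 2]
#     # area_pred_label = [1, 3]
#     # area_label      = [2, 2]
#     # area_union      = [1 + 2 - 1, 3 + 2 - 2] = [2, 3]  -> per-class IoU [0.5, 0.667]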


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
"""Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair. | |
Args: | |
results (`ndarray`): | |
List of prediction segmentation maps, each of shape (height, width). | |
gt_seg_maps (`ndarray`): | |
List of ground truth segmentation maps, each of shape (height, width). | |
num_labels (`int`): | |
Number of categories. | |
ignore_index (`int`): | |
Index that will be ignored during evaluation. | |
label_map (`dict`, *optional*): | |
Mapping old labels to new labels. The parameter will work only when label is str. | |
reduce_labels (`bool`, *optional*, defaults to `False`): | |
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, | |
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. | |

    Returns:
        total_area_intersect (`ndarray`):
            The intersection of prediction and ground truth histogram on all classes.
        total_area_union (`ndarray`):
            The union of prediction and ground truth histogram on all classes.
        total_area_pred_label (`ndarray`):
            The prediction histogram on all classes.
        total_area_label (`ndarray`):
            The ground truth histogram on all classes.
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
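
# Note (not in the original source): the per-class areas are summed over the whole
# dataset before any division, so the metrics below are dataset-level scores rather
# than averages of per-image scores.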


def mean_iou(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
"""Calculate Mean Intersection and Union (mIoU). | |
Args: | |
results (`ndarray`): | |
List of prediction segmentation maps, each of shape (height, width). | |
gt_seg_maps (`ndarray`): | |
List of ground truth segmentation maps, each of shape (height, width). | |
num_labels (`int`): | |
Number of categories. | |
ignore_index (`int`): | |
Index that will be ignored during evaluation. | |
nan_to_num (`int`, *optional*): | |
If specified, NaN values will be replaced by the number defined by the user. | |
label_map (`dict`, *optional*): | |
Mapping old labels to new labels. The parameter will work only when label is str. | |
reduce_labels (`bool`, *optional*, defaults to `False`): | |
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, | |
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. | |

    Returns:
        `Dict[str, float | ndarray]` comprising various elements:
        - *mean_iou* (`float`):
            Mean Intersection-over-Union (IoU averaged over all categories).
        - *mean_accuracy* (`float`):
            Mean accuracy (averaged over all categories).
        - *overall_accuracy* (`float`):
            Overall accuracy on all images.
        - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
            Per category accuracy.
        - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
            Per category IoU.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = dict()

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
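
# A minimal sketch (not part of the original module) of calling mean_iou directly
# on one hypothetical 2x2 map pair with num_labels=2:
#
#     preds = [np.array([[0, 1], [1, 1]])]
#     gts = [np.array([[0, 1], [0, 1]])]
#     mean_iou(results=preds, gt_seg_maps=gts, num_labels=2, ignore_index=255)
#     # -> mean_iou ~= 0.583, mean_accuracy = 0.75, overall_accuracy = 0.75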


class MeanIoU(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Image(),
                    "references": datasets.Image(),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
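

if __name__ == "__main__":
    # Quick self-check (a sketch, not in the original module): exercise the
    # standalone mean_iou function on tiny hypothetical maps, avoiding the hub
    # download that evaluate.load("mean_iou") would require.
    pred = [np.array([[2, 2], [1, 2]])]
    gt = [np.array([[2, 2], [0, 2]])]
    print(mean_iou(results=pred, gt_seg_maps=gt, num_labels=3, ignore_index=255))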