# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
# https://huggingface.co/spaces/jordyvl/ece | |
from typing import List, Optional

import datasets
import evaluate
import numpy as np
_CITATION = """\
@InProceedings{huggingface:module,
title = {Expected Calibration Error},
author = {Jordy Van Landeghem},
year = {2022}
}
"""
_DESCRIPTION = """\
This module evaluates the calibration of a probabilistic classifier.
Concretely, it provides a binned empirical estimator of top-1 calibration error. [1]
"""
_KWARGS_DESCRIPTION = """
Calculates the expected calibration error (ECE): how closely the confidence of a
classifier's predictions matches their empirical accuracy.
Args:
    predictions: 2D array (N x K) of confidence estimates.
    references: 1D array (N,) of ground-truth class indices.
    n_bins: int, default=10
        Number of bins of :math:`[\\frac{1}{n_{\\text{classes}}}, 1]` for the confidence estimates.
    p: int, default=1
        Power of the calibration error, :math:`1 \\leq p \\leq \\infty`.
Returns:
    Expected calibration error (ECE), float.
Examples:
    >>> my_new_module = evaluate.load("jordyvl/ece")
    >>> results = my_new_module.compute(references=[0, 1, 2], predictions=[[0.6, 0.2, 0.2], [0, 0.95, 0.05], [0.7, 0.1, 0.2]])
    >>> print(results)
    {'ECE': 0.1333333333333334}
"""

# Discretization and binning
def create_bins(n_bins=10, scheme="equal-range", bin_range=None, P=None):
    assert scheme in [
        "equal-range",
        "equal-mass",
    ], f"This binning scheme {scheme} is not implemented yet"
    if bin_range is None:
        if P is None:
            bin_range = [0, 1]  # no way to know the range
        else:
            bin_range = [min(P), max(P)]
    if scheme == "equal-range":
        bins = np.linspace(bin_range[0], bin_range[1], n_bins + 1)  # equal-width bins
    elif scheme == "equal-mass":
        assert P is not None, "Equal-mass binning requires the confidence estimates P"
        assert P.size >= n_bins, "Fewer points than bins"
        # assume global equal-mass binning; not discriminated per class
        P = P.flatten()
        # split sorted probabilities into groups of approximately equal size
        groups = np.array_split(np.sort(P), n_bins)
        # take the rightmost entry of each equal-size group as that bin's upper edge
        bin_upper_edges = []
        for cur_group in range(n_bins):
            bin_upper_edges += [max(groups[cur_group])]
        bin_upper_edges += [1]  # always add 1 as the rightmost edge
        # deduplicate and sort; important for numerical conditions
        # (deduplication might reduce the number of bins)
        bin_upper_edges = sorted(set(bin_upper_edges))
        bins = np.array(bin_upper_edges)
    return bins
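
# A brief, illustrative sketch of the two binning schemes on a toy confidence
# vector; the outputs below are what this implementation returns (run in a REPL):
#
#   >>> create_bins(n_bins=3, scheme="equal-range")
#   array([0.        , 0.33333333, 0.66666667, 1.        ])
#   >>> create_bins(n_bins=3, scheme="equal-mass", P=np.array([0.1, 0.4, 0.5, 0.8, 0.9, 0.95]))
#   array([0.4 , 0.8 , 0.95, 1.  ])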

def discretize_into_bins(P, bins):
    contains_rightmost = bool(bins[-1] > 1)  # outlier bin beyond bin_range[1]
    contains_leftmost = bool(bins[0] <= 0)  # outlier bin before bin_range[0]
    oneDbins = np.digitize(
        P, bins, right=contains_rightmost
    )  # since bins may contain extra rightmost (& leftmost) bins
    if contains_leftmost:
        oneDbins -= 1
    # Fix akin to scipy.stats.binned_statistic_dd:
    # tie-breaking to the left for the rightmost bin.
    # With `digitize`, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for k in range(P.shape[-1]):
        # find the rounding precision
        dedges_min = np.diff(bins).min()
        if dedges_min == 0:
            raise ValueError("The smallest edge difference is numerically 0.")
        decimal = int(-np.log10(dedges_min)) + 6
        # find which points lie on the rightmost edge
        on_edge = np.where(
            (P[:, k] >= bins[-1]) & (np.around(P[:, k], decimal) == np.around(bins[-1], decimal))
        )[0]
        # shift these points one bin to the left
        oneDbins[on_edge, k] -= 1
    return oneDbins
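
# Illustration of the rightmost-edge tie-breaking (outputs verified against this
# implementation): a confidence of exactly 1.0 stays in the last valid bin
# instead of becoming an outlier.
#
#   >>> bins = np.linspace(0, 1, 11)
#   >>> discretize_into_bins(np.array([[0.55, 1.0]]), bins)
#   array([[5, 9]])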

def manual_binned_statistic(P, y_correct, bins, statistic="mean"):
    bin_assignments = discretize_into_bins(np.expand_dims(P, 0), bins)[0]
    result = np.empty([len(bins)], float)
    result.fill(np.nan)  # cannot assume each bin will have observations
    flatcount = np.bincount(bin_assignments)
    a = flatcount.nonzero()  # only fill bins that have observations
    if statistic == "mean":
        flatsum = np.bincount(bin_assignments, y_correct)
        result[a] = flatsum[a] / flatcount[a]
    return result, bins, bin_assignments + 1  # bin ids are 1-indexed (as in Julia)
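
# A small worked example (outputs from this implementation): three confidence
# estimates with correctness [1, 1, 0] land in bins 7, 10 and 8 (1-indexed),
# with per-bin accuracies of 1, 0 and 1 respectively.
#
#   >>> bins = np.linspace(0, 1, 11)
#   >>> means, _, assignments = manual_binned_statistic(
#   ...     np.array([0.65, 0.95, 0.75]), np.array([1, 1, 0]), bins)
#   >>> assignments
#   array([ 7, 10,  8])
#   >>> means[~np.isnan(means)]
#   array([1., 0., 1.])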

def bin_calibrated_accuracy(bins, proxy="upper-edge"):
    assert proxy in ["center", "upper-edge"], f"Unsupported proxy {proxy}"
    contains_leftmost = bool(bins[0] == 0)  # outlier bin before bin_range[0]
    if proxy == "upper-edge":
        return bins[1:] if contains_leftmost else bins
    if proxy == "center":
        return bins[:-1] + np.diff(bins) / 2

def CE_estimate(y_correct, P, bins=None, p=1, proxy="upper-edge", detail=False):
    """
    y_correct: binary (N x 1)
    P: normalized (N x 1), either the max or a per-class probability
    Summary: weighted average over the accuracy/confidence difference of discrete bins of prediction probability
    """
    # calibrated accuracy per bin: the bin center or the right/upper bin edge as confidence proxy
    calibrated_acc = bin_calibrated_accuracy(bins, proxy=proxy)
    empirical_acc, bin_edges, bin_assignment = manual_binned_statistic(P, y_correct, bins)
    bin_numbers, weights_ece = np.unique(bin_assignment, return_counts=True)
    anindices = bin_numbers - 1  # back to 0-indexed bins
    # expected calibration error
    if p < np.inf:  # L^p-CE
        CE = np.average(
            abs(empirical_acc[anindices] - calibrated_acc[anindices]) ** p, weights=weights_ece
        )
    elif np.isinf(p):  # max-ECE
        CE = np.max(abs(empirical_acc[anindices] - calibrated_acc[anindices]))
    if detail:
        return CE, calibrated_acc, empirical_acc, weights_ece
    return CE
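
# A hand-checked example (outputs from this implementation): with 10 equal-range
# bins and the "upper-edge" proxy, confidences [0.65, 0.95, 0.75] with
# correctness [1, 1, 0] give (|1 - 0.7| + |1 - 1.0| + |0 - 0.8|) / 3 ≈ 0.367.
#
#   >>> bins = np.linspace(0, 1, 11)
#   >>> round(float(CE_estimate(np.array([1, 1, 0]), np.array([0.65, 0.95, 0.75]), bins=bins)), 3)
#   0.367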

def top_1_CE(Y, P, **kwargs):
    y_correct = (Y == np.argmax(P, -1)).astype(int)  # create condition y = ŷ ∈ [K]
    p_max = np.max(P, -1)  # create p̂ as the top-1 softmax probability ∈ [0, 1]
    bins = create_bins(
        n_bins=kwargs["n_bins"], bin_range=kwargs["bin_range"], scheme=kwargs["scheme"], P=p_max
    )
    CE = CE_estimate(
        y_correct, p_max, bins=bins, p=kwargs["p"], proxy=kwargs["proxy"], detail=kwargs["detail"]
    )
    if kwargs["detail"]:
        return {
            "ECE": CE[0],
            "y_bar": CE[1],
            "p_bar": CE[2],
            "bin_freq": CE[3],
            "p_bar_cont": np.mean(p_max, -1),
            "accuracy": np.mean(y_correct),
        }
    return CE
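
# End-to-end sketch of top_1_CE with the module's defaults (outputs from this
# implementation): all three predictions are correct, yet the upper-edge proxy
# still reports a gap of (0.1 + 0.2 + 0.5) / 3 ≈ 0.267 between confidence and accuracy.
#
#   >>> Y = np.array([0, 1, 2])
#   >>> P = np.array([[0.85, 0.10, 0.05], [0.15, 0.75, 0.10], [0.30, 0.25, 0.45]])
#   >>> round(float(top_1_CE(Y, P, n_bins=10, bin_range=[0, 1], scheme="equal-range",
#   ...                      proxy="upper-edge", p=1, detail=False)), 3)
#   0.267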

class ECE(evaluate.EvaluationModule):
    """
    0. create the binning scheme [discretization of f]
    1. build a histogram P(f(X))
    2. build the conditional density estimate P(y|f(X))
    3. average bin probabilities f_B as the center/edge of each bin
    4. apply the L^p norm distance and weights
    """

    def _info(self):
        return evaluate.EvaluationModuleInfo(
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64"),
                }
            ),
            # Homepage of the module for documentation
            homepage="https://huggingface.co/spaces/jordyvl/ece",
            # Additional links to the codebase or references
            codebase_urls=["https://huggingface.co/spaces/jordyvl/ece"],
            reference_urls=[],
        )

    def init_kwargs(
        self,
        n_bins: int = 10,
        bin_range: Optional[List[float]] = [0, 1],
        scheme: str = "equal-range",
        proxy: str = "upper-edge",
        p: float = 1,
        detail: bool = False,
        **kwargs,
    ):
        self.n_bins = n_bins
        self.bin_range = bin_range
        self.scheme = scheme
        self.proxy = proxy
        self.p = p
        self.detail = detail

    def _compute(self, predictions, references, **kwargs):
        """Returns the ECE score(s)"""
        # convert to numpy arrays
        references = np.array(references, dtype=np.int64)
        predictions = np.array(predictions, dtype=np.float32)
        assert (
            predictions.shape[0] == references.shape[0]
        ), "predictions and references need to have the same number of instances"
        # assert that predictions are 2D and references 1D
        if len(predictions.shape) != 2:
            raise ValueError("Expected `predictions` to be a 2D vector (N x K)")
        if len(references.shape) != 1:
            # check whether references were wrongly passed as one-hot
            if (references.shape[-1] == predictions.shape[1]) and (
                np.sum(references) == predictions.shape[0]
            ):
                references = np.argmax(references, -1)
            else:
                raise ValueError("Expected `references` to be a 1D vector (N,)")
        self.init_kwargs(**kwargs)
        ECE = top_1_CE(references, predictions, **self.__dict__)
        if self.detail:
            return ECE
        return {
            "ECE": ECE,
        }

def test_ECE(**kwargs):
    N = 10  # N evaluation instances {(x_i, y_i)}_{i=1}^N
    K = 5  # K-class problem

    def random_mc_instance(concentration=1, onehot=False):
        reference = np.argmax(
            np.random.dirichlet([concentration for _ in range(K)]), -1
        )  # class target
        prediction = np.random.dirichlet([concentration for _ in range(K)])  # probabilities
        if onehot:
            reference = np.eye(K)[reference]  # `reference` is already a class index
        return reference, prediction

    references, predictions = list(zip(*[random_mc_instance() for i in range(N)]))
    references = np.array(references, dtype=np.int64)
    predictions = np.array(predictions, dtype=np.float32)
    res = ECE()._compute(predictions, references, **kwargs)
    print(f"ECE: {res['ECE']}")
    res = ECE()._compute(predictions, references, detail=True)
    print(f"ECE: {res['ECE']}")

def test_deterministic():
    res = ECE()._compute(
        references=[0, 1, 2],
        predictions=[[0.63, 0.2, 0.2], [0, 0.95, 0.05], [0.72, 0.1, 0.2]],
        detail=True,
    )
    print(f"ECE: {res['ECE']}\n {res}")

def test_equalmass_binning():
    probs = np.array([0.63, 0.2, 0.2, 0, 0.95, 0.05, 0.72, 0.1, 0.2])
    kwargs = dict(
        n_bins=5,
        scheme="equal-mass",
        bin_range=None,
        proxy="upper-edge",
        p=1,
        detail=True,
    )
    # exercise create_bins directly before running the metric end-to-end
    bins = create_bins(
        n_bins=kwargs["n_bins"], scheme=kwargs["scheme"], bin_range=kwargs["bin_range"], P=probs
    )
    print(f"equal-mass bins: {bins}")
    test_ECE(**kwargs)

def test_perfect_predictions(K=3):
    references = [0, 1, 2]
    res = ECE()._compute(
        references=references,
        predictions=np.eye(K)[references],
        detail=True,
    )
    print(f"ECE: {res['ECE']}\n {res}")


if __name__ == "__main__":
    test_perfect_predictions()
    test_equalmass_binning()
    test_deterministic()
    test_ECE()