# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import evaluate | |
import datasets | |
from itertools import repeat | |
# TODO: Add BibTeX citation
_CITATION = """\
@InProceedings{huggingface:module,
title = {A great new module},
authors={huggingface, Inc.},
year={2020}
}
"""

_DESCRIPTION = """\
Computes accuracy scores for next-action prediction, bucketed by the number of
action tokens seen before each prediction, together with the support of each bucket.
"""

_KWARGS_DESCRIPTION = """
Calculates how good predictions are given some references, grouped by the number
of actions seen before each prediction.
Args:
    predictions: list of predictions to score. Each prediction
        should be an input id.
    references: list of references, one per prediction. Each
        reference should be an input id.
    actions_seen: for each prediction, the number of action tokens seen
        before generating the predicted action token.
    max_actions_seen: the number of scores to calculate. For example, with
        max_actions_seen = 5, scores are calculated for predictions with
        actions_seen = 0, 1, 2, 3, 4, 5.
Returns:
    score_k: accuracy calculated on predictions with actions_seen = k. The number
        of scores returned depends on the value of max_actions_seen. For example,
        with max_actions_seen = 5, we will have score_0, score_1, ..., score_5.
        Buckets with no predictions are omitted from the results.
    support_k: the number of predictions contributing to the corresponding score_k.
Examples:
    >>> my_new_module = evaluate.load("my_new_module")
    >>> results = my_new_module.compute(predictions=[0, 1], references=[0, 1], actions_seen=[0, 1])
    >>> print(results)
    {'support_0': 1, 'score_0': 1.0, 'support_1': 1, 'score_1': 1.0}
"""


class MetricaTesi(evaluate.Metric):
    """Accuracy of action-token predictions, bucketed by the number of actions seen."""

    def _info(self):
        # TODO: Specifies the evaluate.EvaluationModuleInfo object
        return evaluate.MetricInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                    "actions_seen": datasets.Value("int32"),
                }
            ),
            # Homepage of the module for documentation
            homepage="http://module.homepage",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"],
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def _compute(self, predictions, references, actions_seen, max_actions_seen=20):
        """Returns the accuracy score and support for each actions_seen bucket."""
        results = dict()
        for i in range(max_actions_seen + 1):
            # Number of predictions made after seeing exactly i actions.
            support = sum(n == i for n in actions_seen)
            if support == 0:
                # Skip buckets with no predictions to score.
                continue
            # Count the predictions in this bucket that match their reference.
            correct = sum(
                prediction == reference
                for prediction, reference, n in zip(predictions, references, actions_seen)
                if n == i
            )
            results[f"support_{i}"] = support
            results[f"score_{i}"] = correct / support
        return results
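

# A minimal usage sketch, not part of the module itself. It instantiates the
# metric class directly instead of going through evaluate.load(), which would
# need this script's path on disk; the example inputs below are made up.
if __name__ == "__main__":
    metric = MetricaTesi()
    results = metric.compute(
        predictions=[3, 7, 2, 2],
        references=[3, 5, 2, 9],
        actions_seen=[0, 0, 1, 1],
        max_actions_seen=5,
    )
    # Two predictions per bucket, one correct in each, so the expected output is:
    # {'support_0': 2, 'score_0': 0.5, 'support_1': 2, 'score_1': 0.5}
    print(results)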