import re
import string

from lifelines.utils import concordance_index


def keep_integers_commas_spaces(input_string):
    """Drop every character except digits, commas, and whitespace."""
    return re.sub(r"[^0-9\s,]", "", str(input_string))
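
# Hypothetical illustration; the behavior follows from the regex above:
#   keep_integers_commas_spaces("IDs: 4, 7!") -> " 4, 7"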


def normalize_answer(s):
    """Parse a string into the list of integer IDs it contains.

    Commas count as separators; every other non-digit, non-whitespace
    character is stripped. Returns [] if any token fails to parse.
    """

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    tokens = keep_integers_commas_spaces(s).replace(",", " ").strip(string.punctuation).split()
    try:
        return [int(remove_punc(x).strip()) for x in tokens]
    except ValueError:
        return []
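
# Hedged examples, inferred from the parsing above:
#   normalize_answer("1, 3, 2")        -> [1, 3, 2]
#   normalize_answer("Order: 1; 3; 2") -> [1, 3, 2]   (letters and ';' stripped)
#   normalize_answer("no ids here")    -> []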


def concordant_index_score(prediction, ground_truth):
    """Concordance index between a predicted and a gold ID ordering."""
    normalized_prediction = normalize_answer(prediction)
    normalized_ground_truth = normalize_answer(ground_truth)
    # A prediction that is not a permutation of the gold IDs scores zero.
    if sorted(normalized_ground_truth) != sorted(normalized_prediction):
        return 0.0
    pred_order = summ_id_per_location_to_pos_of_id(normalized_prediction)
    gold_order = summ_id_per_location_to_pos_of_id(normalized_ground_truth)
    # Fraction of ID pairs whose relative order agrees across the two lists.
    return concordance_index(gold_order, pred_order)
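
# Hedged example: concordant_index_score("2, 1, 3", "1, 2, 3") keeps two of
# the three ID pairs in the right relative order, so it returns ~0.667; a
# prediction with a missing or extra ID (e.g. "1, 2") returns 0.0 outright.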


def summ_id_per_location_to_pos_of_id(id_per_location):
    """Invert an ordering: map each ID (assumed to be 1..n) to its 1-based position."""
    order = [-1] * len(id_per_location)
    for i, id_ in enumerate(id_per_location, 1):
        order[id_ - 1] = i
    return order
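
# Worked example, following the loop above: for [2, 3, 1], ID 2 sits at
# position 1, ID 3 at position 2, and ID 1 at position 3, so the result
# is [3, 1, 2].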


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score the prediction against every reference and keep the best score."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def compute_concordance_index(predictions, references):
    """Average the per-example concordance scores, scaled to 0-100."""
    total = 0.0
    for prediction, ground_truths in zip(predictions, references):
        total += metric_max_over_ground_truths(concordant_index_score, prediction, ground_truths)
    return 100.0 * total / len(predictions)
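

# Minimal usage sketch; the inputs below are made up for illustration, and
# the surrounding benchmark would supply the real predictions/references.
if __name__ == "__main__":
    predictions = ["3, 1, 2", "1, 2, 3"]
    references = [["1, 2, 3"], ["1, 2, 3"]]
    # The first prediction keeps 1 of 3 ID pairs in order (score 1/3); the
    # second is exact (score 1.0), so the averaged, scaled result is ~66.67.
    print(compute_concordance_index(predictions, references))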