import re
import string

from lifelines.utils import concordance_index


def keep_integers_commas_spaces(input_string):
    """Drop every character except digits, whitespace, and commas."""
    cleaned_string = re.sub(r'[^0-9\s,]', '', str(input_string))
    return cleaned_string


def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace.""" |
|
|
|
def remove_punc(text): |
|
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    normalized_list = keep_integers_commas_spaces(s).replace(",", " ").strip(string.punctuation).split()
    try:
        normalized_list = [int(remove_punc(x).strip()) for x in normalized_list]
    except ValueError:
        return []
    return normalized_list


def concordant_index_score(prediction, ground_truth):
    """Score a predicted ID ordering against a gold ordering.

    Returns 0.0 unless both contain exactly the same set of IDs; otherwise
    returns the concordance index between the two orderings.
    """
    normalized_prediction = normalize_answer(prediction)
    normalized_ground_truth = normalize_answer(ground_truth)
    # The prediction must contain exactly the same IDs as the ground truth.
    if sorted(normalized_ground_truth) != sorted(normalized_prediction):
        return 0.0

    pred_order = summ_id_per_location_to_pos_of_id(normalized_prediction)
    gold_order = summ_id_per_location_to_pos_of_id(normalized_ground_truth)

    return concordance_index(gold_order, pred_order)


def summ_id_per_location_to_pos_of_id(id_per_location):
    """Invert an ordering: map each ID (assumed to be 1..n) to its 1-based position."""
    order = [-1] * len(id_per_location)
    for i, id_ in enumerate(id_per_location, 1):
        order[id_ - 1] = i
    return order


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best score of the prediction over all reference answers."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)


def compute_concordance_index(predictions, references):
    """Average the per-example concordance index over the dataset, scaled to 0-100."""
    concordant_index = 0
    for prediction, ground_truths in zip(predictions, references):
        concordant_index += metric_max_over_ground_truths(concordant_index_score, prediction, ground_truths)
    return 100.0 * concordant_index / len(predictions)
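

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; the toy data below is made up to
    # show the expected input format): each prediction is a comma-separated
    # ordering of summary IDs, and each reference is a list of acceptable gold
    # orderings.
    toy_predictions = ["1, 2, 3, 4", "4, 3, 2, 1"]
    toy_references = [["1, 2, 3, 4"], ["1, 2, 3, 4"]]
    # The first prediction matches the gold order exactly (score 1.0); the second
    # is fully reversed (score 0.0), so the percent-scaled average is 50.0.
    print(compute_concordance_index(toy_predictions, toy_references))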