# Source snapshot ee3ae9f (1,402 bytes); file-viewer header and line-number gutter removed.
import re
# Percentage token: one or more digits, an optional decimal part, then "%".
# Note: does not match a bare leading-dot form like ".5%".
PATTERN = re.compile(r'\d+\.?\d*%')
def find_percentage(s):
    """Return the first percentage substring in *s* (e.g. "42.5%"), or None."""
    m = PATTERN.search(s)
    return m.group(0) if m is not None else None
def to_int(s):
    """Extract the first percentage in *s* and return its numeric value.

    Despite the name, the result is a float ("42.5%" -> 42.5).
    Returns None when *s* contains no percentage token.
    """
    token = find_percentage(s)
    if token is None:
        return None
    return float(token.replace("%", ""))
def exp_similarity_score(prediction, ground_truth):
    """Exponentially decaying similarity between two percentage strings.

    Identical percentages score 1.0; every 10 points of absolute gap
    halves the score. Raises ValueError when *ground_truth* contains no
    percentage; returns 0.0 when *prediction* contains none.
    """
    ground_truth_percentage = to_int(ground_truth)
    if ground_truth_percentage is None:
        raise ValueError(f"ground_truth_percentage is None: {ground_truth_percentage}")
    pred_percentage = to_int(str(prediction))
    if pred_percentage is None:
        return 0.0
    gap = abs(ground_truth_percentage - pred_percentage)
    return 0.5 ** (gap / 10)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best metric_fn(prediction, gt) over all *ground_truths*.

    Raises ValueError (from max) when *ground_truths* is empty, matching
    the original append-then-max behavior.
    """
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def compute_exp_similarity(predictions, references):
    """Average exp-similarity over the dataset, scaled to 0-100.

    predictions: sequence of model outputs (each is stringified before
        percentage extraction).
    references: parallel sequence where each item is an iterable of
        acceptable ground-truth strings; the best-scoring one counts.
    Pairs beyond the shorter of the two sequences are ignored (zip).

    Returns 0.0 for an empty *predictions* sequence instead of raising
    ZeroDivisionError (bug fix: the mean of zero items was previously a crash).
    """
    if not predictions:
        return 0.0
    total = 0.0
    for prediction, ground_truths in zip(predictions, references):
        total += metric_max_over_ground_truths(exp_similarity_score, prediction, ground_truths)
    return 100 * total / len(predictions)