zero_scrolls/metrics/exp_similarity.py
import re

# Matches a percentage token such as "42%" or "3.5%": one or more digits,
# an optional decimal part, then a percent sign.
PATTERN = re.compile(r'\d+\.?\d*%')

def find_percentage(s):
    # Return the first percentage substring in s, or None if there is none.
    match = PATTERN.search(s)
    if match is None:
        return None
    return match.group(0)

def to_int(s):
    # Note: returns a float despite the name -- the numeric value of the
    # first percentage found in s, or None if none is found.
    percentage_string = find_percentage(s)
    if percentage_string is None:
        return None
    percentage_string = percentage_string.replace("%", "")
    percentage = float(percentage_string)
    return percentage
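
# A few hedged examples of the parsing helpers (illustrative strings, not
# from the original file):
#   find_percentage("about 12.5% of users") -> "12.5%"
#   to_int("about 12.5% of users")          -> 12.5
#   to_int("no percentage here")            -> None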

def exp_similarity_score(prediction, ground_truth):
    # Score decays exponentially with the gap between the predicted and gold
    # percentages: 1.0 for an exact match, halving per 10 points of error.
    ground_truth_percentage = to_int(ground_truth)
    pred_percentage = to_int(str(prediction))
    if ground_truth_percentage is None:
        raise ValueError(f"no percentage found in ground truth: {ground_truth}")
    if pred_percentage is None:
        return 0.0
    return 0.5 ** (abs(ground_truth_percentage - pred_percentage) / 10)
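
# Worked example of the decay (illustrative values, not from the original
# file): a prediction 10 points off the gold percentage scores 0.5 ** 1 = 0.5,
# 20 points off scores 0.25, and an exact match scores 0.5 ** 0 = 1.0, e.g.:
#   exp_similarity_score("40%", "50%") -> 0.5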

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every acceptable answer and keep the best.
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)

def compute_exp_similarity(predictions, references):
    # Mean best-match score over the dataset, scaled to a 0-100 range.
    exp_similarity = 0
    for prediction, ground_truths in zip(predictions, references):
        exp_similarity += metric_max_over_ground_truths(exp_similarity_score, prediction, ground_truths)
    return 100 * exp_similarity / len(predictions)
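
# A minimal usage sketch (an assumption based on the function signature, not
# part of the original file): predictions is a list of model outputs and
# references is a parallel list of lists of acceptable answers.
if __name__ == "__main__":
    predictions = ["The answer is 25%.", "roughly 40%"]
    references = [["25%"], ["50%"]]
    # 25% vs 25% -> 1.0; 40% vs 50% -> 0.5; mean of 0.75 scaled to 75.0
    print(compute_exp_similarity(predictions, references))  # 75.0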