zero_scrolls/metrics/accuracy.py

import re

# A standalone multiple-choice answer letter (A, B, C, or D) on word boundaries.
PATTERN = re.compile(r'\b[A-D]\b')
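# Illustrative behaviour on made-up strings (not from the original file's tests):
#   PATTERN.search("Answer: (C) Madrid").group()  -> "C"
#   PATTERN.search("no choice letter here")       -> None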

def find_answer(s):
    """Return the first standalone A-D letter found in s, or None if there is none."""
    match = PATTERN.search(s)
    if match is None:
        return None
    return match.group()

def accuracy_score(prediction, ground_truth):
    """Exact match on the extracted answer letter; the ground truth must contain a valid letter."""
    letter_ground_truth = find_answer(ground_truth)
    assert letter_ground_truth in ["A", "B", "C", "D"], f"Invalid ground truth: {ground_truth}"
    letter_prediction = find_answer(str(prediction))
    return letter_prediction == letter_ground_truth

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score the prediction against every acceptable reference and keep the best score."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)

def compute_accuracy(predictions, references):
    """Average accuracy over all examples, reported as a percentage in [0, 100]."""
    accuracy = 0
    for prediction, ground_truths in zip(predictions, references):
        accuracy += metric_max_over_ground_truths(accuracy_score, prediction, ground_truths)
    return 100.0 * accuracy / len(predictions)
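

# Usage sketch with made-up inputs: each prediction is a raw model output and each
# entry of `references` is a list, since an example may have several acceptable
# ground truths. With the strings below, two of the three extracted letters match,
# so the expected result is roughly 66.67.
if __name__ == "__main__":
    predictions = ["The answer is (B)", "D", "I think it is A."]
    references = [["(B) Paris"], ["(C) 1945"], ["(A) oxygen"]]
    print(compute_accuracy(predictions, references))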