"""
Simple script to pre-download and JSON-serialize the query-relations of MTEB datasets, since the `mteb` library requires us to download the full dataset to get just the qrels, and that's slow to do every time we want just qrels.
"""
import json
from pathlib import Path

import mteb
from tqdm.auto import tqdm

# English MTEB retrieval tasks whose qrels we want to cache.
names = [
    'ArguAna',
    'CQADupstackAndroidRetrieval',
    'CQADupstackEnglishRetrieval',
    'CQADupstackGamingRetrieval',
    'CQADupstackGisRetrieval',
    'CQADupstackMathematicaRetrieval',
    'CQADupstackPhysicsRetrieval',
    'CQADupstackProgrammersRetrieval',
    'CQADupstackStatsRetrieval',
    'CQADupstackTexRetrieval',
    'CQADupstackUnixRetrieval',
    'CQADupstackWebmastersRetrieval',
    'CQADupstackWordpressRetrieval',
    'ClimateFEVER',
    'DBPedia',
    'FEVER',
    'FiQA2018',
    'HotpotQA',
    'MSMARCO',
    'NFCorpus',
    'NQ',
    'QuoraRetrieval',
    'SCIDOCS',
    'SciFact',
    'TRECCOVID',
    'Touche2020',
]

# Write the cached qrels as JSON files alongside this script.
out_path = Path(__file__).parent


def load_mteb_qrels(task_name: str) -> dict:
    """Load the relevance judgments (qrels) for one MTEB task's evaluation split."""
    # MTEB evaluates MSMARCO on its "dev" split; every other task here uses "test".
    split_name = "dev" if task_name == "MSMARCO" else "test"
    task_obj = mteb.MTEB(tasks=[mteb.get_task(task_name, languages=["en"])]).tasks[0]
    task_obj.load_data(eval_splits=[split_name])
    qrels = task_obj.relevant_docs[split_name]
    return qrels


for name in tqdm(names, desc="downloading qrels", unit="dataset"):
    qrel = load_mteb_qrels(name)
    out_file_path = out_path / f"{name}.json"
    out_file_path.write_text(json.dumps(qrel, indent=2, sort_keys=True))
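
# Example of reading a cached qrels file back in later code (a minimal sketch;
# the task name 'SciFact' and the consuming code are illustrative, not part of
# this script):
#
#     qrels = json.loads((Path(__file__).parent / "SciFact.json").read_text())
#     # `qrels` maps each query ID to a {doc_id: relevance_score} dict, mirroring
#     # the structure of `task.relevant_docs[split]` in `mteb`.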