File size: 1,524 Bytes
fd84559
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
"""
Simple script to pre-download and JSON-serialize the query-relations of MTEB datasets, since the `mteb` library requires us to download the full dataset to get just the qrels, and that's slow to do every time we want just qrels.
"""

import json
from pathlib import Path

import mteb
from tqdm.auto import tqdm

names = [
    'ArguAna',
    'CQADupstackAndroidRetrieval',
    'CQADupstackEnglishRetrieval',
    'CQADupstackGamingRetrieval',
    'CQADupstackGisRetrieval',
    'CQADupstackMathematicaRetrieval',
    'CQADupstackPhysicsRetrieval',
    'CQADupstackProgrammersRetrieval',
    'CQADupstackStatsRetrieval',
    'CQADupstackTexRetrieval',
    'CQADupstackUnixRetrieval',
    'CQADupstackWebmastersRetrieval',
    'CQADupstackWordpressRetrieval',
    'ClimateFEVER',
    'DBPedia',
    'FEVER',
    'FiQA2018',
    'HotpotQA',
    'MSMARCO',
    'NFCorpus',
    'NQ',
    'QuoraRetrieval',
    'SCIDOCS',
    'SciFact',
    'TRECCOVID',
    'Touche2020'
]

out_path = Path(__file__)

def load_mteb_qrels(task_name: str) -> dict:
    """Fetch the relevance judgments (qrels) for one MTEB retrieval task.

    Downloads the task's data via the ``mteb`` library and returns the
    qrels mapping for its evaluation split. MSMARCO is conventionally
    evaluated on ``dev``; every other task here uses ``test``.
    """
    eval_split = "dev" if task_name == "MSMARCO" else "test"
    task = mteb.get_task(task_name, languages=["en"])
    loaded = mteb.MTEB(tasks=[task]).tasks[0]
    loaded.load_data(eval_splits=[eval_split])
    return loaded.relevant_docs[eval_split]

# Download each task's qrels and serialize them as <TaskName>.json.
for name in tqdm(names, desc="downloading qrels", unit="dataset"):
    qrel = load_mteb_qrels(name)
    out_file_path = out_path / f"{name}.json"
    # sort_keys makes the output stable and diff-friendly. FIX: pass an
    # explicit encoding — write_text() otherwise uses the platform locale
    # encoding, which can corrupt non-ASCII IDs (e.g. on Windows/cp1252).
    out_file_path.write_text(
        json.dumps(qrel, indent=2, sort_keys=True),
        encoding="utf-8",
    )