|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
from collections import defaultdict |
|
|
|
|
|
_CITATION = "" |
|
|
|
languages = {'yoruba':'yo', |
|
'hausa':'ha', |
|
'swahili':'sw', |
|
'somali':'so'} |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset consists of the queries and relevance judgements in the CIRAL test collection. |
|
""" |
|
|
|
_HOMEPAGE = "" |
|
|
|
_LICENSE = "" |
|
|
|
_URLS = { |
|
lang: { |
|
'dev': [ |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-dev.tsv', |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-dev.tsv' |
|
], |
|
'testA':[ |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-a.tsv', |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-test-a.tsv', |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-test-a-pools.tsv', |
|
], |
|
'testB':[ |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/topics/topics.ciral-v1.0-{lang_code}-test-b.tsv', |
|
f'https://huggingface.co/datasets/CIRAL/ciral/resolve/main/ciral-{lang}/qrels/qrels.ciral-v1.0-{lang_code}-test-b.tsv', |
|
] |
|
} for lang, lang_code in languages.items() |
|
} |
|
|
|
|
|
def load_queries(_file):
    """Load topic queries from a TSV file of ``query_id<TAB>query`` lines.

    Args:
        _file: Path to a topics TSV file, or None when the split has no
            topics file.

    Returns:
        Dict mapping query id (str) to query text (str). Empty dict when
        ``_file`` is None.
    """
    if _file is None:
        return {}

    queries = {}
    with open(_file, encoding="utf-8") as query_file:
        for line in query_file:
            fields = line.strip().split('\t')
            # Skip blank or malformed lines entirely; the old fallback
            # silently inserted a bogus "" -> "" entry for every such line.
            if len(fields) != 2:
                continue
            qid, query = fields
            queries[qid] = query
    return queries
|
|
|
def load_qrels(_file):
    """Load TREC-style relevance judgements from a 4-column TSV file.

    Each line is ``query_id<TAB>iteration<TAB>doc_id<TAB>relevance``; the
    iteration column is ignored.

    Args:
        _file: Path to a qrels TSV file, or None when the split has no such
            file (e.g. no pooled qrels for dev/testB).

    Returns:
        ``defaultdict`` mapping query id -> {doc id: int relevance}, or None
        when ``_file`` is None so callers can distinguish "file absent" from
        "file empty".
    """
    if _file is None:
        return None

    qrels = defaultdict(dict)
    with open(_file, encoding="utf-8") as qrel_file:
        for line in qrel_file:
            fields = line.strip().split('\t')
            # Skip blank or malformed lines; the old fallback inserted a
            # spurious qrels[""][""] = 0 judgement for every such line.
            if len(fields) != 4:
                continue
            qid, _, docid, rel = fields
            qrels[qid][docid] = int(rel)

    return qrels
|
|
|
|
|
|
|
class CIRAL(datasets.GeneratorBasedBuilder):
    """Dataset builder for CIRAL queries and relevance judgements.

    One builder config per language; each config exposes 'dev', 'testA' and
    'testB' splits. Passage texts are resolved against the separately hosted
    'ciral/ciral-corpus' dataset.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.1.0"),
            description=f"CIRAL data for {lang}.") for lang in languages.keys()
    ]

    def _info(self):
        """Declare the example schema shared by all splits.

        The pools_* fields come from the deeper judgement pools and are only
        populated for testA (empty lists elsewhere).
        """
        passage_feature = [{
            'docid': datasets.Value("string"),
            'text': datasets.Value("string"),
        }]
        features = datasets.Features(
            {
                "query_id": datasets.Value("string"),
                "query": datasets.Value("string"),
                "positive_passages": passage_feature,
                "negative_passages": passage_feature,
                "pools_positive_passages": passage_feature,
                "pools_negative_passages": passage_feature,
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this language's topic/qrel files and declare the splits."""
        lang = self.config.name
        downloaded_files = dl_manager.download_and_extract(_URLS[lang])
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    'filepaths': downloaded_files[split],
                },
            )
            for split in ('dev', 'testA', 'testB')
        ]

    @staticmethod
    def _build_passages(docids, docid2doc):
        """Resolve doc ids to passage dicts, dropping ids absent from the corpus."""
        return [
            {'docid': docid, 'text': docid2doc[docid]}
            for docid in docids if docid in docid2doc
        ]

    def _generate_examples(self, filepaths):
        """Yield (query_id, example) pairs for one split.

        ``filepaths`` is [topics, qrels] or, for testA, [topics, qrels,
        pooled qrels] — ordering fixed by _URLS.
        """
        lang = self.config.name
        corpus = datasets.load_dataset('ciral/ciral-corpus', lang)['train']
        docid2doc = {doc['docid']: doc['text'] for doc in corpus}

        if len(filepaths) == 3:
            query_file, qrel_file, pools_file = filepaths
        else:
            query_file, qrel_file, pools_file = filepaths[0], filepaths[1], None
        queries = load_queries(query_file)
        shallow_qrels = load_qrels(qrel_file)
        pools_qrels = load_qrels(pools_file)

        for query_id in queries:
            judgements = shallow_qrels[query_id]
            positive_docids = [d for d, j in judgements.items() if j == 1]
            negative_docids = [d for d, j in judgements.items() if j == 0]

            if pools_qrels is not None:
                pools_judgements = pools_qrels[query_id]
                pools_positive_docids = [d for d, j in pools_judgements.items() if j == 1]
                pools_negative_docids = [d for d, j in pools_judgements.items() if j == 0]
            else:
                # No pooled qrels for this split (dev / testB).
                pools_positive_docids = []
                pools_negative_docids = []

            yield query_id, {
                'query_id': query_id,
                'query': queries[query_id],
                'positive_passages': self._build_passages(positive_docids, docid2doc),
                'negative_passages': self._build_passages(negative_docids, docid2doc),
                'pools_positive_passages': self._build_passages(pools_positive_docids, docid2doc),
                'pools_negative_passages': self._build_passages(pools_negative_docids, docid2doc),
            }