Datasets:
mteb
/

ArXiv:
orionweller committed on
Commit
be4e641
1 Parent(s): 5366246
README.md ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - fas
4
+ - rus
5
+ - zho
6
+
7
+
8
+ multilinguality:
9
+ - multilingual
10
+
11
+ task_categories:
12
+ - text-retrieval
13
+
14
+ ---
15
+
16
+ From the NeuCLIR TREC Track 2022: https://arxiv.org/abs/2304.12367
17
+
18
+ Generated from https://huggingface.co/datasets/neuclir/neuclir1
19
+
20
+ ```
21
+ @article{lawrie2023overview,
22
+ title={Overview of the TREC 2022 NeuCLIR track},
23
+ author={Lawrie, Dawn and MacAvaney, Sean and Mayfield, James and McNamee, Paul and Oard, Douglas W and Soldaini, Luca and Yang, Eugene},
24
+ journal={arXiv preprint arXiv:2304.12367},
25
+ year={2023}
26
+ }
27
+ ```
28
+
neuclir-2022-fast.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Dataset loading script for the NeuCLIR 2022 (TREC) retrieval collections."""
# Fix: original read `iimport json` — a SyntaxError that broke the whole script.
import json

import datasets
# BibTeX citation for the NeuCLIR 2022 track overview paper.
_CITATION = '''
@article{lawrie2023overview,
  title={Overview of the TREC 2022 NeuCLIR track},
  author={Lawrie, Dawn and MacAvaney, Sean and Mayfield, James and McNamee, Paul and Oard, Douglas W and Soldaini, Luca and Yang, Eugene},
  journal={arXiv preprint arXiv:2304.12367},
  year={2023}
}
'''

# ISO 639-3 codes of the three NeuCLIR 2022 document languages.
_LANGUAGES = [
    'rus',
    'fas',
    'zho',
]

_DESCRIPTION = 'dataset load script for NeuCLIR 2022'

# Single source of truth for the hosting repository (was repeated in three
# URL templates below).
# NOTE(review): these URLs end in .jsonl while the sibling data files in this
# repo are parquet — confirm which repository/format the script should target.
_URL_BASE = 'https://huggingface.co/datasets/MTEB/neuclir-2022/resolve/main'

# Per-language qrels (test split) download URLs, keyed by config name.
_DATASET_URLS = {
    lang: {
        'test': f'{_URL_BASE}/neuclir-{lang}/test.jsonl',
    } for lang in _LANGUAGES
}

# Per-language document-collection download URLs, keyed by 'corpus-<lang>'.
_DATASET_CORPUS_URLS = {
    f'corpus-{lang}': {
        'corpus': f'{_URL_BASE}/neuclir-{lang}/corpus.jsonl'
    } for lang in _LANGUAGES
}

# Per-language topic/query download URLs, keyed by 'queries-<lang>'.
_DATASET_QUERIES_URLS = {
    f'queries-{lang}': {
        'queries': f'{_URL_BASE}/neuclir-{lang}/queries.jsonl'
    } for lang in _LANGUAGES
}
class MLDR(datasets.GeneratorBasedBuilder):
    """Builder for the NeuCLIR 2022 retrieval data.

    Config names, for each language code in ``_LANGUAGES``:
      * ``<lang>``         -> 'test' split of relevance judgments (qrels)
      * ``corpus-<lang>``  -> document collection
      * ``queries-<lang>`` -> topics/queries

    NOTE(review): the class name says MLDR but this script loads NeuCLIR —
    kept unchanged because external code may reference the class by name.
    """

    BUILDER_CONFIGS = [datasets.BuilderConfig(
        version=datasets.Version('1.0.0'),
        name=lang, description=f'NeuCLIR dataset in language {lang}.'
        ) for lang in _LANGUAGES
    ] + [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=f'corpus-{lang}', description=f'corpus of NeuCLIR dataset in language {lang}.'
        ) for lang in _LANGUAGES
    ] + [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            name=f'queries-{lang}', description=f'queries of NeuCLIR dataset in language {lang}.'
        ) for lang in _LANGUAGES
    ]

    def _info(self):
        """Return ``DatasetInfo`` with the feature schema for this config."""
        name = self.config.name
        if name.startswith('corpus-'):
            features = datasets.Features({
                '_id': datasets.Value('string'),
                'text': datasets.Value('string'),
                'title': datasets.Value('string'),
            })
        elif name.startswith('queries-'):
            features = datasets.Features({
                '_id': datasets.Value('string'),
                'text': datasets.Value('string'),
            })
        else:
            # Default (plain language config): qrels rows.
            features = datasets.Features({
                'query-id': datasets.Value('string'),
                'corpus-id': datasets.Value('string'),
                'score': datasets.Value('int32'),
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,  # differs per configuration, built above
            supervised_keys=None,
            homepage='https://arxiv.org/abs/2304.12367',
            license=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single file for this config and expose one split.

        In every URL table the dict key equals the split name, so the three
        original copy-pasted branches collapse to one download + generator.
        """
        name = self.config.name
        if name.startswith('corpus-'):
            urls, split = _DATASET_CORPUS_URLS[name], 'corpus'
        elif name.startswith('queries-'):
            urls, split = _DATASET_QUERIES_URLS[name], 'queries'
        else:
            urls, split = _DATASET_URLS[name], 'test'
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={'filepath': downloaded_files[split]},
            ),
        ]

    @staticmethod
    def _read_rows(filepath):
        """Return an iterable of dict-like rows from *filepath*.

        Bug fix: the original unconditionally used ``pandas.read_parquet``
        even though the download URLs end in ``.jsonl``. Dispatch on the
        file extension instead — JSON-lines files are parsed line by line,
        anything else falls back to the original parquet path.
        """
        if str(filepath).endswith(('.jsonl', '.json')):
            import json
            with open(filepath, encoding='utf-8') as f:
                return [json.loads(line) for line in f if line.strip()]
        import pandas as pd
        df = pd.read_parquet(filepath)
        return (row for _, row in df.iterrows())

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the current config's file."""
        name = self.config.name
        rows = self._read_rows(filepath)

        if name.startswith('corpus-'):
            for row in rows:
                yield row['_id'], {
                    '_id': row['_id'],
                    'text': row['text'],
                    'title': row['title']
                }
        elif name.startswith('queries-'):
            for row in rows:
                yield row['_id'], {
                    '_id': row['_id'],
                    'text': row['text']
                }
        else:
            # qrels: key must be unique per (query, document) pair.
            for row in rows:
                yield f"{row['query-id']}-----{row['corpus-id']}", {
                    'query-id': row['query-id'],
                    'corpus-id': row['corpus-id'],
                    'score': row['score']
                }
neuclir-fas/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e354a87be54da18e45deb85da8877baebf673989be9ea41c2927134fb01f7647
3
+ size 50994348
neuclir-fas/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bee67a27a3143663741477c295e72380a9fe14ea184fe8f0d90608a839c311d
3
+ size 10946
neuclir-fas/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd77e50f5a5812303d6e5b51fe03038a3f6e978fa429b1a78617385efab3551e
3
+ size 214319
neuclir-rus/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57fdf172ad6f91ec7d4dd20aa69319a4b23d4c361f8a25c3386025082c45c15b
3
+ size 50201154
neuclir-rus/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa9c028a77000761b8404353ac541cae5f480480d5ab4ee897440c04208f6837
3
+ size 11881
neuclir-rus/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6fe124bcf22e259fe070d3b21811cca78c408f74e95d172ce0e08e4badfe017
3
+ size 219692
neuclir-zho/corpus-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aed730a08071aa48efec66c7db3136c5a371109b12618ff6d58325eaa8a3f998
3
+ size 44691612
neuclir-zho/queries-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19da8114882e3c4a9070ace08a30be3fe46f134467ceb35fa00144a3b86bd315
3
+ size 7930
neuclir-zho/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ea8ef5a7aa3c9866746e8296ac73d44fd3c2a586d2226558bbe003f5b7d5d2c
3
+ size 245344