parquet-converter committed on
Commit
747930d
1 Parent(s): 88ce2dc

Update parquet files

README.md DELETED
---
annotations_creators:
- crowdsourced
language_creators:
- machine-generated
language:
- en
license:
- unknown
multilinguality:
- monolingual
paperswithcode_id: triviaqa
pretty_name: TriviaQA
size_categories:
- 10K<n<100K
- 100K<n<1M
source_datasets:
- original
task_categories:
- question-answering
- text2text-generation
task_ids:
- open-domain-qa
- open-domain-abstractive-qa
- extractive-qa
- abstractive-qa
dataset_info:
- config_name: rc
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 12749652867
    num_examples: 138384
  - name: validation
    num_bytes: 1662321436
    num_examples: 17944
  - name: test
    num_bytes: 1577710751
    num_examples: 17210
  download_size: 2665779500
  dataset_size: 15989685054
- config_name: rc.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 106884466
    num_examples: 138384
  - name: validation
    num_bytes: 14060078
    num_examples: 17944
  - name: test
    num_bytes: 3668151
    num_examples: 17210
  download_size: 2665779500
  dataset_size: 124612695
- config_name: unfiltered
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 25019623548
    num_examples: 87622
  - name: validation
    num_bytes: 3038803991
    num_examples: 11313
  - name: test
    num_bytes: 2906455559
    num_examples: 10832
  download_size: 3298328560
  dataset_size: 30964883098
- config_name: unfiltered.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 63301342
    num_examples: 87622
  - name: validation
    num_bytes: 8297118
    num_examples: 11313
  - name: test
    num_bytes: 2320908
    num_examples: 10832
  download_size: 632549060
  dataset_size: 73919368
- config_name: rc.web
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 9408852131
    num_examples: 76496
  - name: validation
    num_bytes: 1232155262
    num_examples: 9951
  - name: test
    num_bytes: 1171664123
    num_examples: 9509
  download_size: 2665779500
  dataset_size: 11812671516
- config_name: rc.web.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 58524077
    num_examples: 76496
  - name: validation
    num_bytes: 7694681
    num_examples: 9951
  - name: test
    num_bytes: 2024871
    num_examples: 9509
  download_size: 2665779500
  dataset_size: 68243629
- config_name: unfiltered.web
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
  - name: validation
  - name: test
  download_size: 3298328560
  dataset_size: 0
- config_name: unfiltered.web.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
  - name: validation
  - name: test
  download_size: 632549060
  dataset_size: 0
- config_name: rc.wikipedia
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 3340800860
    num_examples: 61888
  - name: validation
    num_bytes: 430166174
    num_examples: 7993
  - name: test
    num_bytes: 406046628
    num_examples: 7701
  download_size: 2665779500
  dataset_size: 4177013662
- config_name: rc.wikipedia.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
    num_bytes: 48360513
    num_examples: 61888
  - name: validation
    num_bytes: 6365397
    num_examples: 7993
  - name: test
    num_bytes: 1643280
    num_examples: 7701
  download_size: 2665779500
  dataset_size: 56369190
- config_name: unfiltered.wikipedia
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
  - name: validation
  - name: test
  download_size: 3298328560
  dataset_size: 0
- config_name: unfiltered.wikipedia.nocontext
  features:
  - name: question
    dtype: string
  - name: question_id
    dtype: string
  - name: question_source
    dtype: string
  - name: entity_pages
    sequence:
    - name: doc_source
      dtype: string
    - name: filename
      dtype: string
    - name: title
      dtype: string
    - name: wiki_context
      dtype: string
  - name: search_results
    sequence:
    - name: description
      dtype: string
    - name: filename
      dtype: string
    - name: rank
      dtype: int32
    - name: title
      dtype: string
    - name: url
      dtype: string
    - name: search_context
      dtype: string
  - name: answer
    struct:
    - name: aliases
      sequence: string
    - name: normalized_aliases
      sequence: string
    - name: matched_wiki_entity_name
      dtype: string
    - name: normalized_matched_wiki_entity_name
      dtype: string
    - name: normalized_value
      dtype: string
    - name: type
      dtype: string
    - name: value
      dtype: string
  splits:
  - name: train
  - name: validation
  - name: test
  download_size: 632549060
  dataset_size: 0
---

# Dataset Card for "trivia_qa"

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [http://nlp.cs.washington.edu/triviaqa/](http://nlp.cs.washington.edu/triviaqa/)
- **Repository:** [https://github.com/mandarjoshi90/triviaqa](https://github.com/mandarjoshi90/triviaqa)
- **Paper:** [TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension](https://arxiv.org/abs/1705.03551)
- **Leaderboard:** [CodaLab Leaderboard](https://competitions.codalab.org/competitions/17208#results)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 8833.35 MB
- **Size of the generated dataset:** 43351.32 MB
- **Total amount of disk used:** 52184.66 MB

### Dataset Summary

TriviaQA is a reading comprehension dataset containing over 650K
question-answer-evidence triples. TriviaQA includes 95K question-answer
pairs authored by trivia enthusiasts and independently gathered evidence
documents, six per question on average, that provide high-quality distant
supervision for answering the questions.
-
771
- ### Supported Tasks and Leaderboards
772
-
773
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
774
-
775
- ### Languages
776
-
777
- English.
778
-
779
- ## Dataset Structure
780
-
781
- ### Data Instances
782
-
783
- #### rc
784
-
785
- - **Size of downloaded dataset files:** 2542.29 MB
786
- - **Size of the generated dataset:** 15275.31 MB
787
- - **Total amount of disk used:** 17817.60 MB
788
-
789
- An example of 'train' looks as follows.
790
- ```
791
-
792
- ```
793
-
794
- #### rc.nocontext
795
-
796
- - **Size of downloaded dataset files:** 2542.29 MB
797
- - **Size of the generated dataset:** 120.42 MB
798
- - **Total amount of disk used:** 2662.71 MB
799
-
800
- An example of 'train' looks as follows.
801
- ```
802
-
803
- ```
804
-
805
- #### unfiltered
806
-
807
- - **Size of downloaded dataset files:** 3145.53 MB
808
- - **Size of the generated dataset:** 27884.47 MB
809
- - **Total amount of disk used:** 31030.00 MB
810
-
811
- An example of 'validation' looks as follows.
812
- ```
813
-
814
- ```
815
-
816
- #### unfiltered.nocontext
817
-
818
- - **Size of downloaded dataset files:** 603.25 MB
819
- - **Size of the generated dataset:** 71.11 MB
820
- - **Total amount of disk used:** 674.35 MB
821
-
822
- An example of 'train' looks as follows.
823
- ```
824
-
825
- ```
826
-
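A concrete record can also be printed directly once a split is loaded; a short sketch along the same lines as above:

```python
from datasets import load_dataset

# The field layout is identical across configurations (see Data Fields below).
train = load_dataset("trivia_qa", "rc.nocontext", split="train")

example = train[0]
print(example["question"])
print(example["answer"]["value"])
```
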
### Data Fields

The data fields are the same among all splits.

#### rc
- `question`: a `string` feature.
- `question_id`: a `string` feature.
- `question_source`: a `string` feature.
- `entity_pages`: a dictionary feature containing:
  - `doc_source`: a `string` feature.
  - `filename`: a `string` feature.
  - `title`: a `string` feature.
  - `wiki_context`: a `string` feature.
- `search_results`: a dictionary feature containing:
  - `description`: a `string` feature.
  - `filename`: a `string` feature.
  - `rank`: an `int32` feature.
  - `title`: a `string` feature.
  - `url`: a `string` feature.
  - `search_context`: a `string` feature.
- `aliases`: a `list` of `string` features.
- `normalized_aliases`: a `list` of `string` features.
- `matched_wiki_entity_name`: a `string` feature.
- `normalized_matched_wiki_entity_name`: a `string` feature.
- `normalized_value`: a `string` feature.
- `type`: a `string` feature.
- `value`: a `string` feature.

#### rc.nocontext
- `question`: a `string` feature.
- `question_id`: a `string` feature.
- `question_source`: a `string` feature.
- `entity_pages`: a dictionary feature containing:
  - `doc_source`: a `string` feature.
  - `filename`: a `string` feature.
  - `title`: a `string` feature.
  - `wiki_context`: a `string` feature.
- `search_results`: a dictionary feature containing:
  - `description`: a `string` feature.
  - `filename`: a `string` feature.
  - `rank`: an `int32` feature.
  - `title`: a `string` feature.
  - `url`: a `string` feature.
  - `search_context`: a `string` feature.
- `aliases`: a `list` of `string` features.
- `normalized_aliases`: a `list` of `string` features.
- `matched_wiki_entity_name`: a `string` feature.
- `normalized_matched_wiki_entity_name`: a `string` feature.
- `normalized_value`: a `string` feature.
- `type`: a `string` feature.
- `value`: a `string` feature.

#### unfiltered
- `question`: a `string` feature.
- `question_id`: a `string` feature.
- `question_source`: a `string` feature.
- `entity_pages`: a dictionary feature containing:
  - `doc_source`: a `string` feature.
  - `filename`: a `string` feature.
  - `title`: a `string` feature.
  - `wiki_context`: a `string` feature.
- `search_results`: a dictionary feature containing:
  - `description`: a `string` feature.
  - `filename`: a `string` feature.
  - `rank`: an `int32` feature.
  - `title`: a `string` feature.
  - `url`: a `string` feature.
  - `search_context`: a `string` feature.
- `aliases`: a `list` of `string` features.
- `normalized_aliases`: a `list` of `string` features.
- `matched_wiki_entity_name`: a `string` feature.
- `normalized_matched_wiki_entity_name`: a `string` feature.
- `normalized_value`: a `string` feature.
- `type`: a `string` feature.
- `value`: a `string` feature.

#### unfiltered.nocontext
- `question`: a `string` feature.
- `question_id`: a `string` feature.
- `question_source`: a `string` feature.
- `entity_pages`: a dictionary feature containing:
  - `doc_source`: a `string` feature.
  - `filename`: a `string` feature.
  - `title`: a `string` feature.
  - `wiki_context`: a `string` feature.
- `search_results`: a dictionary feature containing:
  - `description`: a `string` feature.
  - `filename`: a `string` feature.
  - `rank`: an `int32` feature.
  - `title`: a `string` feature.
  - `url`: a `string` feature.
  - `search_context`: a `string` feature.
- `aliases`: a `list` of `string` features.
- `normalized_aliases`: a `list` of `string` features.
- `matched_wiki_entity_name`: a `string` feature.
- `normalized_matched_wiki_entity_name`: a `string` feature.
- `normalized_value`: a `string` feature.
- `type`: a `string` feature.
- `value`: a `string` feature.

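Once loaded, these nested features come back as ordinary Python containers: the `answer` struct is a dict, while `entity_pages` and `search_results` are sequences of parallel columns, i.e. dicts of equal-length lists. A small access sketch:

```python
from datasets import load_dataset

validation = load_dataset("trivia_qa", "rc", split="validation")
ex = validation[0]

# The answer struct is a plain dict of scalars and alias lists.
print(ex["answer"]["value"], ex["answer"]["aliases"][:3])

# Sequence features arrive as dicts of parallel lists, one list per column.
for title, url in zip(ex["search_results"]["title"], ex["search_results"]["url"]):
    print(title, url)
```
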
### Data Splits

| name               | train|validation| test|
|--------------------|-----:|---------:|----:|
|rc                  |138384|     17944|17210|
|rc.nocontext        |138384|     17944|17210|
|unfiltered          | 87622|     11313|10832|
|unfiltered.nocontext| 87622|     11313|10832|

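These counts can also be read from the builder metadata without downloading the archives; a quick sketch:

```python
from datasets import load_dataset_builder

# Only fetches the dataset script and metadata, not the data archives.
builder = load_dataset_builder("trivia_qa", "rc")
for name, info in builder.info.splits.items():
    print(name, info.num_examples)
```
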
## Dataset Creation

### Curation Rationale

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the source language producers?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Annotations

#### Annotation process

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the annotators?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Personal and Sensitive Information

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Discussion of Biases

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Other Known Limitations

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Additional Information

### Dataset Curators

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Licensing Information

The University of Washington does not own the copyright of the questions and documents included in TriviaQA.

### Citation Information

```
@article{2017arXivtriviaqa,
       author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
                 Daniel and {Zettlemoyer}, Luke},
        title = "{TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
      journal = {arXiv e-prints},
         year = 2017,
          eid = {arXiv:1705.03551},
        pages = {arXiv:1705.03551},
archivePrefix = {arXiv},
       eprint = {1705.03551},
}
```

### Contributions

Thanks to [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@lewtun](https://github.com/lewtun) for adding this dataset.

dataset_infos.json DELETED
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.wikipedia", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-rc.tar.gz": {"num_bytes": 2665779500, "checksum": "ef94fac6db0541e5bb5b27020d067a8b13b1c1ffc52717e836832e02aaed87b9"}, "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 3298328560, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 3298328560}, "unfiltered.wikipedia.nocontext": {"description": "TriviaqQA is a reading comprehension dataset containing over 650K\nquestion-answer-evidence triples. 
TriviaqQA includes 95K question-answer\npairs authored by trivia enthusiasts and independently gathered evidence\ndocuments, six per question on average, that provide high quality distant\nsupervision for answering the questions.\n", "citation": "\n@article{2017arXivtriviaqa,\n author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},\n Daniel and {Zettlemoyer}, Luke},\n title = \"{triviaqa: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}\",\n journal = {arXiv e-prints},\n year = 2017,\n eid = {arXiv:1705.03551},\n pages = {arXiv:1705.03551},\narchivePrefix = {arXiv},\n eprint = {1705.03551},\n}\n", "homepage": "http://nlp.cs.washington.edu/triviaqa/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "question_id": {"dtype": "string", "id": null, "_type": "Value"}, "question_source": {"dtype": "string", "id": null, "_type": "Value"}, "entity_pages": {"feature": {"doc_source": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "wiki_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "search_results": {"feature": {"description": {"dtype": "string", "id": null, "_type": "Value"}, "filename": {"dtype": "string", "id": null, "_type": "Value"}, "rank": {"dtype": "int32", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "url": {"dtype": "string", "id": null, "_type": "Value"}, "search_context": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answer": {"aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "normalized_aliases": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_matched_wiki_entity_name": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_value": {"dtype": "string", "id": null, "_type": "Value"}, "type": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "trivia_qa", "config_name": "unfiltered.wikipedia.nocontext", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "validation": {"name": "validation", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}, "test": {"name": "test", "num_bytes": 0, "num_examples": 0, "dataset_name": "trivia_qa"}}, "download_checksums": {"http://nlp.cs.washington.edu/triviaqa/data/triviaqa-unfiltered.tar.gz": {"num_bytes": 632549060, "checksum": "7f0c94207fd0a1eb7bba3d52cdfe0735c98de850b9e5aa4e75f2776ef9a531d0"}}, "download_size": 632549060, "post_processing_size": null, "dataset_size": 0, "size_in_bytes": 632549060}}
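After this conversion, each config enumerated in the metadata above can still be loaded by its "config_name". A minimal sketch, assuming the standard `datasets` API; "rc.nocontext" is used here only because it is the smallest config:

from datasets import load_dataset

# Download and prepare one TriviaQA config; the config names match the
# "config_name" fields recorded in the metadata above.
ds = load_dataset("trivia_qa", "rc.nocontext")

print(ds)                          # DatasetDict with train/validation/test splits
example = ds["train"][0]
print(example["question"])         # trivia question text
print(example["answer"]["value"])  # canonical answer string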
 
 
trivia_qa.py DELETED
@@ -1,329 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """TriviaQA: A Reading Comprehension Dataset."""
-
-
- import glob
- import json
- import os
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """
- @article{2017arXivtriviaqa,
-        author = {{Joshi}, Mandar and {Choi}, Eunsol and {Weld},
-                  Daniel and {Zettlemoyer}, Luke},
-         title = "{TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}",
-       journal = {arXiv e-prints},
-          year = 2017,
-           eid = {arXiv:1705.03551},
-         pages = {arXiv:1705.03551},
- archivePrefix = {arXiv},
-        eprint = {1705.03551},
- }
- """
- _DOWNLOAD_URL_TMPL = "http://nlp.cs.washington.edu/triviaqa/data/triviaqa-{}.tar.gz"
- _WEB_EVIDENCE_DIR = "evidence/web"
- _WIKI_EVIDENCE_DIR = "evidence/wikipedia"
-
- _DESCRIPTION = """\
- TriviaQA is a reading comprehension dataset containing over 650K
- question-answer-evidence triples. TriviaQA includes 95K question-answer
- pairs authored by trivia enthusiasts and independently gathered evidence
- documents, six per question on average, that provide high quality distant
- supervision for answering the questions.
- """
-
- _RC_DESCRIPTION = """\
- Question-answer pairs where all documents for a given question contain the
- answer string(s).
- """
-
- _UNFILTERED_DESCRIPTION = """\
- 110k question-answer pairs for open domain QA where not all documents for a
- given question contain the answer string(s). This makes the unfiltered dataset
- more appropriate for IR-style QA.
- """
-
- _CONTEXT_ADDENDUM = "Includes context from Wikipedia and search results."
-
-
- def _web_evidence_dir(tmp_dir):
-     return sorted(glob.glob(os.path.join(tmp_dir, _WEB_EVIDENCE_DIR)))
-
-
- def _wiki_evidence_dir(tmp_dir):
-     return sorted(glob.glob(os.path.join(tmp_dir, _WIKI_EVIDENCE_DIR)))
-
-
- def _qa_files(file_paths, sources, split, unfiltered):
-     qa_dir = (
-         os.path.join(file_paths["unfiltered"], "triviaqa-unfiltered")
-         if unfiltered
-         else os.path.join(file_paths["rc"], "qa")
-     )
-
-     suffix_mapping = {
-         datasets.Split.TRAIN: "train.json",
-         datasets.Split.VALIDATION: "dev.json",
-         datasets.Split.TEST: "test-without-answers.json",
-     }
-     suffix = suffix_mapping[split]
-
-     filenames = [f"unfiltered-web-{suffix}"] if unfiltered else [f"{source}-{suffix}" for source in sources]
-
-     filenames = [os.path.join(qa_dir, filename) for filename in filenames]
-
-     return sorted(filenames)
-
-
- class TriviaQaConfig(datasets.BuilderConfig):
-     """BuilderConfig for TriviaQA."""
-
-     def __init__(self, source="all", unfiltered=False, exclude_context=False, **kwargs):
-         """BuilderConfig for TriviaQA.
-
-         Args:
-             unfiltered: bool, whether to use the unfiltered version of the dataset,
-                 intended for open-domain QA.
-             exclude_context: bool, whether to exclude Wikipedia and search context for
-                 reduced size.
-             **kwargs: keyword arguments forwarded to super.
-         """
-         name = "unfiltered" if unfiltered else "rc"
-
-         assert source in ["all", "web", "wikipedia"]
-
-         # there is no unfiltered version for the wikipedia subset
-         # --> unfiltered subset for source="all" is the same as for source="web"
-         # --> only accept source="all" if unfiltered is True
-         assert not unfiltered or source == "all"
-
-         if source != "all":
-             name += f".{source}"
-
-         if exclude_context:
-             name += ".nocontext"
-         description = _UNFILTERED_DESCRIPTION if unfiltered else _RC_DESCRIPTION
-         if not exclude_context:
-             description += _CONTEXT_ADDENDUM
-         super(TriviaQaConfig, self).__init__(
-             name=name, description=description, version=datasets.Version("1.2.0"), **kwargs
-         )
-
-         self.sources = ["web", "wikipedia"] if source == "all" else [source]
-         self.unfiltered = unfiltered
-         self.exclude_context = exclude_context
-
-
- class TriviaQa(datasets.GeneratorBasedBuilder):
-     """TriviaQA is a reading comprehension dataset.
-
-     It contains over 650K question-answer-evidence triples.
-     """
-
-     BUILDER_CONFIGS = [
-         TriviaQaConfig(source="all", unfiltered=False, exclude_context=False),  # rc
-         TriviaQaConfig(source="all", unfiltered=False, exclude_context=True),  # rc.nocontext
-         TriviaQaConfig(source="all", unfiltered=True, exclude_context=False),  # unfiltered
-         TriviaQaConfig(source="all", unfiltered=True, exclude_context=True),  # unfiltered.nocontext
-         TriviaQaConfig(source="web", unfiltered=False, exclude_context=False),  # rc.web
-         TriviaQaConfig(source="web", unfiltered=False, exclude_context=True),  # rc.web.nocontext
-         TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=False),  # rc.wikipedia
-         TriviaQaConfig(source="wikipedia", unfiltered=False, exclude_context=True),  # rc.wikipedia.nocontext
-     ]
-     DEFAULT_WRITER_BATCH_SIZE = 1000  # examples are quite big, so set this value to save some RAM
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "question": datasets.Value("string"),
-                     "question_id": datasets.Value("string"),
-                     "question_source": datasets.Value("string"),
-                     "entity_pages": datasets.features.Sequence(
-                         {
-                             "doc_source": datasets.Value("string"),
-                             "filename": datasets.Value("string"),
-                             "title": datasets.Value("string"),
-                             "wiki_context": datasets.Value("string"),
-                         }
-                     ),
-                     "search_results": datasets.features.Sequence(
-                         {
-                             "description": datasets.Value("string"),
-                             "filename": datasets.Value("string"),
-                             "rank": datasets.Value("int32"),
-                             "title": datasets.Value("string"),
-                             "url": datasets.Value("string"),
-                             "search_context": datasets.Value("string"),
-                         }
-                     ),
-                     "answer": dict(
-                         {
-                             "aliases": datasets.features.Sequence(datasets.Value("string")),
-                             "normalized_aliases": datasets.features.Sequence(datasets.Value("string")),
-                             "matched_wiki_entity_name": datasets.Value("string"),
-                             "normalized_matched_wiki_entity_name": datasets.Value("string"),
-                             "normalized_value": datasets.Value("string"),
-                             "type": datasets.Value("string"),
-                             "value": datasets.Value("string"),
-                         }
-                     ),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="http://nlp.cs.washington.edu/triviaqa/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         cfg = self.config
-         download_urls = dict()
-         if not (cfg.unfiltered and cfg.exclude_context):
-             download_urls["rc"] = _DOWNLOAD_URL_TMPL.format("rc")
-         if cfg.unfiltered:
-             download_urls["unfiltered"] = _DOWNLOAD_URL_TMPL.format("unfiltered")
-         file_paths = dl_manager.download_and_extract(download_urls)
-
-         if cfg.exclude_context:
-             web_evidence_dir = None
-             wiki_evidence_dir = None
-         else:
-             web_evidence_dir = os.path.join(file_paths["rc"], _WEB_EVIDENCE_DIR)
-             wiki_evidence_dir = os.path.join(file_paths["rc"], _WIKI_EVIDENCE_DIR)
-
-         return [
-             datasets.SplitGenerator(
-                 name=name,
-                 gen_kwargs={
-                     "files": _qa_files(file_paths, cfg.sources, name, cfg.unfiltered),
-                     "web_dir": web_evidence_dir,
-                     "wiki_dir": wiki_evidence_dir,
-                 },
-             )
-             for name in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
-         ]
-
-     def _generate_examples(self, files, web_dir, wiki_dir):
-         """This function returns the examples."""
-
-         def parse_example(article):
-             """Return a single example from an article JSON record."""
-
-             def _strip(collection):
-                 return [item.strip() for item in collection]
-
-             if "Answer" in article:
-                 answer = article["Answer"]
-                 answer_dict = {
-                     "aliases": _strip(answer["Aliases"]),
-                     "normalized_aliases": _strip(answer["NormalizedAliases"]),
-                     "matched_wiki_entity_name": answer.get("MatchedWikiEntryName", "").strip(),
-                     "normalized_matched_wiki_entity_name": answer.get("NormalizedMatchedWikiEntryName", "").strip(),
-                     "normalized_value": answer["NormalizedValue"].strip(),
-                     "type": answer["Type"].strip(),
-                     "value": answer["Value"].strip(),
-                 }
-             else:
-                 answer_dict = {
-                     "aliases": [],
-                     "normalized_aliases": [],
-                     "matched_wiki_entity_name": "<unk>",
-                     "normalized_matched_wiki_entity_name": "<unk>",
-                     "normalized_value": "<unk>",
-                     "type": "",
-                     "value": "<unk>",
-                 }
-
-             if self.config.exclude_context:
-                 article["SearchResults"] = []
-                 article["EntityPages"] = []
-
-             def _add_context(collection, context_field, file_dir):
-                 """Adds context from file, or skips if file does not exist."""
-                 new_items = []
-                 for item in collection:
-                     if "Filename" not in item:
-                         logger.info("Missing context 'Filename', skipping.")
-                         continue
-
-                     new_item = item.copy()
-                     fname = item["Filename"]
-                     try:
-                         with open(os.path.join(file_dir, fname), encoding="utf-8") as f:
-                             new_item[context_field] = f.read()
-                     except (IOError, FileNotFoundError):
-                         logger.info("File does not exist, skipping: %s", fname)
-                         continue
-                     new_items.append(new_item)
-                 return new_items
-
-             def _strip_if_str(v):
-                 return v.strip() if isinstance(v, str) else v
-
-             def _transpose_and_strip_dicts(dicts, field_names):
-                 return {
-                     datasets.naming.camelcase_to_snakecase(k): [_strip_if_str(d[k]) for d in dicts]
-                     for k in field_names
-                 }
-
-             search_results = _transpose_and_strip_dicts(
-                 _add_context(article.get("SearchResults", []), "SearchContext", web_dir),
-                 ["Description", "Filename", "Rank", "Title", "Url", "SearchContext"],
-             )
-
-             entity_pages = _transpose_and_strip_dicts(
-                 _add_context(article.get("EntityPages", []), "WikiContext", wiki_dir),
-                 ["DocSource", "Filename", "Title", "WikiContext"],
-             )
-
-             question = article["Question"].strip()
-             question_id = article["QuestionId"]
-             question_source = article["QuestionSource"].strip()
-
-             return {
-                 "entity_pages": entity_pages,
-                 "search_results": search_results,
-                 "question": question,
-                 "question_id": question_id,
-                 "question_source": question_source,
-                 "answer": answer_dict,
-             }
-
-         for filepath in files:
-             logger.info("generating examples from = %s", filepath)
-             fname = os.path.basename(filepath)
-
-             with open(filepath, encoding="utf-8") as f:
-                 current_record = ""
-                 for line in f:
-                     if line == " {\n":
-                         current_record = line
-                     elif line.startswith(" }"):  # Handles final record as well.
-                         article = json.loads(current_record + "}")
-                         current_record = ""
-                         example = parse_example(article)
-                         yield "%s_%s" % (fname, example["question_id"]), example
-                     else:
-                         current_record += line
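The record-by-record scan at the end of the deleted script is the part worth keeping in mind: the TriviaQA QA files are multi-gigabyte JSON arrays, so the loader accumulates the lines between each record's opening and closing brace and parses one object at a time rather than calling json.loads on the whole file. A hypothetical standalone sketch of the same idea (the sentinel strings and the sample path are assumptions, matching the layout the script above expects):

import json

def iter_records(filepath, open_marker=" {\n", close_marker=" }"):
    """Yield one parsed JSON object per pretty-printed record."""
    current = ""
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            if line == open_marker:
                current = line                   # start of a new record
            elif line.startswith(close_marker):  # also matches the final record
                yield json.loads(current + "}")  # close and parse the buffer
                current = ""
            else:
                current += line                  # body of the current record

# Illustrative usage (path is hypothetical):
# for article in iter_records("qa/wikipedia-train.json"):
#     print(article["QuestionId"])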
unfiltered.nocontext/trivia_qa-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2718527bc46d9a2ff70d3bb173e2987a4bffc07f0d3d8f0a6b51c4dde91843b0
+ size 762213
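Each three-line block added here is a Git LFS pointer: a spec version, the SHA-256 of the actual Parquet payload, and its size in bytes. A hedged sketch (the function name and local path are illustrative) for checking a materialized file against its pointer:

import hashlib

def matches_pointer(path, expected_oid, expected_size):
    """Return True if the file's sha256 and byte count match an LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Illustrative usage with the pointer above:
# matches_pointer("unfiltered.nocontext/trivia_qa-test.parquet",
#                 "2718527bc46d9a2ff70d3bb173e2987a4bffc07f0d3d8f0a6b51c4dde91843b0",
#                 762213)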
unfiltered.nocontext/trivia_qa-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd79c63b9a37e7af3f56500b1a2ab2eb76953ec9109ac6fbd544d6d43fb25a8a
+ size 33210864
unfiltered.nocontext/trivia_qa-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff4582f84a263a27a86941abd8e64551933ef3a05f88b08fda8acc76490be7f6
+ size 4390956
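Once the LFS objects are fetched, each split is an ordinary Parquet file and can be read directly, without the loading script this commit deletes. A minimal sketch, assuming pandas with a pyarrow backend installed (the path is the repo-relative one added above):

import pandas as pd

df = pd.read_parquet("unfiltered.nocontext/trivia_qa-validation.parquet")
print(len(df))              # number of examples in the split
print(df.columns.tolist())  # question, question_id, ..., answer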