---
language:
- ar
- bg
- de
- en
- es
- fr
- it
- ja
- ko
- nl
- pl
- pt
- ru
- th
- tr
- zh
paperswithcode_id: wiki-40b
pretty_name: Wiki-40B
dataset_info:
- config_name: ar
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 773508885
num_examples: 220885
- name: validation
num_bytes: 44102674
num_examples: 12198
- name: test
num_bytes: 43755879
num_examples: 12271
download_size: 413683528
dataset_size: 861367438
- config_name: bg
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1413477231
num_examples: 130670
- name: validation
num_bytes: 78976448
num_examples: 7259
- name: test
num_bytes: 78350414
num_examples: 7289
download_size: 484828696
dataset_size: 1570804093
- config_name: de
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 4988094268
num_examples: 1554910
- name: validation
num_bytes: 278101948
num_examples: 86068
- name: test
num_bytes: 278024815
num_examples: 86594
download_size: 3174352286
dataset_size: 5544221031
- config_name: en
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 9423468036
num_examples: 2926536
- name: validation
num_bytes: 527374301
num_examples: 163597
- name: test
num_bytes: 522210646
num_examples: 162274
download_size: 6183831905
dataset_size: 10473052983
- config_name: es
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 2906242601
num_examples: 872541
- name: validation
num_bytes: 161381260
num_examples: 48592
- name: test
num_bytes: 164110964
num_examples: 48764
download_size: 1783120767
dataset_size: 3231734825
- config_name: fr
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 3850031120
num_examples: 1227206
- name: validation
num_bytes: 216405364
num_examples: 68655
- name: test
num_bytes: 215243874
num_examples: 68004
download_size: 2246390244
dataset_size: 4281680358
- config_name: it
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1998187938
num_examples: 732609
- name: validation
num_bytes: 109399796
num_examples: 40684
- name: test
num_bytes: 108160871
num_examples: 40443
download_size: 1330554944
dataset_size: 2215748605
- config_name: ja
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 7719156890
num_examples: 745392
- name: validation
num_bytes: 423396781
num_examples: 41576
- name: test
num_bytes: 424775191
num_examples: 41268
download_size: 2914923230
dataset_size: 8567328862
- config_name: ko
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1424423053
num_examples: 194977
- name: validation
num_bytes: 79027067
num_examples: 10805
- name: test
num_bytes: 78623281
num_examples: 10802
download_size: 568560655
dataset_size: 1582073401
- config_name: nl
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 906908479
num_examples: 447555
- name: validation
num_bytes: 51519150
num_examples: 25201
- name: test
num_bytes: 49492508
num_examples: 24776
download_size: 594312303
dataset_size: 1007920137
- config_name: pl
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1250270240
num_examples: 505191
- name: validation
num_bytes: 70048390
num_examples: 28310
- name: test
num_bytes: 69957343
num_examples: 27987
download_size: 755556434
dataset_size: 1390275973
- config_name: pt
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1186541609
num_examples: 406507
- name: validation
num_bytes: 65911750
num_examples: 22301
- name: test
num_bytes: 65941634
num_examples: 22693
download_size: 725984914
dataset_size: 1318394993
- config_name: ru
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 14041955183
num_examples: 926037
- name: validation
num_bytes: 787569099
num_examples: 51287
- name: test
num_bytes: 782630173
num_examples: 51885
download_size: 4959684748
dataset_size: 15612154455
- config_name: th
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 1167742322
num_examples: 56798
- name: validation
num_bytes: 58604863
num_examples: 3093
- name: test
num_bytes: 63235795
num_examples: 3114
download_size: 286569412
dataset_size: 1289582980
- config_name: tr
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 417796625
num_examples: 142576
- name: validation
num_bytes: 23829728
num_examples: 7845
- name: test
num_bytes: 23573543
num_examples: 7890
download_size: 208571967
dataset_size: 465199896
- config_name: zh-cn
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 902812807
num_examples: 549672
- name: validation
num_bytes: 50487729
num_examples: 30299
- name: test
num_bytes: 49584239
num_examples: 30355
download_size: 667605463
dataset_size: 1002884775
- config_name: zh-tw
features:
- name: wikidata_id
dtype: string
- name: text
dtype: string
- name: version_id
dtype: string
splits:
- name: train
num_bytes: 3254625339
num_examples: 552031
- name: validation
num_bytes: 185024571
num_examples: 30739
- name: test
num_bytes: 181148137
num_examples: 30670
download_size: 1375185673
dataset_size: 3620798047
configs:
- config_name: ar
data_files:
- split: train
path: ar/train-*
- split: validation
path: ar/validation-*
- split: test
path: ar/test-*
- config_name: bg
data_files:
- split: train
path: bg/train-*
- split: validation
path: bg/validation-*
- split: test
path: bg/test-*
- config_name: de
data_files:
- split: train
path: de/train-*
- split: validation
path: de/validation-*
- split: test
path: de/test-*
- config_name: en
data_files:
- split: train
path: en/train-*
- split: validation
path: en/validation-*
- split: test
path: en/test-*
- config_name: es
data_files:
- split: train
path: es/train-*
- split: validation
path: es/validation-*
- split: test
path: es/test-*
- config_name: fr
data_files:
- split: train
path: fr/train-*
- split: validation
path: fr/validation-*
- split: test
path: fr/test-*
- config_name: it
data_files:
- split: train
path: it/train-*
- split: validation
path: it/validation-*
- split: test
path: it/test-*
- config_name: ja
data_files:
- split: train
path: ja/train-*
- split: validation
path: ja/validation-*
- split: test
path: ja/test-*
- config_name: ko
data_files:
- split: train
path: ko/train-*
- split: validation
path: ko/validation-*
- split: test
path: ko/test-*
- config_name: nl
data_files:
- split: train
path: nl/train-*
- split: validation
path: nl/validation-*
- split: test
path: nl/test-*
- config_name: pl
data_files:
- split: train
path: pl/train-*
- split: validation
path: pl/validation-*
- split: test
path: pl/test-*
- config_name: pt
data_files:
- split: train
path: pt/train-*
- split: validation
path: pt/validation-*
- split: test
path: pt/test-*
- config_name: ru
data_files:
- split: train
path: ru/train-*
- split: validation
path: ru/validation-*
- split: test
path: ru/test-*
- config_name: th
data_files:
- split: train
path: th/train-*
- split: validation
path: th/validation-*
- split: test
path: th/test-*
- config_name: tr
data_files:
- split: train
path: tr/train-*
- split: validation
path: tr/validation-*
- split: test
path: tr/test-*
- config_name: zh-cn
data_files:
- split: train
path: zh-cn/train-*
- split: validation
path: zh-cn/validation-*
- split: test
path: zh-cn/test-*
- config_name: zh-tw
data_files:
- split: train
path: zh-tw/train-*
- split: validation
path: zh-tw/validation-*
- split: test
path: zh-tw/test-*
---
# Dataset Card for "wiki40b"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://research.google/pubs/pub49029/](https://research.google/pubs/pub49029/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [Wiki-40B: Multilingual Language Model Dataset](https://research.google/pubs/pub49029/)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 6.18 GB (`en` config)
- **Size of the generated dataset:** 10.47 GB (`en` config)
- **Total amount of disk used:** 16.66 GB (`en` config)
### Dataset Summary
Wiki-40B provides cleaned-up text from 40+ Wikipedia language editions, restricted to pages that correspond to entities. The dataset has train/dev/test splits for each language. It was cleaned by page-level filtering that removes disambiguation pages, redirect pages, deleted pages, and non-entity pages. Each example contains the Wikidata ID of the entity and the full Wikipedia article after processing that strips non-content sections and structured objects.
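The per-language configurations listed in the metadata above can be loaded with the `datasets` library. The snippet below is a minimal sketch; it assumes the dataset is addressable on the Hub under the id `wiki40b` (adjust the id if your copy lives under a different namespace).
```python
from datasets import load_dataset

# Minimal sketch: load the English configuration of Wiki-40B.
# The dataset id "wiki40b" is an assumption; replace it with the full
# "namespace/wiki40b" repo id if needed.
dataset = load_dataset("wiki40b", "en")
print(dataset)  # DatasetDict with train / validation / test splits

# The corpora are large, so streaming can be used to avoid a full download.
streamed = load_dataset("wiki40b", "en", split="train", streaming=True)
first = next(iter(streamed))
print(sorted(first))  # ['text', 'version_id', 'wikidata_id']
```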
### Supported Tasks and Leaderboards
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Languages
This release of the dataset provides the following language configurations: Arabic (`ar`), Bulgarian (`bg`), German (`de`), English (`en`), Spanish (`es`), French (`fr`), Italian (`it`), Japanese (`ja`), Korean (`ko`), Dutch (`nl`), Polish (`pl`), Portuguese (`pt`), Russian (`ru`), Thai (`th`), Turkish (`tr`), Simplified Chinese (`zh-cn`), and Traditional Chinese (`zh-tw`).
## Dataset Structure
### Data Instances
#### en
- **Size of downloaded dataset files:** 6.18 GB
- **Size of the generated dataset:** 10.47 GB
- **Total amount of disk used:** 16.66 GB
An example of 'train' looks as follows.
```
```
### Data Fields
The data fields are the same among all splits.
#### en
- `wikidata_id`: a `string` feature.
- `text`: a `string` feature.
- `version_id`: a `string` feature.
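As a rough sketch (assuming the same `wiki40b` dataset id as above), these fields can be inspected like this:
```python
from datasets import load_dataset

# Sketch: inspect the schema and one record of the English configuration.
ds = load_dataset("wiki40b", "en", split="validation")

print(ds.features)             # wikidata_id, text, and version_id are all string features
example = ds[0]
print(example["wikidata_id"])  # Wikidata identifier of the entity
print(example["version_id"])   # Wikipedia revision identifier
print(example["text"][:200])   # start of the processed article text
```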
### Data Splits
The number of examples per split for each language configuration:

| name  |   train | validation |   test |
|-------|--------:|-----------:|-------:|
| ar    |  220885 |      12198 |  12271 |
| bg    |  130670 |       7259 |   7289 |
| de    | 1554910 |      86068 |  86594 |
| en    | 2926536 |     163597 | 162274 |
| es    |  872541 |      48592 |  48764 |
| fr    | 1227206 |      68655 |  68004 |
| it    |  732609 |      40684 |  40443 |
| ja    |  745392 |      41576 |  41268 |
| ko    |  194977 |      10805 |  10802 |
| nl    |  447555 |      25201 |  24776 |
| pl    |  505191 |      28310 |  27987 |
| pt    |  406507 |      22301 |  22693 |
| ru    |  926037 |      51287 |  51885 |
| th    |   56798 |       3093 |   3114 |
| tr    |  142576 |       7845 |   7890 |
| zh-cn |  549672 |      30299 |  30355 |
| zh-tw |  552031 |      30739 |  30670 |
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Licensing Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Citation Information
```
@inproceedings{guo-etal-2020-wiki,
  title     = {Wiki-40B: Multilingual Language Model Dataset},
  author    = {Guo, Mandy and Dai, Zihang and Vrande{\v{c}}i{\'c}, Denny and Al-Rfou, Rami},
  booktitle = {Proceedings of the Twelfth Language Resources and Evaluation Conference (LREC 2020)},
  publisher = {European Language Resources Association},
  year      = {2020}
}
```
### Contributions
Thanks to [@jplu](https://github.com/jplu), [@patrickvonplaten](https://github.com/patrickvonplaten), [@thomwolf](https://github.com/thomwolf), [@albertvillanova](https://github.com/albertvillanova), [@lhoestq](https://github.com/lhoestq) for adding this dataset.