---
license: cc-by-sa-4.0
dataset_info:
  features:
  - name: id
    dtype: int64
  - name: title
    dtype: string
  - name: summary
    dtype: string
  - name: text
    dtype: string
  - name: categories
    sequence: string
  splits:
  - name: train
    num_bytes: 447696713.49705654
    num_examples: 67573
  - name: test
    num_bytes: 49749968.50294345
    num_examples: 7509
  download_size: 298225345
  dataset_size: 497446682.0
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
---
A dataset of Wikipedia pages, built by exploring the 40 root categories and their sub-categories to collect pages. It provides up to 2,000 pages per category; each row carries the page's `id`, `title`, `summary`, full `text`, and `categories` labels.
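
The collection process can be pictured as a breadth-first walk of each root category's sub-category tree. The sketch below illustrates the idea against the public MediaWiki API; it is not the actual mwcat implementation (which also handles API continuation tokens, rate limits, and text extraction), and the function name and cap parameter are illustrative only:

```python
import requests

API = "https://en.wikipedia.org/w/api.php"

def collect_pages(root_category: str, cap: int = 2000) -> list[str]:
    """Breadth-first walk of a category tree, returning up to `cap` article titles."""
    queue, seen, pages = [root_category], set(), []
    while queue and len(pages) < cap:
        category = queue.pop(0)
        if category in seen:
            continue
        seen.add(category)
        params = {
            "action": "query",
            "list": "categorymembers",
            "cmtitle": category,
            "cmlimit": "500",
            "format": "json",
        }
        resp = requests.get(API, params=params).json()
        for member in resp.get("query", {}).get("categorymembers", []):
            if member["ns"] == 14:    # namespace 14: a sub-category to descend into
                queue.append(member["title"])
            elif member["ns"] == 0:   # namespace 0: an article page
                pages.append(member["title"])
    return pages[:cap]

print(len(collect_pages("Category:Science", cap=50)))
```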
See https://github.com/tarekziade/mwcat for the tooling used to build the dataset.
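
The dataset can be loaded with the 🤗 `datasets` library. This card does not state the Hub repository id, so the path below is a placeholder to replace with the actual repo id:

```python
from datasets import load_dataset

# Placeholder: substitute the dataset's actual Hub repository id.
repo_id = "<user>/<dataset-name>"

ds = load_dataset(repo_id)
row = ds["train"][0]
print(row["title"])       # page title (string)
print(row["categories"])  # category labels (sequence of strings)
```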