---
language:
- en
license: cc-by-sa-3.0
size_categories:
- n<1K
- 1K<n<10K
- 10K<n<100K
dataset_info:
- config_name: '100'
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 98031
num_examples: 100
- name: full
num_bytes: 315241.0851032817
num_examples: 100
download_size: 839250
dataset_size: 413272.0851032817
- config_name: 100k
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 102100446
num_examples: 100000
- name: full
num_bytes: 315241085.10328174
num_examples: 100000
download_size: 830226372
dataset_size: 417341531.10328174
- config_name: 10k
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 10221068
num_examples: 10000
- name: full
num_bytes: 31524108.51032817
num_examples: 10000
download_size: 83501027
dataset_size: 41745176.51032817
- config_name: 1k
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 1007863
num_examples: 1000
- name: full
num_bytes: 3152410.8510328173
num_examples: 1000
download_size: 8616768
dataset_size: 4160273.8510328177
- config_name: 50k
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 51054035
num_examples: 50000
- name: full
num_bytes: 157620542.55164087
num_examples: 50000
download_size: 413753517
dataset_size: 208674577.55164087
- config_name: 5k
features:
- name: id
dtype: string
- name: url
dtype: string
- name: title
dtype: string
- name: text
dtype: string
splits:
- name: partial
num_bytes: 5082253
num_examples: 5000
- name: full
num_bytes: 15762054.255164085
num_examples: 5000
download_size: 41631926
dataset_size: 20844307.255164087
configs:
- config_name: '100'
data_files:
- split: full
path: 100/full-*
- split: partial
path: 100/partial-*
- config_name: 100k
data_files:
- split: full
path: 100k/full-*
- split: partial
path: 100k/partial-*
- config_name: 10k
data_files:
- split: full
path: 10k/full-*
- split: partial
path: 10k/partial-*
- config_name: 1k
data_files:
- split: full
path: 1k/full-*
- split: partial
path: 1k/partial-*
- config_name: 50k
data_files:
- split: full
path: 50k/full-*
- split: partial
path: 50k/partial-*
- config_name: 5k
data_files:
- split: full
path: 5k/full-*
- split: partial
path: 5k/partial-*
---

# mini_wiki
This is a sampled version of the [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia) dataset. This repository provides scripts to generate the 100, 1k, 5k, 10k, 50k, and 100k samples of the dataset, based on the `20231101.en` version.
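The generation scripts themselves are not reproduced here. As a rough sketch, a sample of this kind could be drawn from the source dataset along the following lines; the shuffle seed and the sampling strategy are illustrative assumptions, not the repository's actual procedure:

```python
from datasets import load_dataset

# Load the English Wikipedia snapshot this dataset is based on.
wiki = load_dataset("wikimedia/wikipedia", "20231101.en", split="train")

# Hypothetical sampling step: shuffle and keep the first n articles.
n = 1000
sample = wiki.shuffle(seed=42).select(range(n))
print(sample)  # columns: id, url, title, text
```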
## Usage
There are two possible splits: `full`, which loads the entire article, and `partial`, which contains only the first 200 words of each article. It is recommended to use `partial` if you are performing retrieval and are only interested in the first paragraphs of each Wikipedia article.
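For reference, truncating an article to its first 200 words can be expressed as a one-liner; this is a minimal sketch of the idea, not necessarily the exact preprocessing used to build the `partial` split:

```python
def truncate_to_words(text: str, max_words: int = 200) -> str:
    # Keep only the first `max_words` whitespace-separated tokens.
    return " ".join(text.split()[:max_words])
```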
To load a configuration and split:
```python
from datasets import load_dataset

# Load the 100-sample, full version of the dataset:
data = load_dataset('xhluca/mini_wiki', name="100", split="full")
print(data)

# Load the partial version with 1k, 5k, 10k, 50k, or 100k samples:
data = load_dataset('xhluca/mini_wiki', name="1k", split="partial")
data = load_dataset('xhluca/mini_wiki', name="5k", split="partial")
data = load_dataset('xhluca/mini_wiki', name="10k", split="partial")
data = load_dataset('xhluca/mini_wiki', name="50k", split="partial")
data = load_dataset('xhluca/mini_wiki', name="100k", split="partial")
```