Update dataset_infos.json
dataset_infos.json  CHANGED  (+39 -1)

@@ -1 +1,39 @@
-{
+{
+    "Team-PIXEL/rendered-bookcorpus": {
+        "description": "This dataset is a version of the BookCorpus available at [https://huggingface.co/datasets/bookcorpusopen](https://huggingface.co/datasets/bookcorpusopen) with examples rendered as images with resolution 16x8464 pixels. The original BookCorpus was introduced by Zhu et al. (2015) in ['Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books'](https://arxiv.org/abs/1506.06724) and contains 17868 dataset items (books) of various genres. The rendered BookCorpus was used to train the PIXEL model introduced in the paper 'Language Modelling with Pixels' by Rust et al. (2022).",
+        "citation": "@article{rust-etal-2022-pixel,\ntitle={Language Modelling with Pixels},\nauthor={Phillip Rust and Jonas F. Lotz and Emanuele Bugliarello and Elizabeth Salesky and Miryam de Lhoneux and Desmond Elliott},\njournal={arXiv preprint},\nyear={2022},\nurl={https://arxiv.org/abs/2207.06991}\n}",
+        "homepage": "https://github.com/xplip/pixel",
+        "license": "",
+        "features": {
+            "pixel_values": {
+                "decode": true,
+                "id": null,
+                "_type": "Image"
+            },
+            "num_patches": {
+                "dtype": "int64",
+                "id": null,
+                "_type": "Value"
+            }
+        },
+        "post_processed": null,
+        "supervised_keys": null,
+        "task_templates": null,
+        "builder_name": null,
+        "config_name": null,
+        "version": null,
+        "splits": {
+            "train": {
+                "name": "train",
+                "num_bytes": 63586787923,
+                "num_examples": 5400000,
+                "dataset_name": "rendered-bookcorpus"
+            }
+        },
+        "download_checksums": null,
+        "download_size": 63578266746,
+        "post_processing_size": null,
+        "dataset_size": 63586787923,
+        "size_in_bytes": 127165054669
+    }
+}
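
The "features" block added above declares two columns: "pixel_values", an Image feature that decodes on access, and "num_patches", an int64 Value; the single "train" split lists 5400000 rendered examples. Below is a minimal sketch of reading the dataset with the standard datasets library. The repository ID and column names are taken from the JSON above; the streaming call and the Features reconstruction are assumed typical usage, not part of this commit.

    from datasets import Features, Image, Value, load_dataset

    # Equivalent of the "features" block in dataset_infos.json (names taken from the JSON above).
    features = Features({
        "pixel_values": Image(),        # "_type": "Image", "decode": true
        "num_patches": Value("int64"),  # "_type": "Value", "dtype": "int64"
    })

    # Stream the train split instead of downloading it up front (assumed usage, not shown in this commit).
    dataset = load_dataset("Team-PIXEL/rendered-bookcorpus", split="train", streaming=True)

    example = next(iter(dataset))
    print(example["num_patches"])   # int64 patch count for this rendered example
    print(example["pixel_values"])  # decoded PIL image (16x8464 pixels per the description)

Streaming avoids materialising the roughly 63.6 GB download_size reported in the metadata; a plain load_dataset call without streaming=True would download and cache the entire train split first.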