Tasks: Text2Text Generation
Modalities: Text
Formats: parquet
Languages: English
Size: 1K - 10K
Tags: text-to-sql
Commit fbb01a4
Parent(s): 6232cc3

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (3374e30baf2f6bfa282b151d52baeecbf633922a)
- Delete loading script (9757161cdf9ffa29b596e3bdf890358a0bd7249f)
- Delete legacy dataset_infos.json (345fa1a33572e51552bac830528b864fea42133e)
- Delete data folder (50a5a6862dcf2332a7c6781199dd631309361d42)
- README.md +10 -2
- dataset_infos.json +0 -1
- spider.py +0 -109
- data/spider.zip → spider/train-00000-of-00001.parquet +2 -2
- spider/validation-00000-of-00001.parquet +3 -0
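For context, a minimal sketch of how the converted dataset is consumed, assuming the canonical "spider" repo id and the datasets library; with the splits stored as Parquet, load_dataset reads the shards directly instead of executing the spider.py loading script:

    from datasets import load_dataset

    # Sketch: no loading script runs on the client once the data is Parquet;
    # the repo id "spider" is an assumption based on the URLs in this repo.
    ds = load_dataset("spider")
    print(ds)                          # train: 7000 rows, validation: 1034 rows
    print(ds["train"][0]["question"])  # natural-language question
    print(ds["train"][0]["query"])     # gold SQL query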
README.md
CHANGED
@@ -22,6 +22,7 @@ pretty_name: Spider
 tags:
 - text-to-sql
 dataset_info:
+  config_name: spider
   features:
   - name: db_id
     dtype: string
@@ -35,7 +36,6 @@ dataset_info:
     sequence: string
   - name: question_toks
     sequence: string
-  config_name: spider
   splits:
   - name: train
     num_bytes: 4743786
@@ -43,8 +43,16 @@ dataset_info:
   - name: validation
     num_bytes: 682090
     num_examples: 1034
-  download_size: 99736136
+  download_size: 957246
   dataset_size: 5425876
+configs:
+- config_name: spider
+  data_files:
+  - split: train
+    path: spider/train-*
+  - split: validation
+    path: spider/validation-*
+  default: true
 ---
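The configs block added above is what maps the spider config and its splits to the Parquet shards. As a sketch of what that enables, the shards can also be read without the datasets library, assuming huggingface_hub is installed so pandas can resolve hf:// paths and assuming the canonical "spider" repo id:

    import pandas as pd

    # Read the train shard named in the data_files mapping directly as Parquet.
    df = pd.read_parquet("hf://datasets/spider/spider/train-00000-of-00001.parquet")
    print(len(df))              # 7000 rows, matching num_examples above
    print(df.columns.tolist())  # db_id, query, question, query_toks, ...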
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-{"spider": {"description": "Spider is a large-scale complex and cross-domain semantic parsing and text-toSQL dataset annotated by 11 college students\n", "citation": "@article{yu2018spider,\n title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},\n author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},\n journal={arXiv preprint arXiv:1809.08887},\n year={2018}\n}\n", "homepage": "https://yale-lily.github.io/spider", "license": "CC BY-SA 4.0", "features": {"db_id": {"dtype": "string", "id": null, "_type": "Value"}, "query": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "query_toks": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "query_toks_no_value": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "question_toks": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "spider", "config_name": "spider", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4743786, "num_examples": 7000, "dataset_name": "spider"}, "validation": {"name": "validation", "num_bytes": 682090, "num_examples": 1034, "dataset_name": "spider"}}, "download_checksums": {"https://huggingface.co/datasets/spider/resolve/main/data/spider.zip": {"num_bytes": 99736136, "checksum": "5ddff97bb1d421282c593e8d30ce0ce107270f4dd4a21d60eba4bf287d5956b1"}}, "download_size": 99736136, "post_processing_size": null, "dataset_size": 5425876, "size_in_bytes": 105162012}}
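The split sizes and feature types that lived in this legacy file are now carried by the dataset_info section of the README front matter and remain accessible programmatically. A minimal sketch, again assuming the "spider" repo id:

    from datasets import load_dataset_builder

    # The builder parses the README YAML, so the values formerly stored in
    # dataset_infos.json are still exposed through DatasetInfo.
    info = load_dataset_builder("spider").info
    print(info.splits["train"].num_examples)       # 7000
    print(info.splits["validation"].num_examples)  # 1034
    print(info.dataset_size)                       # 5425876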
spider.py
DELETED
@@ -1,109 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks"""
-
-
-import json
-import os
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@article{yu2018spider,
- title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
- author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
- journal={arXiv preprint arXiv:1809.08887},
- year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-Spider is a large-scale complex and cross-domain semantic parsing and text-toSQL dataset annotated by 11 college students
-"""
-
-_HOMEPAGE = "https://yale-lily.github.io/spider"
-
-_LICENSE = "CC BY-SA 4.0"
-
-_URL = "https://huggingface.co/datasets/spider/resolve/main/data/spider.zip"
-
-
-class Spider(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="spider",
-            version=VERSION,
-            description="Spider: A Large-Scale Human-Labeled Dataset for Text-to-SQL Tasks",
-        ),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "db_id": datasets.Value("string"),
-                "query": datasets.Value("string"),
-                "question": datasets.Value("string"),
-                "query_toks": datasets.features.Sequence(datasets.Value("string")),
-                "query_toks_no_value": datasets.features.Sequence(datasets.Value("string")),
-                "question_toks": datasets.features.Sequence(datasets.Value("string")),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_filepath = dl_manager.download_and_extract(_URL)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "data_filepath": os.path.join(downloaded_filepath, "spider/train_spider.json"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "data_filepath": os.path.join(downloaded_filepath, "spider/dev.json"),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, data_filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", data_filepath)
-        with open(data_filepath, encoding="utf-8") as f:
-            spider = json.load(f)
-            for idx, sample in enumerate(spider):
-                yield idx, {
-                    "db_id": sample["db_id"],
-                    "query": sample["query"],
-                    "question": sample["question"],
-                    "query_toks": sample["query_toks"],
-                    "query_toks_no_value": sample["query_toks_no_value"],
-                    "question_toks": sample["question_toks"],
-                }
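The deleted script downloaded spider.zip and re-parsed train_spider.json and dev.json on every user's machine; the Parquet shards added in this commit carry the same six columns that _generate_examples used to yield. A rough equivalence check as a sketch, assuming the train shard has been downloaded locally under the file name introduced in this commit:

    import pyarrow.parquet as pq

    # Inspect the converted shard; column names should mirror the features
    # declared in the old _info() method.
    table = pq.read_table("train-00000-of-00001.parquet")
    print(table.num_rows)      # 7000, matching the old train split
    print(table.column_names)  # expected: db_id, query, question, query_toks,
                               # query_toks_no_value, question_toks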
data/spider.zip → spider/train-00000-of-00001.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ddff97bb1d421282c593e8d30ce0ce107270f4dd4a21d60eba4bf287d5956b1
-size 99736136
+oid sha256:cb4b681558f6f8f428e516fb94c5a1cb19c5a0a0c153c0618c8cc4a28115d4cb
+size 831359
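The diff above shows only the Git LFS pointer; the roughly 830 KB Parquet payload is resolved from LFS storage when the file is fetched. A sketch with huggingface_hub, assuming the "spider" repo id:

    from huggingface_hub import hf_hub_download

    # Downloads the actual Parquet file behind the LFS pointer into the local cache.
    path = hf_hub_download(
        repo_id="spider",
        repo_type="dataset",
        filename="spider/train-00000-of-00001.parquet",
    )
    print(path)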
spider/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3e2a46303899a2d4afe3f6a3a62e59f8d589f241b3cbfb52356479b1f054888
+size 125887