albertvillanova (HF staff) committed
Commit cdb5c3d
1 parent: 944ee2a

Convert dataset to Parquet (#6)

- Convert dataset to Parquet (5e5f553e977ee62568debf772272d382e54a360f)
- Delete loading script (5611843f4bc8fccc5c6d97eafb5845741900d120)
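With the loading script deleted, `datasets` resolves the splits directly from the Parquet shards declared in the README's `configs` section rather than executing repository code. A minimal sketch of the user-facing result (expected sizes taken from the split metadata in the diff below):

```python
from datasets import load_dataset

# After this commit, no loading script runs; the library fetches the
# Parquet shards listed under `configs` in the README.
snli = load_dataset("snli")

print(snli)              # DatasetDict with train/validation/test splits
print(snli["test"][0])   # {'premise': ..., 'hypothesis': ..., 'label': ...}
```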

README.md CHANGED
@@ -22,6 +22,7 @@ task_ids:
 paperswithcode_id: snli
 pretty_name: Stanford Natural Language Inference
 dataset_info:
+  config_name: plain_text
   features:
   - name: premise
     dtype: string
@@ -34,19 +35,27 @@ dataset_info:
           '0': entailment
           '1': neutral
           '2': contradiction
-  config_name: plain_text
   splits:
   - name: test
-    num_bytes: 1263912
+    num_bytes: 1258904
     num_examples: 10000
-  - name: train
-    num_bytes: 66159510
-    num_examples: 550152
   - name: validation
-    num_bytes: 1268044
+    num_bytes: 1263036
     num_examples: 10000
-  download_size: 94550081
-  dataset_size: 68691466
+  - name: train
+    num_bytes: 65884386
+    num_examples: 550152
+  download_size: 20439300
+  dataset_size: 68406326
+configs:
+- config_name: plain_text
+  data_files:
+  - split: test
+    path: plain_text/test-*
+  - split: validation
+    path: plain_text/validation-*
+  - split: train
+    path: plain_text/train-*
 ---
 # Dataset Card for SNLI

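The new `configs`/`data_files` block is what lets Parquet-aware clients bypass `datasets` entirely. As a hedged sketch, pandas can read a shard straight off the Hub through the `hf://` filesystem (requires `huggingface_hub`; the shard filename matches the `plain_text/test-*` glob above, and the bare `snli` repo id assumes the canonical dataset location):

```python
import pandas as pd

# Read one Parquet shard directly; the path follows the data_files glob
# `plain_text/test-*` declared in the README above.
df = pd.read_parquet("hf://datasets/snli/plain_text/test-00000-of-00001.parquet")

print(df.shape)   # expected (10000, 3) per the test split metadata
print(df.head())
```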
plain_text/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4696deda851c4d2385f26b58f2e13f9ed9f08ea7b42a3f4c2b97a9d08448878c
+size 411531

plain_text/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef9a7b25d97390a62aeda7abe26aec8640600f50b818eaeb9107097d60ac6620
+size 19614612

plain_text/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00f5ed8deaed007fef3022f0215b287efbab815b1bb31ac3f3ff4f4129d41ffe
+size 413157
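Each ADDED file above is a Git LFS pointer, not the Parquet payload itself: the `oid sha256:...` and `size` fields identify the real object. A small verification sketch (the local path is hypothetical):

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return the sha256 hex digest that a Git LFS
    pointer records in its `oid` field."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the test shard; the digest should match the
# pointer's oid, and the file should be exactly `size` bytes (411531).
digest = lfs_sha256("plain_text/test-00000-of-00001.parquet")
print(digest == "4696deda851c4d2385f26b58f2e13f9ed9f08ea7b42a3f4c2b97a9d08448878c")
```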
snli.py DELETED
@@ -1,110 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""The Stanford Natural Language Inference (SNLI) Corpus."""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{bowman-etal-2015-large,
-    title = "A large annotated corpus for learning natural language inference",
-    author = "Bowman, Samuel R. and
-      Angeli, Gabor and
-      Potts, Christopher and
-      Manning, Christopher D.",
-    editor = "M{\\`a}rquez, Llu{\\'\\i}s and
-      Callison-Burch, Chris and
-      Su, Jian",
-    booktitle = "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing",
-    month = sep,
-    year = "2015",
-    address = "Lisbon, Portugal",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/D15-1075",
-    doi = "10.18653/v1/D15-1075",
-    pages = "632--642",
-}
-"""
-
-_DESCRIPTION = """\
-The SNLI corpus (version 1.0) is a collection of 570k human-written English
-sentence pairs manually labeled for balanced classification with the labels
-entailment, contradiction, and neutral, supporting the task of natural language
-inference (NLI), also known as recognizing textual entailment (RTE).
-"""
-
-_DATA_URL = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
-
-
-class Snli(datasets.GeneratorBasedBuilder):
-    """The Stanford Natural Language Inference (SNLI) Corpus."""
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="plain_text",
-            version=datasets.Version("1.0.0", ""),
-            description="Plain text import of SNLI",
-        )
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "premise": datasets.Value("string"),
-                    "hypothesis": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both premise
-            # and hypothesis as input).
-            supervised_keys=None,
-            homepage="https://nlp.stanford.edu/projects/snli/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        data_dir = os.path.join(dl_dir, "snli_1.0")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_test.txt")}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_dev.txt")}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_train.txt")}
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for idx, row in enumerate(reader):
-                label = -1 if row["gold_label"] == "-" else row["gold_label"]
-                yield idx, {
-                    "premise": row["sentence1"],
-                    "hypothesis": row["sentence2"],
-                    "label": label,
-                }
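One behavior of the deleted script carries over to the Parquet data: pairs whose `gold_label` was `"-"` (no annotator consensus) were emitted with label `-1`, and those rows survive the conversion. A hedged sketch for dropping them before training:

```python
from datasets import load_dataset

snli = load_dataset("snli")

# The deleted script mapped gold_label == "-" to -1; such rows are
# usually filtered out before training a classifier.
labeled = snli.filter(lambda example: example["label"] != -1)

print(len(snli["train"]), "->", len(labeled["train"]))
```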