Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:

Convert dataset to Parquet

#2
by albertvillanova HF staff - opened
README.md CHANGED
@@ -29,13 +29,20 @@ dataset_info:
29
  sequence: string
30
  splits:
31
  - name: validation
32
- num_bytes: 379991
33
  num_examples: 755
34
  - name: test
35
- num_bytes: 379711
36
  num_examples: 748
37
- download_size: 731111
38
- dataset_size: 759702
 
 
 
 
 
 
 
39
  ---
40
 
41
  # Dataset Card for JFLEG
 
29
  sequence: string
30
  splits:
31
  - name: validation
32
+ num_bytes: 379979
33
  num_examples: 755
34
  - name: test
35
+ num_bytes: 379699
36
  num_examples: 748
37
+ download_size: 289093
38
+ dataset_size: 759678
39
+ configs:
40
+ - config_name: default
41
+ data_files:
42
+ - split: validation
43
+ path: data/validation-*
44
+ - split: test
45
+ path: data/test-*
46
  ---
47
 
48
  # Dataset Card for JFLEG
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3e5328a79aebb6fd559d19291c055824ed1040c5038731b3449e0e9efb1d59b
3
+ size 141012
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:102d7bd261223c00e2a39f0dd239857de9e5f91f94ae2aefbdd50a38ac6fddf8
3
+ size 148081
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"default": {"description": "JFLEG (JHU FLuency-Extended GUG) is an English grammatical error correction (GEC) corpus.\nIt is a gold standard benchmark for developing and evaluating GEC systems with respect to\nfluency (extent to which a text is native-sounding) as well as grammaticality.\n\nFor each source document, there are four human-written corrections (ref0 to ref3).\n", "citation": "@InProceedings{napoles-sakaguchi-tetreault:2017:EACLshort,\n author = {Napoles, Courtney\n and Sakaguchi, Keisuke\n and Tetreault, Joel},\n title = {JFLEG: A Fluency Corpus and Benchmark for Grammatical Error Correction},\n booktitle = {Proceedings of the 15th Conference of the European Chapter of the\n Association for Computational Linguistics: Volume 2, Short Papers},\n month = {April},\n year = {2017},\n address = {Valencia, Spain},\n publisher = {Association for Computational Linguistics},\n pages = {229--234},\n url = {http://www.aclweb.org/anthology/E17-2037}\n}\n@InProceedings{heilman-EtAl:2014:P14-2,\n author = {Heilman, Michael\n and Cahill, Aoife\n and Madnani, Nitin\n and Lopez, Melissa\n and Mulholland, Matthew\n and Tetreault, Joel},\n title = {Predicting Grammaticality on an Ordinal Scale},\n booktitle = {Proceedings of the 52nd Annual Meeting of the\n Association for Computational Linguistics (Volume 2: Short Papers)},\n month = {June},\n year = {2014},\n address = {Baltimore, Maryland},\n publisher = {Association for Computational Linguistics},\n pages = {174--180},\n url = {http://www.aclweb.org/anthology/P14-2029}\n}\n", "homepage": "https://github.com/keisks/jfleg", "license": "CC BY-NC-SA 4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "corrections": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "jfleg", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 
1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 379991, "num_examples": 755, "dataset_name": "jfleg"}, "test": {"name": "test", "num_bytes": 379711, "num_examples": 748, "dataset_name": "jfleg"}}, "download_checksums": {"https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.src": {"num_bytes": 72726, "checksum": "4a0e8b86d18a1058460ff0a592dac1ba68986d135256efbd27e997ac43f295f8"}, "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref0": {"num_bytes": 73216, "checksum": "adea6287c6e2240b7777e63cd56f8e228e742bbfb42c5152bc0bd2bc91f4e53e"}, "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref1": {"num_bytes": 73129, "checksum": "d40d56ec7468ddab03fdcca97065ab3f9d391d749dbc7097b7c777a19ce4242e"}, "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref2": {"num_bytes": 73394, "checksum": "b070691d633e0c4143d96ba21299ae71cb126086517d2970df47420842067793"}, "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref3": {"num_bytes": 73164, "checksum": "9187fd834693fa77d07957991282d32d61ff84a207c25cbfab318c871bacdbc4"}, "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.src": {"num_bytes": 72684, "checksum": "893db119162487aa7f956b65978453576919e6797cd6c1955f93b7a8b9f4bbd8"}, "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref0": {"num_bytes": 73090, "checksum": "875953280a3ea1dea2827337b1778c0105f0c0aa79f2517a6e0e42db5e5e170c"}, "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref1": {"num_bytes": 73325, "checksum": "190d3398f2765f54a39b5489d1e96c483412a656086c731f8712ad0591087d80"}, "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref2": {"num_bytes": 73018, "checksum": "0e3c6abe934ccd16c9dffb2fd889d6f55afc3ad13a63c1e148c720bb4e99046b"}, "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref3": {"num_bytes": 73365, "checksum": "19f49de6eff813b26505ecf756c20dc301aeb80696696b01ca950298f6e58441"}}, 
"download_size": 731111, "post_processing_size": null, "dataset_size": 759702, "size_in_bytes": 1490813}}
 
 
jfleg.py DELETED
@@ -1,144 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """JFLEG dataset."""
16
-
17
-
18
- import datasets
19
-
20
-
21
- _CITATION = """\
22
- @InProceedings{napoles-sakaguchi-tetreault:2017:EACLshort,
23
- author = {Napoles, Courtney
24
- and Sakaguchi, Keisuke
25
- and Tetreault, Joel},
26
- title = {JFLEG: A Fluency Corpus and Benchmark for Grammatical Error Correction},
27
- booktitle = {Proceedings of the 15th Conference of the European Chapter of the
28
- Association for Computational Linguistics: Volume 2, Short Papers},
29
- month = {April},
30
- year = {2017},
31
- address = {Valencia, Spain},
32
- publisher = {Association for Computational Linguistics},
33
- pages = {229--234},
34
- url = {http://www.aclweb.org/anthology/E17-2037}
35
- }
36
- @InProceedings{heilman-EtAl:2014:P14-2,
37
- author = {Heilman, Michael
38
- and Cahill, Aoife
39
- and Madnani, Nitin
40
- and Lopez, Melissa
41
- and Mulholland, Matthew
42
- and Tetreault, Joel},
43
- title = {Predicting Grammaticality on an Ordinal Scale},
44
- booktitle = {Proceedings of the 52nd Annual Meeting of the
45
- Association for Computational Linguistics (Volume 2: Short Papers)},
46
- month = {June},
47
- year = {2014},
48
- address = {Baltimore, Maryland},
49
- publisher = {Association for Computational Linguistics},
50
- pages = {174--180},
51
- url = {http://www.aclweb.org/anthology/P14-2029}
52
- }
53
- """
54
-
55
- _DESCRIPTION = """\
56
- JFLEG (JHU FLuency-Extended GUG) is an English grammatical error correction (GEC) corpus.
57
- It is a gold standard benchmark for developing and evaluating GEC systems with respect to
58
- fluency (extent to which a text is native-sounding) as well as grammaticality.
59
-
60
- For each source document, there are four human-written corrections (ref0 to ref3).
61
- """
62
-
63
- _HOMEPAGE = "https://github.com/keisks/jfleg"
64
-
65
- _LICENSE = "CC BY-NC-SA 4.0"
66
-
67
- _URLs = {
68
- "dev": {
69
- "src": "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.src",
70
- "ref0": "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref0",
71
- "ref1": "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref1",
72
- "ref2": "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref2",
73
- "ref3": "https://raw.githubusercontent.com/keisks/jfleg/master/dev/dev.ref3",
74
- },
75
- "test": {
76
- "src": "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.src",
77
- "ref0": "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref0",
78
- "ref1": "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref1",
79
- "ref2": "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref2",
80
- "ref3": "https://raw.githubusercontent.com/keisks/jfleg/master/test/test.ref3",
81
- },
82
- }
83
-
84
-
85
class Jfleg(datasets.GeneratorBasedBuilder):
    """JFLEG (JHU FLuency-Extended GUG) grammatical error correction dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features, homepage, license, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"sentence": datasets.Value("string"), "corrections": datasets.Sequence(datasets.Value("string"))}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download source/reference files and declare the validation and test splits.

        The upstream "dev" split is exposed as the VALIDATION split.
        """
        downloaded_dev = dl_manager.download_and_extract(_URLs["dev"])
        downloaded_test = dl_manager.download_and_extract(_URLs["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_dev,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_test, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs pairing each source sentence with its references.

        Args:
            filepath: dict mapping "src" and "ref0".."ref3" to local file paths.
            split: split name ("dev" or "test"); carried via gen_kwargs, unused here.

        Raises:
            ValueError: if a reference file's line count differs from the source's.
        """
        source_file = filepath["src"]
        with open(source_file, encoding="utf-8") as f:
            # NOTE: split("\n") keeps a trailing empty entry when the file ends
            # with a newline. This is preserved on purpose: the published
            # num_examples (755 validation / 748 test) count that entry.
            source_sentences = f.read().split("\n")
        num_source = len(source_sentences)

        corrections = []
        for n in range(0, 4):
            correction_file = filepath[f"ref{n}"]
            with open(correction_file, encoding="utf-8") as f:
                correction_sentences = f.read().split("\n")
            num_correction = len(correction_sentences)

            # Explicit check instead of `assert`: asserts are stripped under
            # `python -O`, which would silently skip this validation.
            if num_correction != num_source:
                raise ValueError(
                    f"Sizes do not match: {num_source} vs {num_correction} for {source_file} vs {correction_file}."
                )
            corrections.append(correction_sentences)

        # Transpose so each source sentence is aligned with its four corrections.
        corrected_sentences = list(zip(*corrections))
        for id_, source_sentence in enumerate(source_sentences):
            yield id_, {"sentence": source_sentence, "corrections": corrected_sentences[id_]}