Commit 1592c3e (parent: 9ae5d41), committed by albertvillanova (HF staff)

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (faf9bb39db59a06055a39f7b8cb2f737067158d1)
- Delete loading script (4ae4e65e2e2a2c5ad7a8952fbfbe01a65bb470db)
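
With the splits converted to Parquet and declared in the card's `configs` block, the dataset now loads without executing any script. A minimal sketch, assuming the repo is published on the Hub under an id such as `kor_3i4k` (the name of the deleted loading script):

```python
from datasets import load_dataset

# Loads the Parquet-backed splits declared in the README's `configs` block;
# no custom loading script runs anymore.
ds = load_dataset("kor_3i4k")

print(ds["train"].num_rows)  # 55134, per the updated dataset card
print(ds["test"].num_rows)   # 6121
print(ds["train"][0])        # {'label': ..., 'text': ...}
```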

README.md CHANGED
@@ -35,13 +35,20 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 3102158
+    num_bytes: 3102134
     num_examples: 55134
   - name: test
-    num_bytes: 344028
+    num_bytes: 344024
     num_examples: 6121
-  download_size: 2956114
-  dataset_size: 3446186
+  download_size: 1974323
+  dataset_size: 3446158
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for 3i4K
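
The added `configs` block is what routes `load_dataset` to the Parquet files, via the `data/train-*` and `data/test-*` glob patterns. Because the splits are now plain Parquet, they can also be read directly with generic tools; a sketch using pandas, assuming the two files below have been downloaded from the repo:

```python
import pandas as pd

# Each split is a single Parquet file with a `label` column (stored as the
# ClassLabel's integer index) and a `text` column.
train_df = pd.read_parquet("data/train-00000-of-00001.parquet")
test_df = pd.read_parquet("data/test-00000-of-00001.parquet")

print(len(train_df), len(test_df))  # 55134 6121
print(train_df.head())
```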
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90fdc098e3053288ead52b64a703dcb5f401dd1f32d6eee6fbd36f7df072461b
+size 200394
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c63c2775ae44f668be76a36799974ee3bd0c83e5af343e3bd6c57ce3dc0c4e3
+size 1773929
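
Both added files are Git LFS pointers: the repository tracks only the object's sha256 (`oid`) and byte `size`, while the actual Parquet payload lives in LFS storage. After fetching the real files, the digests can be checked against the pointers; a small sketch:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file and return its sha256 hex digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected: 2c63c2775ae44f668be76a36799974ee3bd0c83e5af343e3bd6c57ce3dc0c4e3
print(sha256_of("data/train-00000-of-00001.parquet"))
# Expected: 90fdc098e3053288ead52b64a703dcb5f401dd1f32d6eee6fbd36f7df072461b
print(sha256_of("data/test-00000-of-00001.parquet"))
```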
kor_3i4k.py DELETED
@@ -1,95 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""3i4K: Intonation-aided intention identification for Korean dataset"""
-
-
-import csv
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@article{cho2018speech,
-  title={Speech Intention Understanding in a Head-final Language: A Disambiguation Utilizing Intonation-dependency},
-  author={Cho, Won Ik and Lee, Hyeon Seung and Yoon, Ji Won and Kim, Seok Min and Kim, Nam Soo},
-  journal={arXiv preprint arXiv:1811.04231},
-  year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-This dataset is designed to identify speaker intention from real-life spoken utterances in Korean, assigning each
-one of 7 categories: fragment, statement, question, command, rhetorical question, rhetorical command, intonation-dependent utterance.
-"""
-
-_HOMEPAGE = "https://github.com/warnikchow/3i4k"
-
-_LICENSE = "CC BY-SA-4.0"
-
-_TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/warnikchow/3i4k/master/data/train_val_test/fci_train_val.txt"
-_TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/warnikchow/3i4k/master/data/train_val_test/fci_test.txt"
-
-
-class Kor_3i4k(datasets.GeneratorBasedBuilder):
-    """Intonation-aided intention identification for Korean"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "label": datasets.features.ClassLabel(
-                        names=[
-                            "fragment",
-                            "statement",
-                            "question",
-                            "command",
-                            "rhetorical question",
-                            "rhetorical command",
-                            "intonation-dependent utterance",
-                        ]
-                    ),
-                    "text": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators"""
-
-        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generates 3i4K examples"""
-
-        with open(filepath, encoding="utf-8") as csv_file:
-            data = csv.reader(csv_file, delimiter="\t")
-            for id_, row in enumerate(data):
-                label, text = row
-                yield id_, {"label": int(label), "text": text}
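
The `ClassLabel` names defined in the deleted script are preserved in the dataset card's `dataset_info` metadata, so the integer labels stored in the Parquet files still decode to the original category names after loading. A sketch, again assuming a Hub id like `kor_3i4k`:

```python
from datasets import load_dataset

ds = load_dataset("kor_3i4k", split="test")

# The ClassLabel feature survives the Parquet conversion, so int2str()
# maps stored integer ids back to names like "question" or "command".
label_feature = ds.features["label"]
example = ds[0]
print(example["text"], "->", label_feature.int2str(example["label"]))
```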