holylovenia committed on
Commit
d8c4ab3
1 Parent(s): 840e11a

Upload ijelid.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. ijelid.py +142 -0
ijelid.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ from datasets.download.download_manager import DownloadManager
6
+
7
+ from seacrowd.utils import schemas
8
+ from seacrowd.utils.configs import SEACrowdConfig
9
+ from seacrowd.utils.constants import Licenses, Tasks
10
+
11
# BibTeX citation for the paper that introduced the IJELID corpus.
_CITATION = """
@article{hidayatullah2023corpus,
title={Corpus creation and language identification for code-mixed Indonesian-Javanese-English Tweets},
author={Hidayatullah, Ahmad Fathan and Apong, Rosyzie Anna and Lai, Daphne TC and Qazi, Atika},
journal={PeerJ Computer Science},
volume={9},
pages={e1312},
year={2023},
publisher={PeerJ Inc.}
}
"""

# Dataset is fetched from a public URL, not a local path.
_LOCAL = False
# ISO 639-3 codes of the languages present in the corpus.
_LANGUAGES = ["ind", "jav", "eng"]
_DATASETNAME = "ijelid"
# NOTE: fixed the missing separator between "ID (Indonesian)" and
# "JV (Javanese)" in the label enumeration below.
_DESCRIPTION = """\
This is a code-mixed Indonesian-Javanese-English dataset for token-level
language identification. We named this dataset as IJELID
(Indonesian-Javanese-English Language Identification). This dataset contains
tweets that have been tokenized with the corresponding token and its language
label. There are seven language labels in the dataset, namely: ID (Indonesian),
JV (Javanese), EN (English), MIX_ID_EN (mixed Indonesian-English), MIX_ID_JV (mixed
Indonesian-Javanese), MIX_JV_EN (mixed Javanese-English), OTH (Other).
"""

_HOMEPAGE = "https://github.com/fathanick/Code-mixed-Indonesian-Javanese-English-Twitter-Data"
_LICENSE = Licenses.CC_BY_NC_SA_4_0.value
# Raw TSV files for each split, hosted on the authors' GitHub repository.
# Note the dev split is named "val.tsv" upstream.
_URLS = {
    "train": "https://raw.githubusercontent.com/fathanick/Code-mixed-Indonesian-Javanese-English-Twitter-Data/main/train.tsv",
    "dev": "https://raw.githubusercontent.com/fathanick/Code-mixed-Indonesian-Javanese-English-Twitter-Data/main/val.tsv",
    "test": "https://raw.githubusercontent.com/fathanick/Code-mixed-Indonesian-Javanese-English-Twitter-Data/main/test.tsv",
}

_SUPPORTED_TASKS = [Tasks.TOKEN_LEVEL_LANGUAGE_IDENTIFICATION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
47
+
48
+
49
class IJELIDDataset(datasets.GeneratorBasedBuilder):
    """IJELID dataset from https://github.com/fathanick/Code-mixed-Indonesian-Javanese-English-Twitter-Data

    Token-level language identification over code-mixed
    Indonesian-Javanese-English tweets, stored as CoNLL-style TSV files
    ("token<TAB>label" lines, sentences separated by blank lines).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "seq_label"
    # The seven token-level language labels used by the corpus.
    LABEL_CLASSES = ["ID", "JV", "EN", "MIX_ID_EN", "MIX_ID_JV", "MIX_JV_EN", "OTH"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata (features, homepage, license, citation)."""
        # No specific schema for the source, so for consistency,
        # the same seq_label schema is used for both source and SEACrowd.
        features = schemas.seq_label_features(self.LABEL_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the three TSV files and return one SplitGenerator per split."""
        # _URLS keys ("train", "dev", "test") double as the split identifiers.
        data_files = {split: Path(dl_manager.download_and_extract(url)) for split, url in _URLS.items()}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_files["dev"], "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield examples as (key, example) tuples.

        Each example is one sentence: parallel lists of tokens and labels.
        Sentences are separated by blank lines in the TSV file.
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            labels = []
            for line in f:
                # A blank (or whitespace-only) line marks a sentence boundary.
                # `not line.strip()` also covers "\r\n" line endings and
                # stray whitespace, which exact comparisons against "" / "\n"
                # would miss (and `for line in f` never yields "" anyway).
                if not line.strip():
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "labels": labels,
                        }
                        guid += 1
                        tokens = []
                        labels = []
                else:
                    # IJELID TSV columns are separated by a single \t.
                    # Strip the newline first and split at most once so a
                    # stray extra tab inside a token line cannot raise a
                    # ValueError and abort generation.
                    token, label = line.rstrip("\n").split("\t", 1)
                    tokens.append(token)
                    labels.append(label.rstrip())

            # Flush the last sentence when the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "labels": labels,
                }