anishka committed on
Commit
f742bc4
1 Parent(s): 2810a5a

Upload te_en_syn_dataset.py

Files changed (1)
  1. te_en_syn_dataset.py +134 -0
te_en_syn_dataset.py ADDED
# Loading script for the Te_En_Syn dataset (adapted from the AnCora NER loading script).
import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """ """

_DESCRIPTION = """AnCora Catalan NER.
This is a dataset for Named Entity Recognition (NER) from the AnCora corpus, adapted for
Machine Learning and Language Model evaluation purposes.
Since multiwords (including Named Entities) in the original AnCora corpus are aggregated as
a single lexical item using underscores (e.g. "Ajuntament_de_Barcelona"),
we split them to align with the word-per-line format, and added conventional Begin-Inside-Outside (IOB)
tags to mark and classify Named Entities.
We did not filter out the different categories of NEs from AnCora (weak and strong).
We did 6 minor edits by hand.
The AnCora corpus is used under the [CC-BY](https://creativecommons.org/licenses/by/4.0/) license.
This dataset was developed by BSC TeMU as part of the AINA project, and to enrich the Catalan Language Understanding Benchmark (CLUB).
"""

_HOMEPAGE = """"""

_URL = "https://huggingface.co/datasets/anishka/Te_En_Syn_dataset/resolve/main/"
_TRAINING_FILE = "te_syn-code_switch-train.conllu"
_DEV_FILE = "te_syn-code_switch-dev.conllu"
_TEST_FILE = "te_syn-code_switch-test.conllu"
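# For reference, the train file resolves to:
# https://huggingface.co/datasets/anishka/Te_En_Syn_dataset/resolve/main/te_syn-code_switch-train.conllu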


class AncoraCaNerConfig(datasets.BuilderConfig):
    """Builder config for the Ancora Ca NER dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for AncoraCaNer.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(AncoraCaNerConfig, self).__init__(**kwargs)


class AncoraCaNer(datasets.GeneratorBasedBuilder):
    """AncoraCaNer dataset."""

    BUILDER_CONFIGS = [
        AncoraCaNerConfig(
            name="AncoraCaNer",
            version=datasets.Version("2.0.0"),
            description="AncoraCaNer dataset"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("string"),
                    "text": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
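
    # Because "upos" is a Sequence of ClassLabel, tags are stored as integer
    # ids; the feature's int2str()/str2int() methods convert between ids and names.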

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
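
    # Each CoNLL-U data line has ten tab-separated columns:
    #   ID  FORM  LEMMA  UPOS  XPOS  FEATS  HEAD  DEPREL  DEPS  MISC
    # _generate_examples below keeps FORM (index 1) and UPOS (index 3).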

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n" or line.startswith("#"):
                    if tokens:
                        yield guid, {
                            "idx": str(guid),
                            "text": tokens,
                            "upos": pos_tags,
                            "xpos": pos_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                else:
                    # CoNLL-U fields are tab separated
                    splits = line.split("\t")
                    tokens.append(splits[1])
                    pos_tags.append(splits[3].rstrip())
            # last example (guard so a trailing blank line does not yield an empty sentence)
            if tokens:
                yield guid, {
                    "idx": str(guid),
                    "text": tokens,
                    "upos": pos_tags,
                    "xpos": pos_tags,
                }
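
Once the script is on the Hub, a minimal usage sketch (assuming the repo id anishka/Te_En_Syn_dataset; recent versions of datasets also require trust_remote_code=True to execute loading scripts):

    from datasets import load_dataset

    # Downloads the three .conllu files and runs the builder above.
    dataset = load_dataset("anishka/Te_En_Syn_dataset", trust_remote_code=True)
    print(dataset["train"][0]["text"])  # token list for the first training sentence
    print(dataset["train"][0]["upos"])  # matching UPOS tags as ClassLabel ids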