ljvmiranda921 committed
Commit 414fb68
1 Parent(s): 4f42950

Delete loading script

Files changed (1)
  1. tlunified-ner.py +0 -95
tlunified-ner.py DELETED
@@ -1,95 +0,0 @@
- from typing import List
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _DESCRIPTION = """
- This dataset contains the annotated TLUnified corpora from Cruz and Cheng
- (2021). It is a curated sample of around 7,000 documents for the named entity
- recognition (NER) task. The majority of the corpus consists of news reports in
- Tagalog, resembling the domain of the original CoNLL 2003. There are three
- entity types: Person (PER), Organization (ORG), and Location (LOC).
- """
- _LICENSE = """GNU GPL v3.0"""
- _URL = "https://huggingface.co/ljvmiranda921/tlunified-ner"
- _CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
- _VERSION = "1.0.0"
-
-
- class TLUnifiedNERConfig(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         super().__init__(**kwargs)
-
-
- class TLUnifiedNER(datasets.GeneratorBasedBuilder):
-     """Contains an annotated version of the TLUnified dataset from Cruz and Cheng (2021)."""
-
-     VERSION = datasets.Version(_VERSION)
-
-     def _info(self) -> "datasets.DatasetInfo":
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "tokens": datasets.Sequence(datasets.Value("string")),
-                     "ner_tags": datasets.Sequence(
-                         datasets.features.ClassLabel(names=_CLASSES)
-                     ),
-                 }
-             ),
-             homepage=_URL,
-             supervised_keys=None,
-         )
-
-     def _split_generators(
-         self, dl_manager: "datasets.builder.DownloadManager"
-     ) -> List["datasets.SplitGenerator"]:
-         """Return a list of SplitGenerators that organize the splits."""
-         # The repository provides {train,dev,test}.iob files. The _generate_examples
-         # function below defines how these files are parsed.
-         data_files = {
-             "train": dl_manager.download_and_extract("corpus/iob/train.iob"),
-             "dev": dl_manager.download_and_extract("corpus/iob/dev.iob"),
-             "test": dl_manager.download_and_extract("corpus/iob/test.iob"),
-         }
-
-         return [
-             # fmt: off
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
-             # fmt: on
-         ]
-
-     def _generate_examples(self, filepath: str):
-         """Defines how examples are parsed from the IOB file."""
-         logger.info("⏳ Generating examples from = %s", filepath)
-         with open(filepath, encoding="utf-8") as f:
-             guid = 0
-             tokens = []
-             ner_tags = []
-             for line in f:
-                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                     if tokens:
-                         yield guid, {
-                             "id": str(guid),
-                             "tokens": tokens,
-                             "ner_tags": ner_tags,
-                         }
-                         guid += 1
-                         tokens = []
-                         ner_tags = []
-                 else:
-                     # TLUnified-NER IOB files separate token and tag with a tab
-                     token, ner_tag = line.split("\t")
-                     tokens.append(token)
-                     ner_tags.append(ner_tag.rstrip())
-             # Last example
-             if tokens:
-                 yield guid, {
-                     "id": str(guid),
-                     "tokens": tokens,
-                     "ner_tags": ner_tags,
-                 }
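
For reference, _generate_examples expects tab-separated IOB files: one token and one tag per line, with blank lines (or -DOCSTART- markers) closing each document. Below is a minimal, runnable sketch of that layout and the parsing loop; the Tagalog sentence is a hypothetical placeholder, not a line from the actual corpus.

# Hypothetical IOB snippet in the layout the deleted script parsed:
# "token<TAB>tag" per line, a blank line ending each document.
sample = "Nagpunta\tO\nsi\tO\nJuan\tB-PER\nsa\tO\nMaynila\tB-LOC\n\n"

tokens, ner_tags = [], []
for line in sample.splitlines():
    if not line:  # blank line marks a document boundary
        print({"tokens": tokens, "ner_tags": ner_tags})
        tokens, ner_tags = [], []
    else:
        token, tag = line.split("\t")
        tokens.append(token)
        ner_tags.append(tag)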
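
A sketch of consuming the dataset from the Hub, assuming the repository named in _URL still serves the data after this script's removal; the column names and label set follow the _info definition above.

from datasets import load_dataset

# Repository ID taken from _URL; assumes the data files remain on the Hub.
dataset = load_dataset("ljvmiranda921/tlunified-ner")

example = dataset["train"][0]
print(example["tokens"])    # list of token strings
print(example["ner_tags"])  # integer-encoded class labels

# Recover the IOB tag names from the ClassLabel feature.
label_names = dataset["train"].features["ner_tags"].feature.names
print([label_names[tag] for tag in example["ner_tags"]])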