Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Tagalog
Size: 1K - 10K
Commit 1b0f91f · committed by ljvmiranda921
Parent(s): 8d79c07
Implement simple workflow for parsing spaCy files
- .gitignore +4 -0
- README.md +43 -0
- corpus/iob/dev.iob +0 -0
- corpus/iob/test.iob +0 -0
- corpus/iob/train.iob +0 -0
- project.yml +42 -0
- requirements.txt +5 -0
- spacy_to_iob.py +50 -0
- tlunified-ner.py +89 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
+assets
+corpus/spacy
+__pycache__/
+project.lock
README.md
ADDED
@@ -0,0 +1,43 @@
+<!-- SPACY PROJECT: AUTO-GENERATED DOCS START (do not remove) -->
+
+# 🪐 spaCy Project: Dataset builder to HuggingFace Hub
+
+This project contains utility scripts for uploading a dataset to HuggingFace
+Hub. We want to separate the spaCy dependencies from the loading script, so
+we're parsing the spaCy files independently.
+
+The process goes like this: we download the raw corpus from Google Cloud
+Storage (GCS), convert the spaCy files into a readable IOB format, and parse
+that using our loading script (i.e., `tlunified-ner.py`).
+
+We're also shipping the IOB file so that it's easier to access.
+
+
+## 📋 project.yml
+
+The [`project.yml`](project.yml) defines the data assets required by the
+project, as well as the available commands and workflows. For details, see the
+[spaCy projects documentation](https://spacy.io/usage/projects).
+
+### ⏯ Commands
+
+The following commands are defined by the project. They
+can be executed using [`spacy project run [name]`](https://spacy.io/api/cli#project-run).
+Commands are only re-run if their inputs have changed.
+
+| Command | Description |
+| --- | --- |
+| `setup-data` | Prepare the Tagalog corpora used for training various spaCy components |
+| `upload-to-hf` | Upload dataset to HuggingFace Hub |
+
+### 🗂 Assets
+
+The following assets are defined by the project. They can
+be fetched by running [`spacy project assets`](https://spacy.io/api/cli#project-assets)
+in the project directory.
+
+| File | Source | Description |
+| --- | --- | --- |
+| `assets/corpus.tar.gz` | URL | Annotated TLUnified corpora in spaCy format with train, dev, and test splits. |
+
+<!-- SPACY PROJECT: AUTO-GENERATED DOCS END (do not remove) -->
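For context, the end product of this workflow is a token-classification dataset on the HuggingFace Hub. A minimal sketch of how it could be consumed once published; the repository id `ljvmiranda921/tlunified-ner` is an assumption for illustration, not something stated in this commit:

```python
# Hedged sketch: consume the published dataset with the `datasets` library.
# The repo id below is an assumption made for illustration.
from datasets import load_dataset

ds = load_dataset("ljvmiranda921/tlunified-ner")
print(ds)  # DatasetDict with train/validation/test splits

example = ds["train"][0]
print(example["tokens"])    # list of token strings
print(example["ner_tags"])  # integer-encoded labels (see _CLASSES in tlunified-ner.py)
```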
corpus/iob/dev.iob
ADDED (diff too large to render)

corpus/iob/test.iob
ADDED (diff too large to render)

corpus/iob/train.iob
ADDED (diff too large to render)
project.yml
ADDED
@@ -0,0 +1,42 @@
+title: "Dataset builder to HuggingFace Hub"
+description: |
+  This project contains utility scripts for uploading a dataset to HuggingFace
+  Hub. We want to separate the spaCy dependencies from the loading script, so
+  we're parsing the spaCy files independently.
+
+  The process goes like this: we download the raw corpus from Google Cloud
+  Storage (GCS), convert the spaCy files into a readable IOB format, and parse
+  that using our loading script (i.e., `tlunified-ner.py`).
+
+  We're also shipping the IOB file so that it's easier to access.
+
+directories: ["assets", "corpus/spacy", "corpus/iob"]
+
+vars:
+  version: 1.0
+
+assets:
+  - dest: assets/corpus.tar.gz
+    description: "Annotated TLUnified corpora in spaCy format with train, dev, and test splits."
+    url: "https://storage.googleapis.com/ljvmiranda/calamanCy/tl_tlunified_gold/v${vars.version}/corpus.tar.gz"
+
+commands:
+  - name: "setup-data"
+    help: "Prepare the Tagalog corpora used for training various spaCy components"
+    script:
+      - mkdir -p corpus/spacy
+      - tar -xzvf assets/corpus.tar.gz -C corpus/spacy
+      - python -m spacy_to_iob corpus/spacy/ corpus/iob/
+    outputs:
+      - corpus/iob/train.iob
+      - corpus/iob/dev.iob
+      - corpus/iob/test.iob
+
+  - name: "upload-to-hf"
+    help: "Upload dataset to HuggingFace Hub"
+    script:
+      - ls
+    deps:
+      - corpus/iob/train.iob
+      - corpus/iob/dev.iob
+      - corpus/iob/test.iob
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+spacy
+typer
+datasets
+huggingface_hub
+wasabi
spacy_to_iob.py
ADDED
@@ -0,0 +1,50 @@
+from pathlib import Path
+
+import spacy
+import typer
+from spacy.tokens import DocBin
+from wasabi import msg
+
+DELIMITER = "-DOCSTART- -X- O O"
+
+
+def spacy_to_iob(
+    # fmt: off
+    spacy_indir: Path = typer.Argument(..., help="Path to the directory containing the spaCy files."),
+    iob_outdir: Path = typer.Argument(..., help="Path to the directory to save the IOB files."),
+    lang: str = typer.Option("tl", "-l", "--lang", help="Language code for the spaCy vocab."),
+    verbose: bool = typer.Option(False, "-v", "--verbose", help="Print additional information."),
+    delimiter: str = typer.Option(DELIMITER, "-d", "--delimiter", help="Delimiter between examples.")
+    # fmt: on
+):
+    """Convert spaCy files into IOB-formatted files."""
+    nlp = spacy.blank(lang)
+    for spacy_file in spacy_indir.glob("*.spacy"):
+        msg.text(f"Converting {str(spacy_file)}", show=verbose)
+        doc_bin = DocBin().from_disk(spacy_file)
+        docs = doc_bin.get_docs(nlp.vocab)
+
+        lines = []  # container for the IOB lines later on
+        for doc in docs:
+            lines.append(delimiter)
+            lines.append("\n\n")
+            for token in doc:
+                label = (
+                    f"{token.ent_iob_}-{token.ent_type_}"
+                    if token.ent_iob_ != "O"
+                    else "O"
+                )
+                line = f"{token.text}\t{label}"
+                lines.append(line)
+                lines.append("\n")
+            lines.append("\n")
+
+        iob_file = iob_outdir / f"{spacy_file.stem}.iob"
+        with open(iob_file, "w", encoding="utf-8") as f:
+            f.writelines(lines)
+
+        msg.good(f"Saved to {iob_file}")
+
+
+if __name__ == "__main__":
+    typer.run(spacy_to_iob)
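To see what the converter expects as input, here is a hedged sketch that builds a tiny `.spacy` file of the kind `spacy_to_iob` globs for; the sample sentence, file name, and entity spans are made up for illustration:

```python
# Hypothetical input for spacy_to_iob: a DocBin holding one annotated Doc.
import spacy
from spacy.tokens import Doc, DocBin, Span

nlp = spacy.blank("tl")
doc = Doc(nlp.vocab, words=["Si", "Juan", "ay", "pumunta", "sa", "Maynila", "."])
doc.ents = [
    Span(doc, 1, 2, label="PER"),  # "Juan"
    Span(doc, 5, 6, label="LOC"),  # "Maynila"
]

DocBin(docs=[doc]).to_disk("corpus/spacy/sample.spacy")
```

Running the converter over this directory would emit `corpus/iob/sample.iob`: the `-DOCSTART- -X- O O` delimiter, then one tab-separated `token	label` pair per line (`Juan` as `B-PER`, `Maynila` as `B-LOC`, everything else `O`).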
tlunified-ner.py
ADDED
@@ -0,0 +1,89 @@
+import os
+from typing import List
+
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = """"""
+_LICENSE = """GNU GPL v3.0"""
+_CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
+_VERSION = "1.0"
+
+
+class TLUnifiedNERConfig(datasets.BuilderConfig):
+    def __init__(self, **kwargs):
+        super(TLUnifiedNERConfig, self).__init__(**kwargs)
+
+
+class TLUnifiedNER(datasets.GeneratorBasedBuilder):
+    """Contains an annotated version of the TLUnified dataset from Cruz and Cheng (2021)."""
+
+    VERSION = datasets.Version(_VERSION)
+
+    def _info(self) -> "datasets.DatasetInfo":
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(names=_CLASSES)
+                    ),
+                }
+            ),
+            supervised_keys=None,
+        )
+
+    def _split_generators(
+        self, dl_manager: "datasets.builder.DownloadManager"
+    ) -> List["datasets.SplitGenerator"]:
+        """Return a list of SplitGenerators that organizes the splits."""
+        # The corpus/iob directory contains the {train,dev,test}.iob files. The _generate_examples
+        # function below defines how these files are parsed.
+        corpus_dir = "corpus/iob"
+        data_files = {
+            "train": os.path.join(corpus_dir, "train.iob"),
+            "dev": os.path.join(corpus_dir, "dev.iob"),
+            "test": os.path.join(corpus_dir, "test.iob"),
+        }
+
+        return [
+            # fmt: off
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
+            # fmt: on
+        ]
+
+    def _generate_examples(self, filepath: str):
+        """Defines how examples are parsed from the IOB file."""
+        logger.info("⏳ Generating examples from = %s", filepath)
+        with open(filepath, encoding="utf-8") as f:
+            guid = 0
+            tokens = []
+            ner_tags = []
+            for line in f:
+                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if tokens:
+                        yield guid, {
+                            "id": str(guid),
+                            "tokens": tokens,
+                            "ner_tags": ner_tags,
+                        }
+                        guid += 1
+                        tokens = []
+                        ner_tags = []
+                else:
+                    # TLUnified-NER IOB columns are separated by \t
+                    token, ner_tag = line.split("\t")
+                    tokens.append(token)
+                    ner_tags.append(ner_tag.rstrip())
+            # Last example
+            if tokens:
+                yield guid, {
+                    "id": str(guid),
+                    "tokens": tokens,
+                    "ner_tags": ner_tags,
+                }
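Because the script reads the IOB files from the relative `corpus/iob` path, the loader can be smoke-tested locally from the project root before anything is uploaded. A minimal sketch, assuming a `datasets` version that still supports script-based loading (newer releases may require `trust_remote_code=True`):

```python
# Hedged local smoke test for the loading script, run from the project root.
from datasets import load_dataset

ds = load_dataset("./tlunified-ner.py")
print(ds)

# Decode the integer ner_tags back into label names for the first training example.
label_names = ds["train"].features["ner_tags"].feature.names
example = ds["train"][0]
print(list(zip(example["tokens"], (label_names[i] for i in example["ner_tags"]))))
```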