Datasets:
Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Tagalog
Size: 1K - 10K
ArXiv:
DOI:
License:
from pathlib import Path

import spacy
import typer
from spacy.tokens import DocBin
from wasabi import msg

# CoNLL-style document separator emitted before each converted doc
# (overridable on the CLI via -d/--delimiter).
DELIMITER = "-DOCSTART- -X- O O"
def spacy_to_iob(
    # fmt: off
    spacy_indir: Path = typer.Argument(..., help="Path to the directory containing the spaCy files."),
    iob_outdir: Path = typer.Argument(..., help="Path to the directory to save the IOB files."),
    lang: str = typer.Option("tl", "-l", "--lang", help="Language code for the spaCy vocab."),
    verbose: bool = typer.Option(False, "-v", "--verbose", help="Print additional information."),
    delimiter: str = typer.Option(DELIMITER, "-d", "--delimiter", help="Delimiter between examples.")
    # fmt: on
):
    """Convert spaCy files into IOB-formatted files.

    Reads every ``*.spacy`` DocBin file in *spacy_indir* and writes a
    ``<stem>.iob`` file into *iob_outdir*.  Each document is preceded by
    *delimiter* plus a blank line; each token is written as one
    tab-separated ``<text>  <tag>`` line, where the tag is
    ``B-TYPE`` / ``I-TYPE`` inside an entity and ``O`` outside.
    Documents are separated by a blank line.
    """
    # A blank pipeline is enough: only the vocab is needed to deserialize docs.
    nlp = spacy.blank(lang)
    # Make sure the destination exists so open() below cannot fail on a
    # missing directory.
    iob_outdir.mkdir(parents=True, exist_ok=True)
    for spacy_file in spacy_indir.glob("*.spacy"):  # was f"*.spacy": no placeholder needed
        msg.text(f"Converting {str(spacy_file)}", show=verbose)
        doc_bin = DocBin().from_disk(spacy_file)
        docs = doc_bin.get_docs(nlp.vocab)
        lines = []  # container for the IOB lines later on
        for doc in docs:
            lines.append(delimiter)
            lines.append("\n\n")
            for token in doc:
                # "B-PER" / "I-PER" style tag, or plain "O" outside entities.
                label = (
                    f"{token.ent_iob_}-{token.ent_type_}"
                    if token.ent_iob_ != "O"
                    else "O"
                )
                line = f"{token.text}\t{label}"
                lines.append(line)
                lines.append("\n")
            lines.append("\n")  # blank line terminates the document
        iob_file = iob_outdir / f"{spacy_file.stem}.iob"
        with open(iob_file, "w", encoding="utf-8") as f:
            # Batch all the small fragments into a single writelines call.
            f.writelines(lines)
        msg.good(f"Saved to {iob_file}")
if __name__ == "__main__":
    # typer.run builds a one-command CLI from the function signature above.
    typer.run(spacy_to_iob)