import csv
from ast import literal_eval

import datasets

logger = datasets.logging.get_logger(__name__)
_CITATION = """
@inproceedings{aghajani-etal-2021-parstwiner,
title = "{P}ars{T}wi{NER}: A Corpus for Named Entity Recognition at Informal {P}ersian",
author = "Aghajani, MohammadMahdi and
Badri, AliAkbar and
Beigy, Hamid",
booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wnut-1.16",
pages = "131--136",
abstract = "As a result of unstructured sentences and some misspellings and errors, finding named entities in a noisy environment such as social media takes much more effort. ParsTwiNER contains about 250k tokens, based on standard instructions like MUC-6 or CoNLL 2003, gathered from Persian Twitter. Using Cohen{'}s Kappa coefficient, the consistency of annotators is 0.95, a high score. In this study, we demonstrate that some state-of-the-art models degrade on these corpora, and trained a new model using parallel transfer learning based on the BERT architecture. Experimental results show that the model works well in informal Persian as well as in formal Persian.",
}
"""
_DESCRIPTION = """"""
_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/hezarai/parstwiner/resolve/main/parstwiner_train.csv",
    "test": "https://huggingface.co/datasets/hezarai/parstwiner/resolve/main/parstwiner_test.csv",
}


class ParsTwiNERConfig(datasets.BuilderConfig):
    """BuilderConfig for the ParsTwiNER dataset."""

    def __init__(self, **kwargs):
        super(ParsTwiNERConfig, self).__init__(**kwargs)


class ParsTwiNER(datasets.GeneratorBasedBuilder):
    """Dataset builder for ParsTwiNER, a Persian Twitter NER corpus."""

    BUILDER_CONFIGS = [
        ParsTwiNERConfig(
            name="ParsTwiNER",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BIO tags over six entity types: POG, PER, ORG, NAT, LOC, EVE
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-POG",
                                "I-POG",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-NAT",
                                "I-NAT",
                                "B-LOC",
                                "I-LOC",
                                "B-EVE",
                                "I-EVE",
                            ]
                        )
                    ),
                }
            ),
            homepage="https://github.com/overfit-ir/parstwiner",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return SplitGenerators."""
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
            # Skip the header row
            next(csv_reader, None)
            for id_, row in enumerate(csv_reader):
                tokens, ner_tags = row
                # Optional preprocessing here
                # Both columns are stored as stringified Python lists, e.g. "['tok1', 'tok2']"
                tokens = literal_eval(tokens)
                ner_tags = literal_eval(ner_tags)
                yield id_, {"tokens": tokens, "ner_tags": ner_tags}