arxyzan committed on
Commit
fe85b14
1 Parent(s): 35566e1

Create arman-ner.py

Browse files
Files changed (1) hide show
  1. arman-ner.py +107 -0
arman-ner.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ from ast import literal_eval
3
+
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+
8
+ _CITATION = """
9
+ @inproceedings{poostchi-etal-2018-bilstm,
10
+ title = "{B}i{LSTM}-{CRF} for {P}ersian Named-Entity Recognition {A}rman{P}erso{NERC}orpus: the First Entity-Annotated {P}ersian Dataset",
11
+ author = "Poostchi, Hanieh and
12
+ Zare Borzeshi, Ehsan and
13
+ Piccardi, Massimo",
14
+ booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
15
+ month = may,
16
+ year = "2018",
17
+ address = "Miyazaki, Japan",
18
+ publisher = "European Language Resources Association (ELRA)",
19
+ url = "https://aclanthology.org/L18-1701",
20
+ }
21
+ """
22
+
23
+ _DESCRIPTION = """"""
24
+
25
+ _DOWNLOAD_URLS = {
26
+ "train": "https://huggingface.co/datasets/hezarai/arman-ner/resolve/main/arman-ner_train.csv",
27
+ "test": "https://huggingface.co/datasets/hezarai/arman-ner/resolve/main/arman-ner_test.csv",
28
+ }
29
+
30
+
31
+ class ArmanNERConfig(datasets.BuilderConfig):
32
+ def __init__(self, **kwargs):
33
+ super(ArmanNERConfig, self).__init__(**kwargs)
34
+
35
+
36
+ class ArmanNER(datasets.GeneratorBasedBuilder):
37
+ BUILDER_CONFIGS = [
38
+ ArmanNERConfig(
39
+ name="Arman-NER",
40
+ version=datasets.Version("1.0.0"),
41
+ description=_DESCRIPTION,
42
+ ),
43
+ ]
44
+
45
+ def _info(self):
46
+ return datasets.DatasetInfo(
47
+ description=_DESCRIPTION,
48
+ features=datasets.Features(
49
+ {
50
+ "tokens": datasets.Sequence(datasets.Value("string")),
51
+ "ner_tags": datasets.Sequence(
52
+ datasets.features.ClassLabel(
53
+ names=[
54
+ "O",
55
+ "B-pro",
56
+ "I-pro",
57
+ "B-pers",
58
+ "I-pers",
59
+ "B-org",
60
+ "I-org",
61
+ "B-loc",
62
+ "I-loc",
63
+ "B-fac",
64
+ "I-fac",
65
+ "B-event",
66
+ "I-event"
67
+ ]
68
+ )
69
+ ),
70
+ }
71
+ ),
72
+ homepage="https://huggingface.co/datasets/hezarai/arman-ner",
73
+ citation=_CITATION,
74
+ )
75
+
76
+ def _split_generators(self, dl_manager):
77
+ """
78
+ Return SplitGenerators.
79
+ """
80
+
81
+ train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
82
+ test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
83
+
84
+ return [
85
+ datasets.SplitGenerator(
86
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
87
+ ),
88
+ datasets.SplitGenerator(
89
+ name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
90
+ ),
91
+ ]
92
+
93
+ def _generate_examples(self, filepath):
94
+ logger.info("⏳ Generating examples from = %s", filepath)
95
+ with open(filepath, encoding="utf-8") as csv_file:
96
+ csv_reader = csv.reader(
97
+ csv_file, quotechar='"', skipinitialspace=True
98
+ )
99
+
100
+ next(csv_reader, None)
101
+
102
+ for id_, row in enumerate(csv_reader):
103
+ tokens, ner_tags = row
104
+ # Optional preprocessing here
105
+ tokens = literal_eval(tokens)
106
+ ner_tags = literal_eval(ner_tags)
107
+ yield id_, {"tokens": tokens, "ner_tags": ner_tags}