surdan committed on
Commit
8210aca
1 Parent(s): 8dcae56

Upload Prepare_original_data.ipynb

Files changed (1)
  1. Prepare_original_data.ipynb +427 -0
Prepare_original_data.ipynb ADDED
@@ -0,0 +1,427 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1fc75ebf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## datasets==2.0.0 pandas==1.4.2"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c9cc126c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import re\n",
+ "from tqdm import tqdm\n",
+ "from datasets import Dataset, DatasetDict\n",
+ "import pickle\n",
+ "import json"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2adeaf52",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_list_values(text):\n",
+ "    ## split a whitespace-separated annotation line into its fields\n",
+ "    return text.split()\n",
+ "\n",
+ "def replc_t_n(text):\n",
+ "    ## replace tabs and newlines with spaces and trim the result\n",
+ "    return re.sub(\"\\t|\\n\", \" \", text).strip()\n",
+ "\n",
+ "def read_file(filepath, readlines=False):\n",
+ "    ## read a file either as one string or as a list of lines\n",
+ "    with open(filepath, \"r\") as f:\n",
+ "        if readlines:\n",
+ "            txt = f.readlines()\n",
+ "        else:\n",
+ "            txt = f.read()\n",
+ "    return txt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "26a51547",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def split_text_on_labeled_tokens(text, labels):\n",
+ "    \"\"\"\n",
+ "    Split text into tokens and label each of them\n",
+ "\n",
+ "    :param text: input text\n",
+ "    :type text: string\n",
+ "    :param labels: token labels with positions in text\n",
+ "    :type labels: list\n",
+ "    :return: list of text split into tokens, list of entity labels for each token\n",
+ "    :rtype: list, list\n",
+ "    \"\"\"\n",
+ "    ### inner function\n",
+ "    def chunk_text_labeling(text, start, end, is_ner=False):\n",
+ "        \"\"\"\n",
+ "        Label a part of the text given by its position\n",
+ "\n",
+ "        :param text: input text\n",
+ "        :type text: string\n",
+ "        :param start: start position of entity in text\n",
+ "        :type start: int\n",
+ "        :param end: end position of entity in text\n",
+ "        :type end: int\n",
+ "        :param is_ner: whether this part of the text is a named entity\n",
+ "        :type is_ner: bool\n",
+ "        \"\"\"\n",
+ "        chunk_iter = 0\n",
+ "        ner_chunk = text[start: end].split()\n",
+ "        for part_of_chunk in ner_chunk:\n",
+ "            split_text.append(part_of_chunk)\n",
+ "            if is_ner:\n",
+ "                if chunk_iter == 0:\n",
+ "                    ner_label.append(\"B-\" + ner)\n",
+ "                else:\n",
+ "                    ner_label.append(\"I-\" + ner)\n",
+ "                chunk_iter += 1\n",
+ "            else:\n",
+ "                ner_label.append(\"O\")\n",
+ "    ### inner function\n",
+ "\n",
+ "    init_start = 0\n",
+ "    split_text = []\n",
+ "    ner_label = []\n",
+ "    for ner, start, end in labels:\n",
+ "        if start > init_start:\n",
+ "            ## label the unannotated gap before the entity, then the entity itself\n",
+ "            chunk_text_labeling(text, init_start, start)\n",
+ "            chunk_text_labeling(text, start, end, True)\n",
+ "        else:\n",
+ "            chunk_text_labeling(text, start, end, True)\n",
+ "        init_start = end\n",
+ "    ## label the tail of the text after the last entity,\n",
+ "    ## otherwise trailing tokens would be silently dropped\n",
+ "    if init_start < len(text):\n",
+ "        chunk_text_labeling(text, init_start, len(text))\n",
+ "\n",
+ "    return split_text, ner_label"
+ ]
+ },
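+ {
+ "cell_type": "markdown",
+ "id": "a1b2c3d4",
+ "metadata": {},
+ "source": [
+ "A minimal sanity check of `split_text_on_labeled_tokens` on a toy sentence. This cell is a sketch that is not part of the original pipeline: the sample text and character spans are made up, but the `(entity, start, end)` layout of the labels matches what `grouped_and_sort_labeled_data` below produces."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e5f6a7b8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## hypothetical example: character spans 0-10 and 19-24 cover the two entities\n",
+ "sample_text = \"John Smith visited Paris in May\"\n",
+ "sample_labels = [(\"PERSON\", 0, 10), (\"CITY\", 19, 24)]\n",
+ "tokens, tags = split_text_on_labeled_tokens(sample_text, sample_labels)\n",
+ "print(tokens)  ## ['John', 'Smith', 'visited', 'Paris', 'in', 'May']\n",
+ "print(tags)    ## ['B-PERSON', 'I-PERSON', 'O', 'B-CITY', 'O', 'O']"
+ ]
+ },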
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0ba5da7e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def grouped_and_sort_labeled_data(annotation_file):\n",
+ "    \"\"\"\n",
+ "    Get the list of entities with their positions in the text\n",
+ "\n",
+ "    :param annotation_file: list of annotation lines\n",
+ "    :type annotation_file: list\n",
+ "    :return: list of entities sorted by start position in text\n",
+ "    :rtype: list\n",
+ "    \"\"\"\n",
+ "    ## lines containing \";\" mark discontinuous spans and are skipped\n",
+ "    df_ann = pd.DataFrame([get_list_values(replc_t_n(i)) for i in annotation_file if \";\" not in i])\n",
+ "    df_ann[2] = df_ann[2].astype(\"int\")\n",
+ "    df_ann[3] = df_ann[3].astype(\"int\")\n",
+ "    ## for annotations sharing a type and start position, keep the shortest span\n",
+ "    grouped = df_ann.groupby([1, 2])[3].min().reset_index()\n",
+ "\n",
+ "    return grouped.sort_values(by=2)[[1, 2, 3]].values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "46fdb74b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def check_isalnum(text):\n",
+ "    ## True if the text contains at least one alphanumeric character\n",
+ "    return any(i.isalnum() for i in text)\n",
+ "\n",
+ "def keep_only_alnum(text):\n",
+ "    ## replace every non-alphanumeric character with a space and trim\n",
+ "    return \"\".join([i if i.isalnum() else \" \" for i in text]).strip()\n",
+ "\n",
+ "def drop_punct(seq, labels):\n",
+ "    \"\"\"\n",
+ "    Drop punctuation tokens from labeled data\n",
+ "\n",
+ "    :param seq: list of tokens\n",
+ "    :type seq: list\n",
+ "    :param labels: list of entity labels\n",
+ "    :type labels: list\n",
+ "    \"\"\"\n",
+ "    new_seq = []\n",
+ "    new_labels = []\n",
+ "    for i in range(len(seq)):\n",
+ "        if seq[i].isalnum():\n",
+ "            new_seq.append(seq[i])\n",
+ "            new_labels.append(labels[i])\n",
+ "    return new_seq, new_labels\n",
+ "\n",
+ "def drop_duplicate_tokens(seq, labels):\n",
+ "    ## drop a token when it immediately repeats the previous one\n",
+ "    new_seq = []\n",
+ "    new_labels = []\n",
+ "    for i in range(len(seq)):\n",
+ "        if (i != 0) and (seq[i-1] == seq[i]):\n",
+ "            continue\n",
+ "        new_seq.append(seq[i])\n",
+ "        new_labels.append(labels[i])\n",
+ "    return new_seq, new_labels\n",
+ "\n",
+ "def prepare_sequences(seqs, labels):\n",
+ "    clear_tokens = [keep_only_alnum(i) if check_isalnum(i) else i for i in seqs]\n",
+ "    d_p_tokens, d_p_labels = drop_punct(clear_tokens, labels)\n",
+ "    return drop_duplicate_tokens(d_p_tokens, d_p_labels)\n",
+ "\n",
+ "def map_label_to_id(ids_dict, labels):\n",
+ "    \"\"\"\n",
+ "    Convert string labels to their corresponding ids\n",
+ "\n",
+ "    :param ids_dict: {\"age\": 0, \"event\": 1, ...}\n",
+ "    :type ids_dict: dict\n",
+ "    :param labels: list of entity labels [\"age\", \"event\", \"O\", ...]\n",
+ "    :type labels: list\n",
+ "    \"\"\"\n",
+ "    return [ids_dict[i] for i in labels]"
+ ]
+ },
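+ {
+ "cell_type": "markdown",
+ "id": "d4c3b2a1",
+ "metadata": {},
+ "source": [
+ "A quick illustration of the cleaning chain on made-up tokens (not taken from the dataset): punctuation-only tokens are dropped and immediately repeated tokens are collapsed, with the labels kept aligned throughout."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0e1d2c3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## hypothetical token/label lists as split_text_on_labeled_tokens would emit them\n",
+ "toks = [\"Paris\", \"Paris\", \",\", \"France\"]\n",
+ "lbls = [\"B-CITY\", \"B-CITY\", \"O\", \"B-COUNTRY\"]\n",
+ "print(prepare_sequences(toks, lbls))\n",
+ "## (['Paris', 'France'], ['B-CITY', 'B-COUNTRY'])"
+ ]
+ },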
+ {
+ "cell_type": "markdown",
+ "id": "5b735210",
+ "metadata": {},
+ "source": [
+ "### Preparing files in folders"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "23ad44bb",
+ "metadata": {},
+ "source": [
+ "#### The data comes from https://github.com/dialogue-evaluation/RuNNE"
+ ]
+ },
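+ {
+ "cell_type": "markdown",
+ "id": "9f8e7d6c",
+ "metadata": {},
+ "source": [
+ "For reference, the RuNNE annotations come as brat-style standoff `.ann` files. The line below is illustrative rather than copied from the dataset: each entity line carries an id, an entity type, character offsets and the surface text, and lines containing `;` (discontinuous spans) are filtered out by `grouped_and_sort_labeled_data`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12ab34cd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## illustrative brat-style annotation line (tab-separated)\n",
+ "ann_line = \"T1\\tPERSON 0 10\\tJohn Smith\\n\"\n",
+ "print(get_list_values(replc_t_n(ann_line)))\n",
+ "## ['T1', 'PERSON', '0', '10', 'John', 'Smith']\n",
+ "## columns 1, 2, 3 (type, start, end) are the ones the parser keeps"
+ ]
+ },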
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1c2748fa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "folders = [\"train\", \"test\", \"dev\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ab65dc18",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for folder in folders:\n",
+ "    base_path = f\"RuNNE/data/{folder}\"\n",
+ "    temp_folder = os.listdir(base_path)\n",
+ "\n",
+ "    ## get the list of annotation filenames\n",
+ "    files_with_ann = [i for i in temp_folder if \".ann\" in i]\n",
+ "\n",
+ "    all_sequences = []\n",
+ "    all_labels = []\n",
+ "\n",
+ "    for f_ann in tqdm(files_with_ann):\n",
+ "\n",
+ "        ## the matching text file has the same name with a .txt extension\n",
+ "        txt_file = f_ann.replace(\".ann\", \".txt\")\n",
+ "\n",
+ "        ann = read_file(base_path + \"/\" + f_ann, readlines=True)\n",
+ "        txt = read_file(base_path + \"/\" + txt_file)\n",
+ "\n",
+ "        ## check the length, because the dev folder contains empty files\n",
+ "        if len(ann) == 0:\n",
+ "            continue\n",
+ "        labels = grouped_and_sort_labeled_data(ann)\n",
+ "\n",
+ "        ## split the text into tokens and label each of them\n",
+ "        split_text, ner_label = split_text_on_labeled_tokens(txt, labels)\n",
+ "        seq_split_indexes = [i for i, v in enumerate(split_text) if v == \".\"]\n",
+ "\n",
+ "        ## add the prepared data from each file to the general list\n",
+ "        prev = 0\n",
+ "        for i in seq_split_indexes:\n",
+ "\n",
+ "            short_text = split_text[prev: i]\n",
+ "            short_label = ner_label[prev: i]\n",
+ "\n",
+ "            clear_tokens, clear_label = prepare_sequences(short_text, short_label)\n",
+ "\n",
+ "            all_sequences.append(clear_tokens)\n",
+ "            all_labels.append(clear_label)\n",
+ "            ## skip the dot itself: sentence boundaries are not kept as tokens\n",
+ "            prev = i + 1\n",
+ "\n",
+ "    ## save the data for each part of the split dataset to a file\n",
+ "    df_folder = pd.DataFrame({\"sequences\": all_sequences, \"labels\": all_labels})\n",
+ "    with open(f'{folder}_data.pickle', 'wb') as f:\n",
+ "        pickle.dump(df_folder, f)\n",
+ "    print(f\"For folder <{folder}> prepared <{df_folder.shape[0]}> sequences\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cc61030b",
+ "metadata": {},
+ "source": [
+ "### Creating a DatasetDict from the prepared data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8c24ab41",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## load the three dataframes and wrap them in a datasets DatasetDict\n",
+ "dsd = DatasetDict()\n",
+ "for folder in folders:\n",
+ "    with open(f'{folder}_data.pickle', 'rb') as f:\n",
+ "        data = pickle.load(f)\n",
+ "    dsd[folder] = Dataset.from_pandas(data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "04e76e90",
+ "metadata": {},
+ "source": [
+ "### Creating a dictionary for label ids"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ce021634",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## collect the unique entity labels across all three splits\n",
+ "for_df = []\n",
+ "for folder in folders:\n",
+ "    with open(f'{folder}_data.pickle', 'rb') as f:\n",
+ "        for_df.append(pickle.load(f))\n",
+ "lbls = pd.concat(for_df)[\"labels\"].values\n",
+ "\n",
+ "dd = dict()\n",
+ "ids = 0\n",
+ "for ll in lbls:\n",
+ "    for lbl in ll:\n",
+ "        if lbl not in dd:\n",
+ "            dd[lbl] = ids\n",
+ "            ids += 1\n",
+ "\n",
+ "# # count each entity\n",
+ "# countss = dict()\n",
+ "# for ll in lbls:\n",
+ "#     for lbl in ll:\n",
+ "#         if lbl not in countss:\n",
+ "#             countss[lbl] = 1\n",
+ "#         else:\n",
+ "#             countss[lbl] += 1\n",
+ "\n",
+ "# del countss[\"O\"]\n",
+ "# sorted_counts = {k: v for k, v in sorted(countss.items(), key=lambda item: item[0].split(\"-\")[1])}\n",
+ "\n",
+ "# for k, v in sorted_counts.items():\n",
+ "#     print(\"- \" + k + f\": {v}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "58000df7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## rebuild the label -> id mapper with \"O\" first and the\n",
+ "## entity labels sorted by entity name, so the ids are deterministic\n",
+ "ll = [i for i in dd.keys() if i != \"O\"]\n",
+ "ll_sort = sorted(ll, key=lambda x: x.split(\"-\")[1])\n",
+ "new_dd = {k: v for v, k in enumerate([\"O\"] + ll_sort)}\n",
+ "\n",
+ "## save the inverse (id -> label) mapping for later decoding\n",
+ "reverse_dd = {v: k for k, v in new_dd.items()}\n",
+ "with open('id_to_label_map.pickle', 'wb') as f:\n",
+ "    pickle.dump(reverse_dd, f)"
+ ]
+ },
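+ {
+ "cell_type": "markdown",
+ "id": "56ef78ab",
+ "metadata": {},
+ "source": [
+ "The resulting mapper keeps `O` at id 0 and numbers the entity labels in name order; the entity names below are made up for illustration, the real set comes from the RuNNE annotations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0a1b2c3d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## hypothetical shape: {\"O\": 0, \"B-AGE\": 1, \"I-AGE\": 2, \"B-CITY\": 3, ...}\n",
+ "print(list(new_dd.items())[:5])"
+ ]
+ },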
+ {
+ "cell_type": "markdown",
+ "id": "b30a7098",
+ "metadata": {},
+ "source": [
+ "### Creating a new column with numerical labels"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "51fd6b38",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dsd_with_ids = dsd.map(\n",
+ "    lambda x: {\"ids\": [map_label_to_id(new_dd, i) for i in x[\"labels\"]]},\n",
+ "    batched=True, remove_columns=\"labels\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b7ecf94f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## push the prepared dataset to the Hub (the target repo id is left blank here)\n",
+ "dsd_with_ids.push_to_hub(\"\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5eb5f3fa",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "hf_env",
+ "language": "python",
+ "name": "hf_env"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }