simpler prmu script
prmu.py
CHANGED
@@ -1,37 +1,18 @@
-
-# coding: utf-8
+# -*- coding: utf-8 -*-
 
-# In[1]:
-
-
-from this import d
-from datasets import load_dataset, load_from_disk
-import spacy
-import re
-# from spacy.lang.en import English
-from spacy.tokenizer import _get_regex_pattern
-from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
-from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
-from spacy.util import compile_infix_regex
-from nltk.stem.snowball import SnowballStemmer as Stemmer
-import numpy as np
 import sys
+import json
+import spacy
 
-
-
-print("LOADING DATASET")
-dataset = load_dataset("json", data_files={"test":"data.jsonl"})
-
-
-
-# In[3]:
+from nltk.stem.snowball import SnowballStemmer as Stemmer
 
 nlp = spacy.load("en_core_web_sm")
-re_token_match = _get_regex_pattern(nlp.Defaults.token_match)
-re_token_match = f"({re_token_match}|\w+-\w+)"
-nlp.tokenizer.token_match = re.compile(re_token_match).match
 
+# https://spacy.io/usage/linguistic-features#native-tokenizer-additions
+
+from spacy.lang.char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
+from spacy.lang.char_classes import CONCAT_QUOTES, LIST_ELLIPSES, LIST_ICONS
+from spacy.util import compile_infix_regex
 
 # Modify tokenizer infix patterns
 infixes = (
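The body of `infixes` falls in the context elided between hunks. Given the new imports and the spaCy documentation URL quoted in the code, it is presumably the documentation's stock infix set with the hyphen-splitting rule commented out, so that hyphenated keyphrases (e.g. "self-learning") stay single tokens. A sketch of that block, not necessarily the exact committed text:

    infixes = (
        LIST_ELLIPSES
        + LIST_ICONS
        + [
            r"(?<=[0-9])[+\-\*^](?=[0-9-])",
            r"(?<=[{al}{q}])\.(?=[{au}{q}])".format(
                al=ALPHA_LOWER, au=ALPHA_UPPER, q=CONCAT_QUOTES
            ),
            r"(?<=[{a}]),(?=[{a}])".format(a=ALPHA),
            # rule that splits on hyphens between letters, deliberately disabled:
            # r"(?<=[{a}])(?:{h})(?=[{a}])".format(a=ALPHA, h=HYPHENS),
            r"(?<=[{a}0-9])[:<>=/](?=[{a}])".format(a=ALPHA),
        ]
    )
    infix_re = compile_infix_regex(infixes)
    nlp.tokenizer.infix_finditer = infix_re.finditer

This documented infix route replaces the old file's `token_match` override (`\w+-\w+`), which achieved the same effect less cleanly.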
@@ -53,15 +34,10 @@ infix_re = compile_infix_regex(infixes)
 nlp.tokenizer.infix_finditer = infix_re.finditer
 
 
-# In[5]:
-
-
 def contains(subseq, inseq):
     return any(inseq[pos:pos + len(subseq)] == subseq for pos in range(0, len(inseq) - len(subseq) + 1))
 
 
-
-
 def find_pmru(tok_title, tok_text, tok_kp):
     """Find PRMU category of a given keyphrase."""
 
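`contains` is a sliding-window test for a contiguous subsequence over token lists, e.g.:

    contains(["grid", "comput"], ["a", "grid", "comput", "platform"])  # True
    contains(["comput", "grid"], ["a", "grid", "comput", "platform"])  # False: right words, wrong order

The middle of `find_pmru` sits in the elided context; only the docstring and the final "U" branch are visible. It presumably implements the PRMU categories of Boudin and Gallina (2021) over stemmed tokens: Present, Reordered, Mixed, Unseen. A minimal sketch consistent with the visible lines, not necessarily the exact committed code:

    def find_pmru(tok_title, tok_text, tok_kp):
        """Find PRMU category of a given keyphrase."""
        # "P": the keyphrase occurs contiguously in the title or the text
        if contains(tok_kp, tok_title) or contains(tok_kp, tok_text):
            return "P"
        # otherwise count how many of its words occur anywhere
        present = [w for w in tok_kp if w in tok_title or w in tok_text]
        # if "all" words are present
        if len(present) == len(tok_kp):
            return "R"
        # if "some" words are present
        elif present:
            return "M"
        # if "no" words are present
        else:
            return "U"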
@@ -84,48 +60,41 @@ def find_pmru(tok_title, tok_text, tok_kp):
     # if "no" words are present
     else:
         return "U"
-    return prmu
 
-def tokenize(dataset):
-    keyphrases_stems= []
-    for keyphrase in dataset["keyphrases"]:
-        keyphrase_spacy = nlp(keyphrase)
-        keyphrase_tokens = [token.text for token in keyphrase_spacy]
-        keyphrase_stems = [Stemmer('porter').stem(w.lower()) for w in keyphrase_tokens]
-        keyphrase_stems = " ".join(keyphrase_stems)
-        keyphrases_stems.append(keyphrase_stems)
-
-    dataset["tokenized_keyphrases"] = keyphrases_stems
-    return dataset
 
-"""
-Function that tokenizes the dataset (title, text and keyphrases)
-and runs the prmu algorithm.
-"""
-def prmu_dataset(dataset):
-    title_spacy = nlp(dataset['title'])
-    abstract_spacy = nlp(dataset['text'])
-
-    abstract_tokens = [token.text for token in abstract_spacy]
+if __name__ == '__main__':
+
+    data = []
+
+    # read the dataset
+    with open(sys.argv[1], 'r') as f:
+        # loop through the documents
+        for line in f:
+            doc = json.loads(line.strip())
+
+            print(doc['id'])
+
+            title_spacy = nlp(doc['title'])
+            abstract_spacy = nlp(doc['abstract'])
+
+            title_tokens = [token.text for token in title_spacy]
+            abstract_tokens = [token.text for token in abstract_spacy]
+
+            title_stems = [Stemmer('porter').stem(w.lower()) for w in title_tokens]
+            abstract_stems = [Stemmer('porter').stem(w.lower()) for w in abstract_tokens]
+
+            keyphrases_stems = []
+            for keyphrase in doc['keyphrases']:
+                keyphrases_stems.append(keyphrase.split())
+
+            prmu = [find_pmru(title_stems, abstract_stems, kp) for kp in keyphrases_stems]
+
+            if doc['prmu'] != prmu:
+                print("PRMU categories are not identical!")
+
+            doc['prmu'] = prmu
+            data.append(json.dumps(doc))
+
+    # write the json
+    with open(sys.argv[2], 'w') as o:
+        o.write("\n".join(data))
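As rewritten, the script is a plain command-line filter taking an input and an output path; the file names below are illustrative:

    python prmu.py test.jsonl test_prmu.jsonl

Each input line is one JSON document with the fields the script reads: `id`, `title`, `abstract`, `keyphrases` and `prmu`. Since the loop splits each keyphrase without stemming it, the `keyphrases` field is presumably already stored in stemmed form, e.g. (values invented):

    {"id": "42", "title": "...", "abstract": "...", "keyphrases": ["grid comput"], "prmu": ["P"]}

The recomputed categories overwrite `prmu`, and a warning is printed whenever they differ from the stored ones.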