khulnasoft committed
Commit 0fff0b6 · 1 Parent(s): 0069356

Create matcher.py

Files changed (1):
  1. evaluation_data/carb/matcher.py  +339 -0

evaluation_data/carb/matcher.py  ADDED
@@ -0,0 +1,339 @@
from __future__ import division
import string
from copy import copy

from nltk.translate.bleu_score import sentence_bleu
from nltk.corpus import stopwords


class Matcher:
    @staticmethod
    def bowMatch(ref, ex, ignoreStopwords, ignoreCase):
        """
        A binary function testing for exact lexical match (ignoring ordering)
        between reference and predicted extraction
        """
        s1 = ref.bow()
        s2 = ex.bow()
        if ignoreCase:
            s1 = s1.lower()
            s2 = s2.lower()

        s1Words = s1.split(' ')
        s2Words = s2.split(' ')

        if ignoreStopwords:
            s1Words = Matcher.removeStopwords(s1Words)
            s2Words = Matcher.removeStopwords(s2Words)

        return sorted(s1Words) == sorted(s2Words)

    @staticmethod
    def predMatch(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return whether gold and predicted extractions agree on the predicate
        """
        s1 = ref.elementToStr(ref.pred)
        s2 = ex.elementToStr(ex.pred)
        if ignoreCase:
            s1 = s1.lower()
            s2 = s2.lower()

        s1Words = s1.split(' ')
        s2Words = s2.split(' ')

        if ignoreStopwords:
            s1Words = Matcher.removeStopwords(s1Words)
            s2Words = Matcher.removeStopwords(s2Words)

        return s1Words == s2Words

    @staticmethod
    def argMatch(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return whether gold and predicted extractions agree on the arguments
        """
        # Compare word lists, not the joined strings, so that the nested loop
        # below counts matching words rather than matching characters.
        sRef = ' '.join([ref.elementToStr(elem) for elem in ref.args]).split(' ')
        sEx = ' '.join([ex.elementToStr(elem) for elem in ex.args]).split(' ')

        count = 0

        for w1 in sRef:
            for w2 in sEx:
                if w1 == w2:
                    count += 1

        # We check how well the extraction lexically covers the reference.
        # Note: this is somewhat lenient, as it doesn't penalize the extraction
        # for being too long
        coverage = float(count) / len(sRef)

        return coverage > Matcher.LEXICAL_THRESHOLD

    @staticmethod
    def bleuMatch(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return whether the BLEU score between reference and predicted
        extraction exceeds BLEU_THRESHOLD
        """
        sRef = ref.bow()
        sEx = ex.bow()
        bleu = sentence_bleu(references=[sRef.split(' ')], hypothesis=sEx.split(' '))
        return bleu > Matcher.BLEU_THRESHOLD

    @staticmethod
    def lexicalMatch(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return whether the predicted extraction lexically covers enough of the
        reference (see LEXICAL_THRESHOLD)
        """
        sRef = ref.bow().split(' ')
        sEx = ex.bow().split(' ')
        count = 0
        for w1 in sRef:
            for w2 in sEx:
                if w1 == w2:
                    count += 1

        # We check how well the extraction lexically covers the reference.
        # Note: this is somewhat lenient, as it doesn't penalize the extraction
        # for being too long
        coverage = float(count) / len(sRef)

        return coverage > Matcher.LEXICAL_THRESHOLD

    @staticmethod
    def tuple_match(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return [precision, recall] for a predicted extraction against a gold
        one, or False if they fail to match on the predicate or on one of the
        first two arguments
        """
        precision = [0, 0]  # 0 out of 0 predicted words match
        recall = [0, 0]  # 0 out of 0 reference words match
        # If, for each part, any word is the same as a reference word, then it's a match.

        predicted_words = ex.pred.split()
        gold_words = ref.pred.split()
        precision[1] += len(predicted_words)
        recall[1] += len(gold_words)

        matching_words = 0
        for w in gold_words:
            if w in predicted_words:
                matching_words += 1
                predicted_words.remove(w)

        if matching_words == 0:
            return False  # t <-> gt is not a match
        precision[0] += matching_words
        recall[0] += matching_words

        for i in range(len(ref.args)):
            gold_words = ref.args[i].split()
            recall[1] += len(gold_words)
            if len(ex.args) <= i:
                if i < 2:
                    return False
                else:
                    continue
            predicted_words = ex.args[i].split()
            precision[1] += len(predicted_words)
            matching_words = 0
            for w in gold_words:
                if w in predicted_words:
                    matching_words += 1
                    predicted_words.remove(w)

            if matching_words == 0 and i < 2:
                return False  # t <-> gt is not a match
            precision[0] += matching_words
            # Currently this slightly penalises systems when the reference
            # reformulates the sentence words, because the reformulation doesn't
            # match the predicted word. It's a one-wrong-word penalty to precision
            # for all systems that correctly extracted the reformulated word.
            recall[0] += matching_words

        prec = 1.0 * precision[0] / precision[1]
        rec = 1.0 * recall[0] / recall[1]
        return [prec, rec]

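    # Worked example (illustrative, not from the original source): for a gold
    # tuple ("felt", ["I", "very tired"]) and a prediction ("felt", ["I", "tired"]),
    # the predicate contributes 1/1 to both counts, arg0 contributes 1/1, and
    # arg1 contributes 1/1 to precision but 1/2 to recall, so tuple_match
    # returns [3/3, 3/4] = [1.0, 0.75].
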
    # STRICTER LENIENT MATCH
    @staticmethod
    def linient_tuple_match(ref, ex, ignoreStopwords, ignoreCase):
        """
        Like tuple_match, but always returns [precision, recall] (with [0, 0]
        for non-matches) and treats all inflected forms of "be" as equivalent
        """
        precision = [0, 0]  # 0 out of 0 predicted words match
        recall = [0, 0]  # 0 out of 0 reference words match
        # If, for each part, any word is the same as a reference word, then it's a match.

        predicted_words = ex.pred.split()
        gold_words = ref.pred.split()
        precision[1] += len(predicted_words)
        recall[1] += len(gold_words)

        matching_words = 0
        for w in gold_words:
            if w in predicted_words:
                matching_words += 1
                predicted_words.remove(w)

        # matching 'be' with its different forms
        forms_of_be = ["be", "is", "am", "are", "was", "were", "been", "being"]
        if "be" in predicted_words:
            for form in forms_of_be:
                if form in gold_words:
                    matching_words += 1
                    predicted_words.remove("be")
                    break

        if matching_words == 0:
            return [0, 0]  # t <-> gt is not a match

        precision[0] += matching_words
        recall[0] += matching_words

        for i in range(len(ref.args)):
            gold_words = ref.args[i].split()
            recall[1] += len(gold_words)
            if len(ex.args) <= i:
                if i < 2:
                    return [0, 0]  # changed
                else:
                    continue
            predicted_words = ex.args[i].split()
            precision[1] += len(predicted_words)
            matching_words = 0
            for w in gold_words:
                if w in predicted_words:
                    matching_words += 1
                    predicted_words.remove(w)

            precision[0] += matching_words
            # Currently this slightly penalises systems when the reference
            # reformulates the sentence words, because the reformulation doesn't
            # match the predicted word. It's a one-wrong-word penalty to precision
            # for all systems that correctly extracted the reformulated word.
            recall[0] += matching_words

        if precision[1] == 0:
            prec = 0
        else:
            prec = 1.0 * precision[0] / precision[1]
        if recall[1] == 0:
            rec = 0
        else:
            rec = 1.0 * recall[0] / recall[1]
        return [prec, rec]

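    # Illustrative note (not in the original source): the "be" normalization
    # above lets a predicted predicate "be born in" match a gold predicate
    # "was born in" on all three tokens, since the unmatched literal "be" is
    # credited against the gold form "was".
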
    @staticmethod
    def simple_tuple_match(ref, ex, ignoreStopwords, ignoreCase):
        """
        Return [precision, recall] after collapsing everything beyond the
        first argument into a single second argument
        """
        # NOTE: this mutates ref.args and ex.args in place.
        ref.args = [ref.args[0], ' '.join(ref.args[1:])]
        ex.args = [ex.args[0], ' '.join(ex.args[1:])]

        precision = [0, 0]  # 0 out of 0 predicted words match
        recall = [0, 0]  # 0 out of 0 reference words match
        # If, for each part, any word is the same as a reference word, then it's a match.

        predicted_words = ex.pred.split()
        gold_words = ref.pred.split()
        precision[1] += len(predicted_words)
        recall[1] += len(gold_words)

        matching_words = 0
        for w in gold_words:
            if w in predicted_words:
                matching_words += 1
                predicted_words.remove(w)

        precision[0] += matching_words
        recall[0] += matching_words

        for i in range(len(ref.args)):
            gold_words = ref.args[i].split()
            recall[1] += len(gold_words)
            if len(ex.args) <= i:
                break
            predicted_words = ex.args[i].split()
            precision[1] += len(predicted_words)
            matching_words = 0
            for w in gold_words:
                if w in predicted_words:
                    matching_words += 1
                    predicted_words.remove(w)
            precision[0] += matching_words

            # Currently this slightly penalises systems when the reference
            # reformulates the sentence words, because the reformulation doesn't
            # match the predicted word. It's a one-wrong-word penalty to precision
            # for all systems that correctly extracted the reformulated word.
            recall[0] += matching_words

        prec = 1.0 * precision[0] / precision[1]
        rec = 1.0 * recall[0] / recall[1]
        return [prec, rec]

    @staticmethod
    def binary_linient_tuple_match(ref, ex, ignoreStopwords, ignoreCase):
        """
        linient_tuple_match over binarised tuples; for "said"-type predicates,
        also try the extraction with its argument order reversed
        """
        if len(ref.args) >= 2:
            r = copy(ref)
            r.args = [ref.args[0], ' '.join(ref.args[1:])]
        else:
            r = ref
        if len(ex.args) >= 2:
            e = copy(ex)
            e.args = [ex.args[0], ' '.join(ex.args[1:])]
        else:
            e = ex
        straight_match = Matcher.linient_tuple_match(r, e, ignoreStopwords, ignoreCase)

        said_type_reln = ["said", "told", "added", "adds", "says"]
        said_type_sentence = False
        for said_verb in said_type_reln:
            if said_verb in ref.pred:
                said_type_sentence = True
                break
        if not said_type_sentence:
            return straight_match
        else:
            if len(ex.args) >= 2:
                e = copy(ex)
                e.args = [' '.join(ex.args[1:]), ex.args[0]]
            else:
                e = ex
            reverse_match = Matcher.linient_tuple_match(r, e, ignoreStopwords, ignoreCase)

            return max(straight_match, reverse_match)

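    # Illustrative note (not in the original source): for a gold tuple
    # ("said", ["John", "it will rain"]), a system that extracted
    # ("said", ["it will rain", "John"]) is scored under both argument
    # orders and the better [precision, recall] pair is kept.
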
    @staticmethod
    def binary_tuple_match(ref, ex, ignoreStopwords, ignoreCase):
        """
        tuple_match over binarised tuples (all arguments beyond the first are
        joined into a single second argument)
        """
        if len(ref.args) >= 2:
            r = copy(ref)
            r.args = [ref.args[0], ' '.join(ref.args[1:])]
        else:
            r = ref
        if len(ex.args) >= 2:
            e = copy(ex)
            e.args = [ex.args[0], ' '.join(ex.args[1:])]
        else:
            e = ex
        return Matcher.tuple_match(r, e, ignoreStopwords, ignoreCase)

    @staticmethod
    def removeStopwords(ls):
        return [w for w in ls if w.lower() not in Matcher.stopwords]

    # CONSTANTS
    BLEU_THRESHOLD = 0.4
    LEXICAL_THRESHOLD = 0.5  # Note: changing this value didn't change the ordering of the tested systems
    stopwords = stopwords.words('english') + list(string.punctuation)
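
Usage sketch (illustrative, not part of the commit): the matcher functions expect extraction objects exposing pred, args, bow() and elementToStr(), as provided by the extraction class used in CaRB's evaluation script. The Extraction stub below is a hypothetical stand-in for that class, just enough to exercise the matchers.

    from matcher import Matcher

    class Extraction:  # hypothetical stand-in for the real extraction class
        def __init__(self, pred, args):
            self.pred = pred
            self.args = args
        def bow(self):
            return ' '.join([self.pred] + self.args)
        def elementToStr(self, elem):
            return elem

    gold = Extraction('was born in', ['Barack Obama', 'Hawaii'])
    pred = Extraction('born in', ['Obama', 'Hawaii'])

    # Boolean matcher: 4 of the 6 reference words are covered (> 0.5)
    print(Matcher.lexicalMatch(gold, pred, ignoreStopwords=False, ignoreCase=True))   # True
    # [precision, recall] matcher over binarised tuples
    print(Matcher.binary_linient_tuple_match(gold, pred, ignoreStopwords=False, ignoreCase=True))  # [1.0, 0.666...]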