khulnasoft committed aaed6d7 (1 parent: 047c047)

Upload 16 files
evaluation_data/carb/oie_readers/__init__.py ADDED
File without changes
evaluation_data/carb/oie_readers/allennlpReader.py ADDED
@@ -0,0 +1,86 @@
+ from .oieReader import OieReader
+ from .extraction import Extraction
+ import os
+
+ class AllennlpReader(OieReader):
+
+     def __init__(self, threshold=None):
+         self.name = 'Allennlp'
+         self.threshold = threshold
+
+     def read(self, fn):
+         d = {}
+         # Accept either a path to a file or the file contents as a string
+         if os.path.exists(fn):
+             fin = open(fn)
+         else:
+             fin = fn.strip().split('\n')
+
+         # Each line: sentence \t tagged extraction \t confidence
+         for line in fin:
+             line = line.strip().split('\t')
+             text = line[0]
+             try:
+                 confidence = line[2]
+             except IndexError:
+                 confidence = 0
+             line = line[1]
+             try:
+                 arg1 = line[line.index('<arg1>') + 6:line.index('</arg1>')]
+             except ValueError:
+                 arg1 = ""
+             try:
+                 rel = line[line.index('<rel>') + 5:line.index('</rel>')]
+             except ValueError:
+                 rel = ""
+             try:
+                 arg2 = line[line.index('<arg2>') + 6:line.index('</arg2>')]
+             except ValueError:
+                 arg2 = ""
+
+             if self.threshold is not None and float(confidence) < self.threshold:
+                 continue
+
+             if not (arg1 or arg2 or rel):
+                 continue
+
+             curExtraction = Extraction(pred=rel, head_pred_index=-1, sent=text, confidence=float(confidence))
+             curExtraction.addArg(arg1)
+             curExtraction.addArg(arg2)
+             d[text] = d.get(text, []) + [curExtraction]
+
+         if os.path.exists(fn):
+             fin.close()
+         self.oie = d
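A minimal usage sketch for the reader above; the sample line is illustrative, and the format (sentence, tagged extraction, confidence, tab-separated) is inferred from the parser:

from oie_readers.allennlpReader import AllennlpReader

# One tab-separated line: sentence, tagged extraction, confidence (sample data)
raw = ("Obama was born in Hawaii\t"
       "<arg1> Obama </arg1> <rel> was born in </rel> <arg2> Hawaii </arg2>\t"
       "0.95")
reader = AllennlpReader(threshold=0.5)
reader.read(raw)  # a non-existent path is treated as the file contents
for sent, extractions in reader.oie.items():
    for ex in extractions:
        print(ex.confidence, ex.pred, ex.args)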
evaluation_data/carb/oie_readers/argument.py ADDED
@@ -0,0 +1,21 @@
+ import nltk
+ from operator import itemgetter
+
+ class Argument:
+     def __init__(self, arg):
+         # arg is a (string, indices) pair
+         self.words = [x for x in arg[0].strip().split(' ') if x]
+         self.posTags = list(map(itemgetter(1), nltk.pos_tag(self.words)))
+         self.indices = arg[1]
+         self.feats = {}
+
+     def __str__(self):
+         return "({})".format('\t'.join(map(str,
+             [escape_special_chars(' '.join(self.words)),
+              str(self.indices)])))
+
+ COREF = 'coref'
+
+ ## Helper functions
+ def escape_special_chars(s):
+     return s.replace('\t', '\\t')
evaluation_data/carb/oie_readers/benchmarkGoldReader.py ADDED
@@ -0,0 +1,55 @@
+ """ Usage:
+     benchmarkGoldReader --in=INPUT_FILE
+
+ Read a tab-formatted gold file.
+ Each line consists of:
+     sent, pred, arg1, arg2, ...
+ """
+
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+ from docopt import docopt
+ import logging
+
+ logging.basicConfig(level=logging.DEBUG)
+
+ class BenchmarkGoldReader(OieReader):
+
+     def __init__(self):
+         self.name = 'BenchmarkGoldReader'
+
+     def read(self, fn):
+         """
+         Read a tabbed format line.
+         Each line consists of:
+             sent, pred, arg1, arg2, ...
+         """
+         d = {}
+         ex_index = 0
+         with open(fn) as fin:
+             for line in fin:
+                 if not line.strip():
+                     continue
+                 data = line.strip().split('\t')
+                 text, rel = data[:2]
+                 curExtraction = Extraction(pred=rel.strip(),
+                                            head_pred_index=None,
+                                            sent=text.strip(),
+                                            confidence=1.0,
+                                            question_dist="./question_distributions/dist_wh_sbj_obj1.json",
+                                            index=ex_index)
+                 ex_index += 1
+
+                 for arg in data[2:]:
+                     curExtraction.addArg(arg.strip())
+
+                 d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
+
+
+ if __name__ == "__main__":
+     args = docopt(__doc__)
+     input_fn = args["--in"]
+     tr = BenchmarkGoldReader()
+     tr.read(input_fn)
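A usage sketch under the line format the parser above actually consumes (the file name and sample line are hypothetical):

from oie_readers.benchmarkGoldReader import BenchmarkGoldReader

# gold.tsv (hypothetical) contains tab-separated lines such as:
#   John met Mary\tmet\tJohn\tMary
reader = BenchmarkGoldReader()
reader.read("gold.tsv")
print(reader.count())  # total number of gold extractions read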
evaluation_data/carb/oie_readers/clausieReader.py ADDED
@@ -0,0 +1,90 @@
+ """ Usage:
+     <file-name> --in=INPUT_FILE --out=OUTPUT_FILE [--debug]
+
+ Convert to tabbed format
+ """
+ # External imports
+ import logging
+ from docopt import docopt
+
+ # Local imports
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+ #=-----
+
+ class ClausieReader(OieReader):
+
+     def __init__(self):
+         self.name = 'ClausIE'
+
+     def read(self, fn):
+         d = {}
+         with open(fn, encoding="utf-8") as fin:
+             for line in fin:
+                 data = line.strip().split('\t')
+                 if len(data) == 1:
+                     # A sentence line; subsequent extraction lines refer to it
+                     text = data[0]
+                 elif len(data) == 5:
+                     arg1, rel, arg2 = [s[1:-1] for s in data[1:4]]
+                     confidence = data[4]
+
+                     curExtraction = Extraction(pred=rel,
+                                                head_pred_index=-1,
+                                                sent=text,
+                                                confidence=float(confidence))
+
+                     curExtraction.addArg(arg1)
+                     curExtraction.addArg(arg2)
+                     d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
+         # self.normalizeConfidence()
+
+         # # remove extractions below the confidence threshold
+         # if self.threshold is not None:
+         #     new_d = {}
+         #     for sent in self.oie:
+         #         for extraction in self.oie[sent]:
+         #             if extraction.confidence < self.threshold:
+         #                 continue
+         #             else:
+         #                 new_d[sent] = new_d.get(sent, []) + [extraction]
+         #     self.oie = new_d
+
+     def normalizeConfidence(self):
+         ''' Normalize confidence to resemble probabilities '''
+         EPSILON = 1e-3
+
+         confidences = [extraction.confidence for sent in self.oie for extraction in self.oie[sent]]
+         maxConfidence = max(confidences)
+         minConfidence = min(confidences)
+
+         denom = maxConfidence - minConfidence + (2 * EPSILON)
+
+         for sent, extractions in self.oie.items():
+             for extraction in extractions:
+                 extraction.confidence = ((extraction.confidence - minConfidence) + EPSILON) / denom
+
+
+ if __name__ == "__main__":
+     # Parse command line arguments
+     args = docopt(__doc__)
+     inp_fn = args["--in"]
+     out_fn = args["--out"]
+     debug = args["--debug"]
+     if debug:
+         logging.basicConfig(level=logging.DEBUG)
+     else:
+         logging.basicConfig(level=logging.INFO)
+
+     oie = ClausieReader()
+     oie.read(inp_fn)
+     oie.output_tabbed(out_fn)
+
+     logging.info("DONE")
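The (currently commented-out) normalizeConfidence applies a smoothed min-max rescaling, conf' = (conf - min + EPSILON) / (max - min + 2*EPSILON), which maps raw ClausIE scores into the open interval (0, 1). A standalone sketch with made-up scores:

EPSILON = 1e-3
confidences = [0.2, 1.7, 3.5]  # made-up raw scores
lo, hi = min(confidences), max(confidences)
denom = hi - lo + 2 * EPSILON
normalized = [(c - lo + EPSILON) / denom for c in confidences]
print(normalized)  # ~[0.0003, 0.4546, 0.9997]; extremes never hit exactly 0 or 1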
evaluation_data/carb/oie_readers/extraction.py ADDED
@@ -0,0 +1,443 @@
+ from oie_readers.argument import Argument
+ from operator import itemgetter
+ from collections import defaultdict
+ import nltk
+ import itertools
+ import logging
+ import numpy as np
+
+
+ class Extraction:
+     """
+     Stores sentence, single predicate and corresponding arguments.
+     """
+     def __init__(self, pred, head_pred_index, sent, confidence, question_dist='', index=-1):
+         self.pred = pred
+         self.head_pred_index = head_pred_index
+         self.sent = sent
+         self.args = []
+         self.confidence = confidence
+         self.matched = []
+         self.questions = {}
+         self.indsForQuestions = defaultdict(lambda: set())
+         self.is_mwp = False
+         self.question_dist = question_dist
+         self.index = index
+
+     def distArgFromPred(self, arg):
+         assert len(self.pred) == 2
+         dists = []
+         for x in self.pred[1]:
+             for y in arg.indices:
+                 dists.append(abs(x - y))
+
+         return min(dists)
+
+     def argsByDistFromPred(self, question):
+         return sorted(self.questions[question], key=lambda arg: self.distArgFromPred(arg))
+
+     def addArg(self, arg, question=None):
+         self.args.append(arg)
+         if question:
+             self.questions[question] = self.questions.get(question, []) + [Argument(arg)]
+
+     def noPronounArgs(self):
+         """
+         Returns True iff all of this extraction's arguments are not pronouns.
+         """
+         for (a, _) in self.args:
+             tokenized_arg = nltk.word_tokenize(a)
+             if len(tokenized_arg) == 1:
+                 _, pos_tag = nltk.pos_tag(tokenized_arg)[0]
+                 if 'PRP' in pos_tag:
+                     return False
+         return True
+
+     def isContiguous(self):
+         return all([indices for (_, indices) in self.args])
+
+     def toBinary(self):
+         ''' Try to represent this extraction's arguments as binary.
+         If it fails, this function will return an empty list. '''
+
+         ret = [self.elementToStr(self.pred)]
+
+         if len(self.args) == 2:
+             # we're in luck
+             return ret + [self.elementToStr(arg) for arg in self.args]
+
+         return []
+
+         # NOTE: the early return above makes the merging logic below unreachable;
+         # it is kept as in the original commit.
+         if not self.isContiguous():
+             # give up on non-contiguous arguments (as we need indexes)
+             return []
+
+         # otherwise, try to merge based on indices
+         # TODO: you can explore other methods for doing this
+         binarized = self.binarizeByIndex()
+
+         if binarized:
+             return ret + binarized
+
+         return []
+
+     def elementToStr(self, elem, print_indices=True):
+         ''' formats an extraction element (pred or arg) as a raw string;
+         removes indices and trailing spaces '''
+         if print_indices:
+             return str(elem)
+         if isinstance(elem, str):
+             return elem
+         if isinstance(elem, tuple):
+             ret = elem[0].strip()
+         else:
+             ret = ' '.join(elem.words)
+         assert ret, "empty element? {0}".format(elem)
+         return ret
+
+     def binarizeByIndex(self):
+         extraction = [self.pred] + self.args
+         markPred = [(w, ind, i == 0) for i, (w, ind) in enumerate(extraction)]
+         sortedExtraction = sorted(markPred, key=lambda elem: elem[1][0])
+         s = ' '.join(['{1} {0} {1}'.format(self.elementToStr(elem), SEP) if elem[2] else self.elementToStr(elem)
+                       for elem in sortedExtraction])
+         binArgs = [a for a in s.split(SEP) if a.strip()]
+
+         if len(binArgs) == 2:
+             return binArgs
+
+         # failure
+         return []
+
+     def bow(self):
+         return ' '.join([self.elementToStr(elem) for elem in [self.pred] + self.args])
+
+     def getSortedArgs(self):
+         """
+         Sort the list of arguments.
+         If a question distribution is provided - use it,
+         otherwise, default to the order of appearance in the sentence.
+         """
+         if self.question_dist:
+             # There's a question distribution - use it
+             return self.sort_args_by_distribution()
+         ls = []
+         for q, args in self.questions.items():
+             if len(args) != 1:
+                 logging.debug("Not one argument: {}".format(args))
+                 continue
+             arg = args[0]
+             indices = list(self.indsForQuestions[q].union(arg.indices))
+             if not indices:
+                 logging.debug("Empty indexes for arg {} -- backing to zero".format(arg))
+                 indices = [0]
+             ls.append(((arg, q), indices))
+         return [a for a, _ in sorted(ls, key=lambda pair: min(pair[1]))]
+
+     def question_prob_for_loc(self, question, loc):
+         """
+         Returns the probability of the given question leading to the argument
+         appearing in the given location in the output slot.
+         """
+         gen_question = generalize_question(question)
+         q_dist = self.question_dist[gen_question]
+         logging.debug("distribution of {}: {}".format(gen_question, q_dist))
+
+         return float(q_dist.get(loc, 0)) / sum(q_dist.values())
+
+     def sort_args_by_distribution(self):
+         """
+         Use this instance's question distribution (this func assumes it exists)
+         in determining the positioning of the arguments.
+         Greedy algorithm:
+         0. Decide on which argument will serve as the ``subject'' (first slot) of this extraction
+            0.1 Based on the most probable one for this spot
+                (special care is given to select the highly-influential subject position)
+         1. For all other arguments, sort arguments by the prevalence of their questions
+         2. For each argument:
+            2.1 Assign to it the most probable slot still available
+            2.2 If none such exists (fallback) - default to putting it in the last location
+         """
+         INF_LOC = 100  # Used as an impractical last argument
+
+         # Store arguments by slot
+         ret = {INF_LOC: []}
+         logging.debug("sorting: {}".format(self.questions))
+
+         # Find the most suitable argument for the subject location
+         logging.debug("probs for subject: {}".format([(q, self.question_prob_for_loc(q, 0))
+                                                       for (q, _) in self.questions.items()]))
+
+         subj_question, subj_args = max(self.questions.items(),
+                                        key=lambda item: self.question_prob_for_loc(item[0], 0))
+
+         ret[0] = [(subj_args[0], subj_question)]
+
+         # Find the rest
+         for (question, args) in sorted([(q, a)
+                                         for (q, a) in self.questions.items() if q not in [subj_question]],
+                                        key=lambda item: sum(self.question_dist[generalize_question(item[0])].values()),
+                                        reverse=True):
+             gen_question = generalize_question(question)
+             arg = args[0]
+             assigned_flag = False
+             for (loc, count) in sorted(self.question_dist[gen_question].items(),
+                                        key=lambda item: item[1],
+                                        reverse=True):
+                 if loc not in ret:
+                     # Found an empty slot for this item
+                     # Place it there and break out
+                     ret[loc] = [(arg, question)]
+                     assigned_flag = True
+                     break
+
+             if not assigned_flag:
+                 # Add this argument to the non-assigned (hopefully doesn't happen much)
+                 logging.debug("Couldn't find an open assignment for {}".format((arg, gen_question)))
+                 ret[INF_LOC].append((arg, question))
+
+         logging.debug("Linearizing arg list: {}".format(ret))
+
+         # Finished iterating - consolidate and return a list of arguments
+         return [arg
+                 for (_, arg_ls) in sorted(ret.items(), key=lambda item: int(item[0]))
+                 for arg in arg_ls]
+
+     def __str__(self):
+         # NOTE: shadowed by the later __str__ definition below, as in the original file.
+         pred_str = self.elementToStr(self.pred)
+         return '{}\t{}\t{}'.format(self.get_base_verb(pred_str),
+                                    self.compute_global_pred(pred_str,
+                                                             self.questions.keys()),
+                                    '\t'.join([escape_special_chars(self.augment_arg_with_question(self.elementToStr(arg),
+                                                                                                   question))
+                                               for arg, question in self.getSortedArgs()]))
+
+     def get_base_verb(self, surface_pred):
+         """
+         Given the surface pred, return the original annotated verb
+         """
+         # Assumes that at this point the verb is always the last word
+         # in the surface predicate
+         return surface_pred.split(' ')[-1]
+
+     def compute_global_pred(self, surface_pred, questions):
+         """
+         Given the surface pred and all instantiations of questions,
+         make global coherence decisions regarding the final form of the predicate.
+         This should hopefully take care of multi-word predicates and correct inflections.
+         """
+         split_surface = surface_pred.split(' ')
+
+         if len(split_surface) > 1:
+             # This predicate has a modal preceding the base verb
+             verb = split_surface[-1]
+             ret = split_surface[:-1]  # get all of the elements in the modal
+         else:
+             verb = split_surface[0]
+             ret = []
+
+         split_questions = [question.split(' ') for question in questions]
+
+         preds = [normalize_element(question[QUESTION_TRG_INDEX])
+                  for question in split_questions]
+         if len(set(preds)) > 1:
+             # This predicate appears in multiple ways, let's stick to the base form
+             ret.append(verb)
+
+         if len(set(preds)) == 1:
+             # Change the predicate to the inflected form
+             # if there's exactly one way in which the predicate is conveyed
+             ret.append(preds[0])
+
+         pps = [normalize_element(question[QUESTION_PP_INDEX])
+                for question in split_questions]
+
+         obj2s = [normalize_element(question[QUESTION_OBJ2_INDEX])
+                  for question in split_questions]
+
+         if len(set(pps)) == 1:
+             # If all questions for the predicate include the same pp attachment -
+             # assume it's a multiword predicate
+             self.is_mwp = True  # Signal to arguments that they shouldn't take the preposition
+             ret.append(pps[0])
+
+         # Concat all elements in the predicate and return
+         return " ".join(ret).strip()
+
+     def augment_arg_with_question(self, arg, question):
+         """
+         Decide what elements from the question to incorporate in the given
+         corresponding argument
+         """
+         # Parse question
+         wh, aux, sbj, trg, obj1, pp, obj2 = map(normalize_element,
+                                                 question.split(' ')[:-1])  # Last split is the question mark
+
+         # Place preposition in argument
+         # This is safer when dealing with n-ary arguments, as it directly attaches to the
+         # appropriate argument
+         if (not self.is_mwp) and pp and (not obj2):
+             if not arg.startswith("{} ".format(pp)):
+                 # Avoid repeating the preposition in cases where both question and answer contain it
+                 return " ".join([pp, arg])
+
+         # Normal cases
+         return arg
+
+     def clusterScore(self, cluster):
+         """
+         Calculate cluster density score as the mean distance of the maximum distance of each slot.
+         Lower score represents a denser cluster.
+         """
+         logging.debug("*-*-*- Cluster: {}".format(cluster))
+
+         # Find global centroid
+         arr = np.array([x for ls in cluster for x in ls])
+         centroid = np.sum(arr) / arr.shape[0]
+         logging.debug("Centroid: {}".format(centroid))
+
+         # Calculate mean over all maximum points
+         return np.average([max([abs(x - centroid) for x in ls]) for ls in cluster])
+
+     def resolveAmbiguity(self):
+         """
+         Heuristic to map the elements (arguments and predicates) of this extraction
+         back to the indices of the sentence.
+         """
+         ## TODO: This removes arguments for which there was no consecutive span found
+         ## Part of these are non-consecutive arguments,
+         ## but others could be a bug in recognizing some punctuation marks
+
+         elements = [self.pred] \
+                    + [(s, indices)
+                       for (s, indices)
+                       in self.args
+                       if indices]
+         logging.debug("Resolving ambiguity in: {}".format(elements))
+
+         # Collect all possible combinations of arguments and predicate indices
+         # (hopefully it's not too much)
+         all_combinations = list(itertools.product(*map(itemgetter(1), elements)))
+         logging.debug("Number of combinations: {}".format(len(all_combinations)))
+
+         # Choose the ones with best clustering and unfold them
+         resolved_elements = list(zip(map(itemgetter(0), elements),
+                                      min(all_combinations,
+                                          key=lambda cluster: self.clusterScore(cluster))))
+         logging.debug("Resolved elements = {}".format(resolved_elements))
+
+         self.pred = resolved_elements[0]
+         self.args = resolved_elements[1:]
+
+     def conll(self, external_feats=None):
+         """
+         Return a CoNLL string representation of this extraction
+         """
+         # external_feats is an optional list of extra column values
+         external_feats = external_feats or []
+         return '\n'.join(["\t".join(map(str,
+                                         [i, w]
+                                         + list(self.pred)
+                                         + [self.head_pred_index]
+                                         + external_feats
+                                         + [self.get_label(i)]))
+                           for (i, w)
+                           in enumerate(self.sent.split(" "))]) + '\n'
+
+     def get_label(self, index):
+         """
+         Given an index of a word in the sentence -- returns the appropriate BIO conll label.
+         Assumes that ambiguity was already resolved.
+         """
+         # Get the element(s) in which this index appears
+         ent = [(elem_ind, elem)
+                for (elem_ind, elem)
+                in enumerate(map(itemgetter(1),
+                                 [self.pred] + self.args))
+                if index in elem]
+
+         if not ent:
+             # index doesn't appear in any element
+             return "O"
+
+         if len(ent) > 1:
+             # The same word appears in two different answers
+             # In this case we choose the first one as label
+             logging.warning("Index {} appears in more than one element: {}".format(
+                 index,
+                 "\t".join(map(str, [ent, self.sent, self.pred, self.args]))))
+
+         ## Some indices appear in more than one argument (ones where the above message appears)
+         ## From empirical observation, these seem to mostly consist of different levels of granularity:
+         ##     what had _ been taken _ _ _ ?        loan commitments topping $ 3 billion
+         ##     how much had _ been taken _ _ _ ?    topping $ 3 billion
+         ## In these cases we heuristically choose the shorter answer span, hopefully creating minimal spans
+         ## E.g., in this example two arguments are created: (loan commitments, topping $ 3 billion)
+
+         elem_ind, elem = min(ent, key=lambda pair: len(pair[1]))
+
+         # Distinguish between predicate and arguments
+         prefix = "P" if elem_ind == 0 else "A{}".format(elem_ind - 1)
+
+         # Distinguish between Beginning and Inside labels
+         suffix = "B" if index == elem[0] else "I"
+
+         return "{}-{}".format(prefix, suffix)
+
+     # NOTE: this second __str__ definition shadows the question-based one above;
+     # the later definition wins in Python, as in the original file.
+     def __str__(self):
+         return '{0}\t{1}'.format(self.elementToStr(self.pred,
+                                                    print_indices=True),
+                                  '\t'.join([self.elementToStr(arg)
+                                             for arg in self.args]))
+
+
+ # Flatten a list of lists
+ flatten = lambda l: [item for sublist in l for item in sublist]
+
+
+ def normalize_element(elem):
+     """
+     Return a surface form of the given question element.
+     The output should be properly able to precede a predicate (or blank otherwise).
+     """
+     return elem.replace("_", " ") if (elem != "_") else ""
+
+
+ ## Helper functions
+ def escape_special_chars(s):
+     return s.replace('\t', '\\t')
+
+
+ def generalize_question(question):
+     """
+     Given a question in the context of the sentence and the predicate index within
+     the question - return a generalized version which extracts only order-imposing features
+     """
+     wh, aux, sbj, trg, obj1, pp, obj2 = question.split(' ')[:-1]  # Last split is the question mark
+     return ' '.join([wh, sbj, obj1])
+
+
+ ## CONSTANTS
+ SEP = ';;;'
+ QUESTION_TRG_INDEX = 3  # index of the predicate within the question
+ QUESTION_PP_INDEX = 5
+ QUESTION_OBJ2_INDEX = 6
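Extraction is the common currency produced by all the readers in this commit; a minimal construction sketch (values illustrative):

from oie_readers.extraction import Extraction

ex = Extraction(pred="was born in", head_pred_index=-1,
                sent="Obama was born in Hawaii", confidence=0.95)
ex.addArg("Obama")
ex.addArg("Hawaii")
print(ex.bow())  # "was born in Obama Hawaii"
print(ex)        # pred and args, tab-separated (second __str__ definition)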
evaluation_data/carb/oie_readers/goldReader.py ADDED
@@ -0,0 +1,44 @@
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+ from collections import defaultdict
+
+ class GoldReader(OieReader):
+
+     # Path relative to repo root folder
+     default_filename = './oie_corpus/all.oie'
+
+     def __init__(self):
+         self.name = 'Gold'
+
+     def read(self, fn):
+         d = defaultdict(lambda: [])
+         with open(fn) as fin:
+             for line_ind, line in enumerate(fin):
+                 data = line.strip().split('\t')
+                 text, rel = data[:2]
+                 args = data[2:]
+                 confidence = 1
+
+                 curExtraction = Extraction(pred=rel.strip(),
+                                            head_pred_index=None,
+                                            sent=text.strip(),
+                                            confidence=float(confidence),
+                                            index=line_ind)
+                 for arg in args:
+                     # Skip context arguments
+                     if "C: " in arg:
+                         continue
+                     curExtraction.addArg(arg.strip())
+
+                 d[text.strip()].append(curExtraction)
+         self.oie = d
+
+
+ if __name__ == '__main__':
+     g = GoldReader()
+     g.read('../oie_corpus/all.oie')
+     d = g.oie
+     e = list(d.items())[0]
+     print(e[1][0].bow())
+     print(g.count())
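The gold format consumed above is one extraction per line (sentence, predicate, then arguments), with "C: "-prefixed context arguments skipped. A hypothetical line and driver:

from oie_readers.goldReader import GoldReader

# all.oie (placeholder path) with tab-separated lines such as:
#   John met Mary in Paris\tmet\tJohn\tMary\tC: in Paris
g = GoldReader()
g.read("all.oie")
sent, extractions = list(g.oie.items())[0]
print(extractions[0].bow())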
evaluation_data/carb/oie_readers/oieReader.py ADDED
@@ -0,0 +1,45 @@
+ class OieReader:
+
+     def read(self, fn, includeNominal):
+         ''' should set oie as a class member
+         as a dictionary of extractions by sentence'''
+         raise Exception("Don't run me")
+
+     def count(self):
+         ''' number of extractions '''
+         return sum([len(extractions) for _, extractions in self.oie.items()])
+
+     def split_to_corpus(self, corpus_fn, out_fn):
+         """
+         Given a corpus file name, containing a list of sentences,
+         print only the extractions pertaining to it to out_fn in a tab-separated format:
+         sent, prob, pred, arg1, arg2, ...
+         """
+         raw_sents = [line.strip() for line in open(corpus_fn)]
+         with open(out_fn, 'w') as fout:
+             for line in self.get_tabbed().split('\n'):
+                 data = line.split('\t')
+                 sent = data[0]
+                 if sent in raw_sents:
+                     fout.write(line + '\n')
+
+     def output_tabbed(self, out_fn):
+         """
+         Write a tabbed representation of this corpus.
+         """
+         with open(out_fn, 'w') as fout:
+             fout.write(self.get_tabbed())
+
+     def get_tabbed(self):
+         """
+         Get a tabbed format representation of this corpus (assumes that input was
+         already read).
+         """
+         return "\n".join(['\t'.join(map(str,
+                                         [ex.sent,
+                                          ex.confidence,
+                                          ex.pred,
+                                          '\t'.join(ex.args)]))
+                           for (sent, exs) in self.oie.items()
+                           for ex in exs])
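For reference, a sketch of a minimal concrete reader built on this base class (the single-line format here is hypothetical):

from oie_readers.oieReader import OieReader
from oie_readers.extraction import Extraction

class TsvReader(OieReader):
    """Reads hypothetical 'sent\tconfidence\tpred\targ1\targ2...' lines."""

    def __init__(self):
        self.name = 'Tsv'

    def read(self, fn):
        d = {}
        with open(fn) as fin:
            for line in fin:
                sent, confidence, pred, *args = line.rstrip('\n').split('\t')
                ex = Extraction(pred=pred, head_pred_index=-1,
                                sent=sent, confidence=float(confidence))
                for arg in args:
                    ex.addArg(arg)
                d[sent] = d.get(sent, []) + [ex]
        self.oie = d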
evaluation_data/carb/oie_readers/ollieReader.py ADDED
@@ -0,0 +1,22 @@
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+
+ class OllieReader(OieReader):
+
+     def __init__(self):
+         self.name = 'OLLIE'
+
+     def read(self, fn):
+         d = {}
+         with open(fn) as fin:
+             fin.readline()  # skip header
+             for line in fin:
+                 data = line.strip().split('\t')
+                 confidence, arg1, rel, arg2, enabler, attribution, text = data[:7]
+                 curExtraction = Extraction(pred=rel, head_pred_index=-1, sent=text, confidence=float(confidence))
+                 curExtraction.addArg(arg1)
+                 curExtraction.addArg(arg2)
+                 d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
evaluation_data/carb/oie_readers/openieFiveReader.py ADDED
@@ -0,0 +1,38 @@
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+
+ class OpenieFiveReader(OieReader):
+
+     def __init__(self):
+         self.name = 'OpenIE-5'
+
+     def read(self, fn):
+         d = {}
+         with open(fn) as fin:
+             for line in fin:
+                 data = line.strip().split('\t')
+                 confidence = data[0]
+
+                 if not all(data[2:5]):
+                     continue
+                 # Strip the "Name(text,List(...))" wrappers down to the surface text
+                 arg1, rel = [s[s.index('(') + 1:s.index(',List(')] for s in data[2:4]]
+                 args = [s[s.index('(') + 1:s.index(',List(')] for s in data[4].strip().split(');')]
+                 text = data[5]
+                 if data[1]:
+                     # Prepend the context to arg1 unless it is already included
+                     s = data[1]
+                     if not (arg1 + ' ' + rel).startswith(s[s.index('(') + 1:s.index(',List(')]):
+                         arg1 = s[s.index('(') + 1:s.index(',List(')] + ' ' + arg1
+
+                 curExtraction = Extraction(pred=rel, head_pred_index=-1, sent=text, confidence=float(confidence))
+                 curExtraction.addArg(arg1)
+                 for arg in args:
+                     curExtraction.addArg(arg)
+                 d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
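The repeated slicing idiom above pulls the surface string out of OpenIE-5 fields shaped roughly like SimpleArgument(text,List(...)); the exact wrapper name below is assumed, only the shape is implied by the slicing:

s = "SimpleArgument(Obama,List([0, 5)))"  # assumed field shape
text = s[s.index('(') + 1:s.index(',List(')]
print(text)  # Obama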
evaluation_data/carb/oie_readers/openieFourReader.py ADDED
@@ -0,0 +1,59 @@
+ """ Usage:
+     <file-name> --in=INPUT_FILE --out=OUTPUT_FILE [--debug]
+
+ Convert to tabbed format
+ """
+ # External imports
+ import logging
+ from docopt import docopt
+
+ # Local imports
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+
+ #=-----
+
+ class OpenieFourReader(OieReader):
+
+     def __init__(self):
+         self.name = 'OpenIE-4'
+
+     def read(self, fn):
+         d = {}
+         with open(fn) as fin:
+             for line in fin:
+                 data = line.strip().split('\t')
+                 confidence = data[0]
+                 if not all(data[2:5]):
+                     logging.debug("Skipped line: {}".format(line))
+                     continue
+                 arg1, rel, arg2 = [s[s.index('(') + 1:s.index(',List(')] for s in data[2:5]]
+                 text = data[5]
+                 curExtraction = Extraction(pred=rel, head_pred_index=-1, sent=text, confidence=float(confidence))
+                 curExtraction.addArg(arg1)
+                 curExtraction.addArg(arg2)
+                 d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
+
+
+ if __name__ == "__main__":
+     # Parse command line arguments
+     args = docopt(__doc__)
+     inp_fn = args["--in"]
+     out_fn = args["--out"]
+     debug = args["--debug"]
+     if debug:
+         logging.basicConfig(level=logging.DEBUG)
+     else:
+         logging.basicConfig(level=logging.INFO)
+
+     oie = OpenieFourReader()
+     oie.read(inp_fn)
+     oie.output_tabbed(out_fn)
+
+     logging.info("DONE")
evaluation_data/carb/oie_readers/propsReader.py ADDED
@@ -0,0 +1,44 @@
+ from oie_readers.oieReader import OieReader
+ from oie_readers.extraction import Extraction
+
+
+ class PropSReader(OieReader):
+
+     def __init__(self):
+         self.name = 'PropS'
+
+     def read(self, fn):
+         d = {}
+         with open(fn) as fin:
+             for line in fin:
+                 if not line.strip():
+                     continue
+                 data = line.strip().split('\t')
+                 confidence, text, rel = data[:3]
+                 curExtraction = Extraction(pred=rel, sent=text, confidence=float(confidence), head_pred_index=-1)
+
+                 # Arguments occupy every other field from the fifth one onwards
+                 for arg in data[4::2]:
+                     curExtraction.addArg(arg)
+
+                 d[text] = d.get(text, []) + [curExtraction]
+         self.oie = d
+         # self.normalizeConfidence()
+
+     def normalizeConfidence(self):
+         ''' Normalize confidence to resemble probabilities '''
+         EPSILON = 1e-3
+
+         self.confidences = [extraction.confidence for sent in self.oie for extraction in self.oie[sent]]
+         maxConfidence = max(self.confidences)
+         minConfidence = min(self.confidences)
+
+         denom = maxConfidence - minConfidence + (2 * EPSILON)
+
+         for sent, extractions in self.oie.items():
+             for extraction in extractions:
+                 extraction.confidence = ((extraction.confidence - minConfidence) + EPSILON) / denom
evaluation_data/carb/oie_readers/reVerbReader.py CHANGED
@@ -24,4 +24,6 @@ class ReVerbReader(OieReader):
 
     # ReVerb requires a different file from which to get the input sentences
     # Relative to repo root folder
-    RAW_SENTS_FILE = './raw_sentences/all.txt'
+    RAW_SENTS_FILE = './raw_sentences/all.txt'
+
+
evaluation_data/carb/oie_readers/split_corpus.py CHANGED
@@ -34,4 +34,4 @@ if __name__ == "__main__":
34
  reader = available_readers[args["--reader"]]()
35
  reader.read(inp)
36
  reader.split_to_corpus(corpus,
37
- out)
 
34
  reader = available_readers[args["--reader"]]()
35
  reader.read(inp)
36
  reader.split_to_corpus(corpus,
37
+ out)
evaluation_data/carb/oie_readers/stanfordReader.py CHANGED
@@ -19,4 +19,4 @@ class StanfordReader(OieReader):
                 curExtraction.addArg(arg1)
                 curExtraction.addArg(arg2)
                 d[text] = d.get(text, []) + [curExtraction]
-        self.oie = d
+        self.oie = d
evaluation_data/carb/oie_readers/tabReader.py CHANGED
@@ -53,4 +53,4 @@ if __name__ == "__main__":
     args = docopt(__doc__)
     input_fn = args["--in"]
     tr = TabReader()
-    tr.read(input_fn)
+    tr.read(input_fn)