Columns:
- nwo: string (6-76 chars)
- sha: string (40 chars)
- path: string (5-118 chars)
- language: string (1 distinct value)
- identifier: string (1-89 chars)
- parameters: string (2-5.4k chars)
- argument_list: string (1 distinct value)
- return_statement: string (0-51.1k chars)
- docstring: string (1-17.6k chars)
- docstring_summary: string (0-7.02k chars)
- docstring_tokens: sequence
- function: string (30-51.1k chars)
- function_tokens: sequence
- url: string (85-218 chars)
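A minimal sketch of how records with this schema might be inspected, assuming they have been exported as a JSON Lines file (the file name below is hypothetical and not part of the dataset):

```python
import json

# Hypothetical export path; the actual storage location is not specified here.
with open("code_docstring_records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # Each record pairs one Python function with its docstring and source URL.
        print(record["nwo"], record["identifier"], record["url"])
```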

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/train.py | python | Trainer.test_epoch | (self, epoch, dataset, should_print=False, use_mask=True) | return acc | use_mask: mask the 0 label

def test_epoch(self, epoch, dataset, should_print=False, use_mask=True):
"""
use_mask: mask the 0 label
"""
self.model.eval()
acc_list = []
for index, data in tqdm(enumerate(dataset)):
self._to_device(data)
percent = index / len(dataset) * 100
if should_print:
print('[Epoch %d] Test | Data %d (%d%%): acc: | path: %s' % \
(epoch, index, percent, data.path), ' ' * 30, end='\r')
outputs = self.model(data.nodes, data.edges, data.adj, data.incidence)
_lab_len = len(data.labels)
if use_mask:
for i in data.labels:
if i == 0: _lab_len -= 1
_labels = torch.LongTensor(
[(-1 if i == 0 else i) for i in data.labels]).to(self.device)
else: _labels = data.labels
acc = (outputs.max(dim=1)[1] == _labels).float().sum().item() / _lab_len
acc_list.append(acc)
# if index % 10 == 0:
if should_print:
print('[Epoch %d] Test | Data %d (%d%%): acc: %.3f | path: %s' % \
(epoch, index, percent, acc, data.path), ' ' * 30, end='\n')
acc = sum(acc_list) / len(acc_list)
return acc

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/train.py#L108-L135

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/data/utils.py | python | json2Table | (json_obj, tid="", splitted_content=False) | return Table(row_n + 1, col_n + 1, cells, tid) | Construct a Table object from json object

def json2Table(json_obj, tid="", splitted_content=False):
"""Construct a Table object from json object
Args:
json_obj: a json object
Returns:
a Table object
"""
jo = json_obj["cells"]
row_n, col_n = 0, 0
cells = []
for co in jo:
content = co["content"]
if content is None: continue
if splitted_content:
content = " ".join(content)
else:
content = content.strip()
if content == "": continue
start_row = co["start_row"]
end_row = co["end_row"]
start_col = co["start_col"]
end_col = co["end_col"]
row_n = max(row_n, end_row)
col_n = max(col_n, end_col)
cell = Chunk(content, (start_row, end_row, start_col, end_col))
cells.append(cell)
return Table(row_n + 1, col_n + 1, cells, tid)

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/data/utils.py#L38-L64

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/data/utils.py | python | add_knn_edges | (chunks, relations, k=20, debug=False) | return relations, recall | Add edges according to knn of vertexes.

def add_knn_edges(chunks, relations, k=20, debug=False):
"""Add edges according to knn of vertexes.
"""
edges = set()
rel_recall = {}
for i, j, _ in relations:
edges.add((i, j) if i < j else (j, i))
rel_recall[(i, j) if i < j else (j, i)] = False
for i in range(len(chunks)):
_dis_ij = []
for j in range(len(chunks)):
if j == i: continue
_dis_ij.append((_eul_dis(chunks, i, j), j))
sorted_dis_ij = sorted(_dis_ij)
for _, j in sorted_dis_ij[:k]:
_i, _j = (i, j) if i < j else (j, i)
if (_i, _j) in rel_recall: rel_recall[(_i, _j)] = True
if (_i, _j) not in edges:
edges.add((_i, _j))
relations.append((_i, _j, 0))
cnt = 0
for _, val in rel_recall.items():
if val: cnt += 1
recall = 0 if len(rel_recall) == 0 else cnt / len(rel_recall)
if debug:
print("add knn edge. recall:%.3f" % recall)
return relations, recall

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/data/utils.py#L120-L146
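A toy illustration of the sort-then-take-k step at the core of add_knn_edges above; the distances and neighbour ids are invented, and _eul_dis is not reimplemented here:

```python
# (distance, neighbour id) pairs, as collected in the inner j-loop above.
dist_to_j = [(3.2, 4), (1.0, 2), (2.5, 7), (0.4, 1)]
k = 2
# sorted() orders tuples by distance first, so the first k entries are the k nearest.
knn_ids = [j for _, j in sorted(dist_to_j)[:k]]
print(knn_ids)  # [1, 2]
```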

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/data/loader.py | python | TableDataset.clean_chunk_rel | (self, chunks, relations) | return new_chunks, new_rels | Remove null chunks

def clean_chunk_rel(self, chunks, relations):
"""Remove null chunks"""
new_chunks = []
oldid2newid = [-1 for i in range(len(chunks))]
for i, c in enumerate(chunks):
if c.x2 == c.x1 or c.y2 == c.y1 or c.text == "":
continue
oldid2newid[i] = len(new_chunks)
new_chunks.append(c)
new_rels = []
for i, j, t in relations:
ni = oldid2newid[i]
nj = oldid2newid[j]
if ni != -1 and nj != -1: new_rels.append((ni, nj, t))
return new_chunks, new_rels

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/data/loader.py#L140-L154

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/data/rel_gen.py | python | dump_iters_as_tsv | (filename, iterables, spliter="\t") | Dump iters as tsv.

def dump_iters_as_tsv(filename, iterables, spliter="\t"):
"""
Dump iters as tsv.
item1\titem2\t... (from iterable1)
item1\titem2\t... (from iterable2)
"""
with open(filename, "w") as f:
for iterable in iterables:
iterable = [str(i) for i in iterable]
f.write(spliter.join(iterable) + "\n")

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/data/rel_gen.py#L18-L27
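A quick, hypothetical call showing the output format of the helper above:

```python
# Writes a file whose two lines are "1<TAB>a" and "2<TAB>b".
dump_iters_as_tsv("example.tsv", [[1, "a"], [2, "b"]])
```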

Academic-Hammer/SciTSR | 79954b5143295162ceaf7e9d9af918a29fe12f55 | scitsr/data/rel_gen.py | python | match | (src:dict, trg:dict, src_chunks, trg_chunks, fid) | return sid2tid | Match chunks to latex cells w.r.t. the contents.

def match(src:dict, trg:dict, src_chunks, trg_chunks, fid):
"""Match chunks to latex cells w.r.t. the contents."""
sid2tid = {}
print("--------%s---------------------------" % fid)
for stxt, sids in src.items():
if stxt in trg:
tids = trg[stxt]
if len(sids) == 1 and len(tids) == 1: sid2tid[sids[0]] = tids[0]
elif len(sids) == len(tids):
schunks = [(sid, src_chunks[sid]) for sid in sids]
tchunks = [(tid, trg_chunks[tid]) for tid in tids]
sorted_sc = sorted(schunks, key=lambda x: (-x[1].y1, x[1].x1))
sorted_tc = sorted(tchunks, key=lambda x: (x[1].x1, x[1].y1))
for (sid, _), (tid, _) in zip(sorted_sc, sorted_tc):
sid2tid[sid] = tid
else:
print("[W] length of sids and tids doesn't match")
else:
print("[W] no match for text %s" % stxt)
print("-----------------------------------------------------------")
return sid2tid

https://github.com/Academic-Hammer/SciTSR/blob/79954b5143295162ceaf7e9d9af918a29fe12f55/scitsr/data/rel_gen.py#L30-L50
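A small illustration of the 1-to-1 branch of match above; the Box type and coordinates are stand-ins for the real chunk objects, which are not defined in this record:

```python
from collections import namedtuple

# Minimal stand-in exposing only the coordinates match() may sort on.
Box = namedtuple("Box", ["x1", "y1"])

src = {"0.95": [0]}   # cell text -> chunk ids on the source side
trg = {"0.95": [0]}   # cell text -> cell ids on the target side
sid2tid = match(src, trg, [Box(10.0, 700.0)], [Box(10.0, 700.0)], fid="toy")
print(sid2tid)  # {0: 0}
```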

AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | eval_utils.py | python | normalize_answer | (s) | return white_space_fix(remove_articles(remove_punc(lower(s)))) | Lower text and remove punctuation, articles and extra whitespace.

def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))

https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/eval_utils.py#L13-L28
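For example, the normalization above strips punctuation, casing, articles and extra spaces:

```python
print(normalize_answer("The  Answer, (obviously)!"))  # -> "answer obviously"
```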

AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | sequential_sentence_selector/run_sequential_sentence_selector.py | python | convert_examples_to_features | (examples, max_seq_length, max_sent_num, max_sf_num, tokenizer, train=False) | return features | Loads a data file into a list of `InputBatch`s.

def convert_examples_to_features(examples, max_seq_length, max_sent_num, max_sf_num, tokenizer, train=False):
"""Loads a data file into a list of `InputBatch`s."""
DUMMY = [0] * max_seq_length
DUMMY_ = [0.0] * max_sent_num
features = []
logger.info('#### Constructing features... ####')
for (ex_index, example) in enumerate(tqdm(examples, desc='Example')):
tokens_q = tokenizer.tokenize(
'Q: {} A: {}'.format(example.question, example.answer))
tokens_q = ['[CLS]'] + tokens_q + ['[SEP]']
input_ids = []
input_masks = []
segment_ids = []
for title in example.titles:
sents = example.context[title]
for (i, s) in enumerate(sents):
if len(input_ids) == max_sent_num:
break
tokens_s = tokenizer.tokenize(
s)[:max_seq_length-len(tokens_q)-1]
tokens_s = tokens_s + ['[SEP]']
padding = [0] * (max_seq_length -
len(tokens_s) - len(tokens_q))
input_ids_ = tokenizer.convert_tokens_to_ids(
tokens_q + tokens_s)
input_masks_ = [1] * len(input_ids_)
segment_ids_ = [0] * len(tokens_q) + [1] * len(tokens_s)
input_ids_ += padding
input_ids.append(input_ids_)
input_masks_ += padding
input_masks.append(input_masks_)
segment_ids_ += padding
segment_ids.append(segment_ids_)
assert len(input_ids_) == max_seq_length
assert len(input_masks_) == max_seq_length
assert len(segment_ids_) == max_seq_length
target_ids = []
target_offset = 0
for title in example.titles:
sfs = example.supporting_facts[title]
for i in sfs:
if i < len(example.context[title]) and i+target_offset < len(input_ids):
target_ids.append(i+target_offset)
else:
logger.warning('')
logger.warning('Invalid annotation: {}'.format(sfs))
logger.warning('Invalid annotation: {}'.format(
example.context[title]))
target_offset += len(example.context[title])
assert len(input_ids) <= max_sent_num
assert len(target_ids) <= max_sf_num
num_sents = len(input_ids)
num_sfs = len(target_ids)
output_masks = [([1.0] * len(input_ids) + [0.0] * (max_sent_num -
len(input_ids) + 1)) for _ in range(max_sent_num + 2)]
if train:
for i in range(len(target_ids)):
for j in range(len(target_ids)):
if i == j:
continue
output_masks[i][target_ids[j]] = 0.0
for i in range(len(output_masks)):
if i >= num_sfs+1:
for j in range(len(output_masks[i])):
output_masks[i][j] = 0.0
else:
for i in range(len(input_ids)):
output_masks[i+1][i] = 0.0
target_ids += [0] * (max_sf_num - len(target_ids))
padding = [DUMMY] * (max_sent_num - len(input_ids))
input_ids += padding
input_masks += padding
segment_ids += padding
features.append(
InputFeatures(input_ids=input_ids,
input_masks=input_masks,
segment_ids=segment_ids,
target_ids=target_ids,
output_masks=output_masks,
num_sents=num_sents,
num_sfs=num_sfs,
ex_index=ex_index))
logger.info('Done!')
return features

https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/sequential_sentence_selector/run_sequential_sentence_selector.py#L87-L198
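A toy sketch of the fixed-length padding and masking scheme this function applies to every tokenized sentence; the token ids are invented and no tokenizer is involved:

```python
max_seq_length = 8
token_ids = [101, 2054, 2003, 102]        # ids for one short sentence (made up)
input_mask = [1] * len(token_ids)         # 1 = real token, 0 = padding
padding = [0] * (max_seq_length - len(token_ids))
input_ids = token_ids + padding
input_mask = input_mask + padding
assert len(input_ids) == len(input_mask) == max_seq_length
```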

AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | graph_retriever/utils.py | python | convert_examples_to_features | (examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer, train = False) | return features | Loads a data file into a list of `InputBatch`s.

def convert_examples_to_features(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer, train = False):
"""Loads a data file into a list of `InputBatch`s."""
if not train and graph_retriever_config.db_save_path is not None:
max_para_num = graph_retriever_config.max_context_size
graph_retriever_config.max_para_num = max(graph_retriever_config.max_para_num, max_para_num)
max_steps = graph_retriever_config.max_select_num
DUMMY = [0] * max_seq_length
features = []
logger.info('#### Converting examples to features... ####')
for (ex_index, example) in enumerate(tqdm(examples, desc='Example')):
tokens_q = tokenize_question(example.question, tokenizer)
##############
# Short gold #
##############
title2index = {}
input_ids = []
input_masks = []
segment_ids = []
# Append gold and non-gold paragraphs from context
if train and graph_retriever_config.use_redundant and len(example.redundant_gold) > 0:
if graph_retriever_config.use_multiple_redundant:
titles_list = example.short_gold + [redundant[0] for redundant in example.all_redundant_gold] + list(example.context.keys())
else:
titles_list = example.short_gold + [example.redundant_gold[0]] + list(example.context.keys())
else:
titles_list = example.short_gold + list(example.context.keys())
for p in titles_list:
if len(input_ids) == max_para_num:
break
# Avoid appending gold paragraphs as negative
if p in title2index:
continue
# fullwiki eval
# Gold paragraphs are not always in context
if not train and graph_retriever_config.open and p not in example.context:
continue
title2index[p] = len(title2index)
example.title_order.append(p)
p = example.context[p]
input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, max_seq_length, tokenizer)
input_ids.append(input_ids_)
input_masks.append(input_masks_)
segment_ids.append(segment_ids_)
# Open-domain setting
if graph_retriever_config.open:
num_paragraphs_no_links = len(input_ids)
for p_ in example.context:
if not train and graph_retriever_config.db_save_path is not None:
break
if len(input_ids) == max_para_num:
break
if p_ not in example.all_linked_paras_dic:
continue
for l in example.all_linked_paras_dic[p_]:
if len(input_ids) == max_para_num:
break
if l in title2index:
continue
title2index[l] = len(title2index)
example.title_order.append(l)
p = example.all_linked_paras_dic[p_][l]
input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, max_seq_length, tokenizer)
input_ids.append(input_ids_)
input_masks.append(input_masks_)
segment_ids.append(segment_ids_)
assert len(input_ids) <= max_para_num
num_paragraphs = len(input_ids)
num_steps = len(example.short_gold)+1 # 1 for EOE
if train:
assert num_steps <= max_steps
output_masks = [([1.0] * len(input_ids) + [0.0] * (max_para_num - len(input_ids) + 1)) for _ in range(max_para_num + 2)]
if (not train) and graph_retriever_config.open:
assert len(example.context) == num_paragraphs_no_links
for i in range(len(output_masks[0])):
if i >= num_paragraphs_no_links:
output_masks[0][i] = 0.0
for i in range(len(input_ids)):
output_masks[i+1][i] = 0.0
if train:
size = num_steps-1
for i in range(size):
for j in range(size):
if i != j:
output_masks[i][j] = 0.0
for i in range(size):
output_masks[size][i] = 0.0
for i in range(max_steps):
if i > size:
for j in range(len(output_masks[i])):
output_masks[i][j] = 0.0
# Use REDUNDANT setting
# Avoid treating the redundant paragraph as a negative example at the first step
if graph_retriever_config.use_redundant and len(example.redundant_gold) > 0:
if graph_retriever_config.use_multiple_redundant:
for redundant in example.all_redundant_gold:
output_masks[0][title2index[redundant[0]]] = 0.0
else:
output_masks[0][title2index[example.redundant_gold[0]]] = 0.0
padding = [DUMMY] * (max_para_num - len(input_ids))
input_ids += padding
input_masks += padding
segment_ids += padding
features.append(
InputFeatures(input_ids=input_ids,
input_masks=input_masks,
segment_ids=segment_ids,
output_masks = output_masks,
num_paragraphs = num_paragraphs,
num_steps = num_steps,
ex_index = ex_index))
if not train or not graph_retriever_config.use_redundant or len(example.redundant_gold) == 0:
continue
##################
# Redundant gold #
##################
for redundant_gold in example.all_redundant_gold:
hist = set()
input_ids_r = []
input_masks_r = []
segment_ids_r = []
# Append gold and non-gold paragraphs from context
for p in redundant_gold + list(example.context.keys()):
if len(input_ids_r) == max_para_num:
break
#assert p in title2index
if p not in title2index:
assert p not in redundant_gold
continue
if p in hist:
continue
hist.add(p)
index = title2index[p]
input_ids_r.append(input_ids[index])
input_masks_r.append(input_masks[index])
segment_ids_r.append(segment_ids[index])
# Open-domain setting (mainly for HotpotQA fullwiki)
if graph_retriever_config.open:
for p in title2index:
if len(input_ids_r) == max_para_num:
break
if p in hist:
continue
hist.add(p)
index = title2index[p]
input_ids_r.append(input_ids[index])
input_masks_r.append(input_masks[index])
segment_ids_r.append(segment_ids[index])
assert len(input_ids_r) <= max_para_num
num_paragraphs_r = len(input_ids_r)
num_steps_r = len(redundant_gold)+1
assert num_steps_r <= max_steps
output_masks_r = [([1.0] * len(input_ids_r) + [0.0] * (max_para_num - len(input_ids_r) + 1)) for _ in range(max_para_num + 2)]
size = num_steps_r-1
for i in range(size):
for j in range(size):
if i != j:
output_masks_r[i][j] = 0.0
if i > 0:
output_masks_r[i][0] = 1.0
for i in range(size): #size-1
output_masks_r[size][i] = 0.0
for i in range(max_steps):
if i > size:
for j in range(len(output_masks_r[i])):
output_masks_r[i][j] = 0.0
padding = [DUMMY] * (max_para_num - len(input_ids_r))
input_ids_r += padding
input_masks_r += padding
segment_ids_r += padding
features.append(
InputFeatures(input_ids=input_ids_r,
input_masks=input_masks_r,
segment_ids=segment_ids_r,
output_masks = output_masks_r,
num_paragraphs = num_paragraphs_r,
num_steps = num_steps_r,
ex_index = None))
if not graph_retriever_config.use_multiple_redundant:
break
logger.info('Done!')
return features

https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/graph_retriever/utils.py#L470-L710

AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | graph_retriever/utils.py | python | DataProcessor._create_examples | (self, file_name, graph_retriever_config, set_type) | return examples | Limit the number of examples used. This is mainly for sanity-chacking new settings.

def _create_examples(self, file_name, graph_retriever_config, set_type):
task = graph_retriever_config.task
jsn = json.load(open(file_name, 'r'))
examples = []
'''
Limit the number of examples used.
This is mainly for sanity-chacking new settings.
'''
if graph_retriever_config.example_limit is not None:
random.shuffle(jsn)
jsn = sorted(jsn, key = lambda x: x['q_id'])
jsn = jsn[:graph_retriever_config.example_limit]
'''
Find the mximum size of the initial context (links are not included)
'''
graph_retriever_config.max_context_size = 0
logger.info('#### Loading examples... from {} ####'.format(file_name))
for (_, data) in enumerate(tqdm(jsn, desc='Example')):
guid = data['q_id']
question = data['question']
context = data['context'] # {context title: paragraph}
all_linked_paras_dic = data['all_linked_paras_dic'] # {context title: {linked title: paragraph}}
short_gold = data['short_gold'] # [title 1, title 2] (Both are gold)
redundant_gold = data['redundant_gold'] # [title 1, title 2, title 3] ("title 1" is not gold)
all_redundant_gold = data['all_redundant_gold']
'''
Limit the number of redundant examples
'''
all_redundant_gold = all_redundant_gold[:graph_retriever_config.max_redundant_num]
'''
Control the size of the initial TF-IDF retrieved paragraphs
*** Training time: to take a blalance between TF-IDF-based and link-based negative examples ***
'''
if graph_retriever_config.tfidf_limit is not None:
new_context = {}
for title in context:
if len(new_context) == graph_retriever_config.tfidf_limit:
break
new_context[title] = context[title]
context = new_context
'''
Use TagMe-based context at test time.
'''
if set_type == 'dev' and task == 'nq' and graph_retriever_config.tagme:
assert 'tagged_context' in data
'''
Reformat "tagged_context" if needed (c.f. the "context" case above)
'''
if type(data['tagged_context']) == list:
tagged_context = {c[0]: c[1] for c in data['tagged_context']}
data['tagged_context'] = tagged_context
'''
Append valid paragraphs from "tagged_context" to "context"
'''
for tagged_title in data['tagged_context']:
tagged_text = data['tagged_context'][tagged_title]
if tagged_title not in context and tagged_title is not None and tagged_title.strip() != '' and tagged_text is not None and tagged_text.strip() != '':
context[tagged_title] = tagged_text
'''
Clean "context" by removing invalid paragraphs
'''
removed_keys = []
for title in context:
if title is None or title.strip() == '' or context[title] is None or context[title].strip() == '':
removed_keys.append(title)
for key in removed_keys:
context.pop(key)
if task in ['squad', 'nq'] and set_type == 'train':
new_context = {}
orig_title = list(context.keys())[0].split('_')[0]
orig_titles = []
other_titles = []
for title in context:
title_ = title.split('_')[0]
if title_ == orig_title:
orig_titles.append(title)
else:
other_titles.append(title)
orig_index = 0
other_index = 0
while orig_index < len(orig_titles) or other_index < len(other_titles):
if orig_index < len(orig_titles):
new_context[orig_titles[orig_index]] = context[orig_titles[orig_index]]
orig_index += 1
if other_index < len(other_titles):
new_context[other_titles[other_index]] = context[other_titles[other_index]]
other_index += 1
context = new_context
'''
Convert link format
'''
new_all_linked_paras_dic = {} # {context title: {linked title: paragraph}}
all_linked_paras_dic # {linked_title: paragraph} or mixed
all_linked_para_title_dic = data['all_linked_para_title_dic'] # {context_title: [linked_title_1, linked_title_2, ...]}
removed_keys = []
tmp = {}
for key in all_linked_paras_dic:
if type(all_linked_paras_dic[key]) == dict:
removed_keys.append(key)
for linked_title in all_linked_paras_dic[key]:
if linked_title not in all_linked_paras_dic:
tmp[linked_title] = all_linked_paras_dic[key][linked_title]
if key in all_linked_para_title_dic:
all_linked_para_title_dic[key].append(linked_title)
else:
all_linked_para_title_dic[key] = [linked_title]
for key in removed_keys:
all_linked_paras_dic.pop(key)
for key in tmp:
if key not in all_linked_paras_dic:
all_linked_paras_dic[key] = tmp[key]
for context_title in context:
if context_title not in all_linked_para_title_dic:
continue
new_entry = {}
for linked_title in all_linked_para_title_dic[context_title]:
if linked_title not in all_linked_paras_dic:
continue
new_entry[linked_title] = all_linked_paras_dic[linked_title]
if len(new_entry) > 0:
new_all_linked_paras_dic[context_title] = new_entry
all_linked_paras_dic = new_all_linked_paras_dic
if set_type == 'dev':
'''
Clean "all_linked_paras_dic" by removing invalid paragraphs
'''
for c in all_linked_paras_dic:
removed_keys = []
links = all_linked_paras_dic[c]
for title in links:
if title is None or title.strip() == '' or links[title] is None or type(links[title]) != str or links[title].strip() == '':
removed_keys.append(title)
for key in removed_keys:
links.pop(key)
all_paras = {}
for title in context:
all_paras[title] = context[title]
if not graph_retriever_config.open:
continue
if title not in all_linked_paras_dic:
continue
for title_ in all_linked_paras_dic[title]:
if title_ not in all_paras:
all_paras[title_] = all_linked_paras_dic[title][title_]
else:
all_paras = None
if set_type == 'dev' and graph_retriever_config.expand_links:
expand_links(context, all_linked_paras_dic, all_paras)
if set_type == 'dev' and graph_retriever_config.no_links:
all_linked_paras_dic = {}
graph_retriever_config.max_context_size = max(graph_retriever_config.max_context_size, len(context))
'''
Ensure that all the gold paragraphs are included in "context"
'''
if set_type == 'train':
for t in short_gold + redundant_gold:
assert t in context
examples.append(InputExample(guid = guid,
q = question,
c = context,
para_dic = all_linked_paras_dic,
s_g = short_gold,
r_g = redundant_gold,
all_r_g = all_redundant_gold,
all_paras = all_paras))
if set_type == 'dev':
examples = sorted(examples, key = lambda x: len(x.all_paras))
logger.info('Done!')
return examples

https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/graph_retriever/utils.py#L228-L442
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_tfidf.py | python | count | (ngram, hash_size, multi_para, doc_id) | return row, col, data | Fetch the text of a document and compute hashed ngrams counts. | Fetch the text of a document and compute hashed ngrams counts. | [
"Fetch",
"the",
"text",
"of",
"a",
"document",
"and",
"compute",
"hashed",
"ngrams",
"counts",
"."
] | def count(ngram, hash_size, multi_para, doc_id):
"""Fetch the text of a document and compute hashed ngrams counts."""
global DOC2IDX
# FIXME: remove hard coding.
row, col, data = [], [], []
# Tokenize
if multi_para is True:
# 1. if multi_para is true, the doc contains multiple paragraphs separated by \n\n and with links.
tokens = tokenize(fetch_text_multi_para(doc_id))
else:
# 2. if not, only intro docs are retrieved and the sentences are separated by \t.
# remove sentence separations ("\t") (only for HotpotQA).
tokens = tokenize(fetch_text(doc_id).replace("\t", ""))
# Get ngrams from tokens, with stopword/punctuation filtering.
ngrams = tokens.ngrams(
n=ngram, uncased=True, filter_fn=filter_ngram
)
    # Hash ngrams and count occurrences
counts = Counter([hash(gram, hash_size)
for gram in ngrams])
# Return in sparse matrix data format.
row.extend(counts.keys())
col.extend([DOC2IDX[doc_id]] * len(counts))
data.extend(counts.values())
return row, col, data | [
"def",
"count",
"(",
"ngram",
",",
"hash_size",
",",
"multi_para",
",",
"doc_id",
")",
":",
"global",
"DOC2IDX",
"# FIXME: remove hard coding.",
"row",
",",
"col",
",",
"data",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"# Tokenize",
"if",
"multi_para",
"is",
"True",
":",
"# 1. if multi_para is true, the doc contains multiple paragraphs separated by \\n\\n and with links.",
"tokens",
"=",
"tokenize",
"(",
"fetch_text_multi_para",
"(",
"doc_id",
")",
")",
"else",
":",
"# 2. if not, only intro docs are retrieved and the sentences are separated by \\t.",
"# remove sentence separations (\"\\t\") (only for HotpotQA).",
"tokens",
"=",
"tokenize",
"(",
"fetch_text",
"(",
"doc_id",
")",
".",
"replace",
"(",
"\"\\t\"",
",",
"\"\"",
")",
")",
"# Get ngrams from tokens, with stopword/punctuation filtering.",
"ngrams",
"=",
"tokens",
".",
"ngrams",
"(",
"n",
"=",
"ngram",
",",
"uncased",
"=",
"True",
",",
"filter_fn",
"=",
"filter_ngram",
")",
"# Hash ngrams and count occurences",
"counts",
"=",
"Counter",
"(",
"[",
"hash",
"(",
"gram",
",",
"hash_size",
")",
"for",
"gram",
"in",
"ngrams",
"]",
")",
"# Return in sparse matrix data format.",
"row",
".",
"extend",
"(",
"counts",
".",
"keys",
"(",
")",
")",
"col",
".",
"extend",
"(",
"[",
"DOC2IDX",
"[",
"doc_id",
"]",
"]",
"*",
"len",
"(",
"counts",
")",
")",
"data",
".",
"extend",
"(",
"counts",
".",
"values",
"(",
")",
")",
"return",
"row",
",",
"col",
",",
"data"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_tfidf.py#L88-L116 |
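
A rough sketch of what count() produces for one document, assuming a toy DOC2IDX mapping, a made-up doc id, and a 2**24 hash space (the real size is a command-line argument):

from collections import Counter
from sklearn.utils import murmurhash3_32

hash_size = 2 ** 24                                       # assumed bucket count
DOC2IDX = {"Tokyo Imperial Palace_0": 0}                  # hypothetical doc-id -> column mapping
ngrams = ["tokyo", "imperial", "palace", "tokyo imperial", "imperial palace"]

counts = Counter(murmurhash3_32(g, positive=True) % hash_size for g in ngrams)
row = list(counts.keys())                                 # hashed n-gram ids
col = [DOC2IDX["Tokyo Imperial Palace_0"]] * len(counts)  # one column per document
data = list(counts.values())                              # per-document term counts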
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_tfidf.py | python | get_count_matrix | (args, db, db_opts) | return count_matrix, (DOC2IDX, doc_ids) | Form a sparse word to document count matrix (inverted index).
M[i, j] = # times word i appears in document j. | Form a sparse word to document count matrix (inverted index). | [
"Form",
"a",
"sparse",
"word",
"to",
"document",
"count",
"matrix",
"(",
"inverted",
"index",
")",
"."
] | def get_count_matrix(args, db, db_opts):
"""Form a sparse word to document count matrix (inverted index).
M[i, j] = # times word i appears in document j.
"""
# Map doc_ids to indexes
global DOC2IDX
db_class = get_class(db)
with db_class(**db_opts) as doc_db:
doc_ids = doc_db.get_doc_ids()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
# Setup worker pool
# TODO: Add tokenizer's choice.
tok_class = SimpleTokenizer
workers = ProcessPool(
args.num_workers,
initializer=init,
initargs=(tok_class, db_class, db_opts)
)
# Compute the count matrix in steps (to keep in memory)
logger.info('Mapping...')
row, col, data = [], [], []
step = max(int(len(doc_ids) / 10), 1)
batches = [doc_ids[i:i + step] for i in range(0, len(doc_ids), step)]
_count = partial(count, args.ngram, args.hash_size, args.multi_para)
for i, batch in enumerate(batches):
logger.info('-' * 25 + 'Batch %d/%d' %
(i + 1, len(batches)) + '-' * 25)
for b_row, b_col, b_data in workers.imap_unordered(_count, batch):
row.extend(b_row)
col.extend(b_col)
data.extend(b_data)
workers.close()
workers.join()
logger.info('Creating sparse matrix...')
count_matrix = sp.csr_matrix(
(data, (row, col)), shape=(args.hash_size, len(doc_ids))
)
count_matrix.sum_duplicates()
return count_matrix, (DOC2IDX, doc_ids) | [
"def",
"get_count_matrix",
"(",
"args",
",",
"db",
",",
"db_opts",
")",
":",
"# Map doc_ids to indexes",
"global",
"DOC2IDX",
"db_class",
"=",
"get_class",
"(",
"db",
")",
"with",
"db_class",
"(",
"*",
"*",
"db_opts",
")",
"as",
"doc_db",
":",
"doc_ids",
"=",
"doc_db",
".",
"get_doc_ids",
"(",
")",
"DOC2IDX",
"=",
"{",
"doc_id",
":",
"i",
"for",
"i",
",",
"doc_id",
"in",
"enumerate",
"(",
"doc_ids",
")",
"}",
"# Setup worker pool",
"# TODO: Add tokenizer's choice.",
"tok_class",
"=",
"SimpleTokenizer",
"workers",
"=",
"ProcessPool",
"(",
"args",
".",
"num_workers",
",",
"initializer",
"=",
"init",
",",
"initargs",
"=",
"(",
"tok_class",
",",
"db_class",
",",
"db_opts",
")",
")",
"# Compute the count matrix in steps (to keep in memory)",
"logger",
".",
"info",
"(",
"'Mapping...'",
")",
"row",
",",
"col",
",",
"data",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"step",
"=",
"max",
"(",
"int",
"(",
"len",
"(",
"doc_ids",
")",
"/",
"10",
")",
",",
"1",
")",
"batches",
"=",
"[",
"doc_ids",
"[",
"i",
":",
"i",
"+",
"step",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"doc_ids",
")",
",",
"step",
")",
"]",
"_count",
"=",
"partial",
"(",
"count",
",",
"args",
".",
"ngram",
",",
"args",
".",
"hash_size",
",",
"args",
".",
"multi_para",
")",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"batches",
")",
":",
"logger",
".",
"info",
"(",
"'-'",
"*",
"25",
"+",
"'Batch %d/%d'",
"%",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"batches",
")",
")",
"+",
"'-'",
"*",
"25",
")",
"for",
"b_row",
",",
"b_col",
",",
"b_data",
"in",
"workers",
".",
"imap_unordered",
"(",
"_count",
",",
"batch",
")",
":",
"row",
".",
"extend",
"(",
"b_row",
")",
"col",
".",
"extend",
"(",
"b_col",
")",
"data",
".",
"extend",
"(",
"b_data",
")",
"workers",
".",
"close",
"(",
")",
"workers",
".",
"join",
"(",
")",
"logger",
".",
"info",
"(",
"'Creating sparse matrix...'",
")",
"count_matrix",
"=",
"sp",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"(",
"row",
",",
"col",
")",
")",
",",
"shape",
"=",
"(",
"args",
".",
"hash_size",
",",
"len",
"(",
"doc_ids",
")",
")",
")",
"count_matrix",
".",
"sum_duplicates",
"(",
")",
"return",
"count_matrix",
",",
"(",
"DOC2IDX",
",",
"doc_ids",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_tfidf.py#L119-L161 |
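
A minimal sketch of the final assembly step above, assuming a toy 16-bucket hash space, two documents, and invented triplet values:

import scipy.sparse as sp

hash_size, n_docs = 16, 2            # toy sizes, not the script's defaults
row = [3, 7, 3, 1]                   # hashed n-gram ids collected from the workers
col = [0, 0, 1, 1]                   # document columns
data = [2, 1, 5, 1]                  # term counts
count_matrix = sp.csr_matrix((data, (row, col)), shape=(hash_size, n_docs))
count_matrix.sum_duplicates()        # merge any repeated (row, col) entries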
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_tfidf.py | python | get_tfidf_matrix | (cnts) | return tfidfs | Convert the word count matrix into tfidf one.
tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
* tf = term frequency in document
* N = number of documents
* Nt = number of occurrences of term in all documents | Convert the word count matrix into tfidf one. | [
"Convert",
"the",
"word",
"count",
"matrix",
"into",
"tfidf",
"one",
"."
] | def get_tfidf_matrix(cnts):
"""Convert the word count matrix into tfidf one.
tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
* tf = term frequency in document
* N = number of documents
    * Nt = number of occurrences of term in all documents
"""
Ns = get_doc_freqs(cnts)
idfs = np.log((cnts.shape[1] - Ns + 0.5) / (Ns + 0.5))
idfs[idfs < 0] = 0
idfs = sp.diags(idfs, 0)
tfs = cnts.log1p()
tfidfs = idfs.dot(tfs)
return tfidfs | [
"def",
"get_tfidf_matrix",
"(",
"cnts",
")",
":",
"Ns",
"=",
"get_doc_freqs",
"(",
"cnts",
")",
"idfs",
"=",
"np",
".",
"log",
"(",
"(",
"cnts",
".",
"shape",
"[",
"1",
"]",
"-",
"Ns",
"+",
"0.5",
")",
"/",
"(",
"Ns",
"+",
"0.5",
")",
")",
"idfs",
"[",
"idfs",
"<",
"0",
"]",
"=",
"0",
"idfs",
"=",
"sp",
".",
"diags",
"(",
"idfs",
",",
"0",
")",
"tfs",
"=",
"cnts",
".",
"log1p",
"(",
")",
"tfidfs",
"=",
"idfs",
".",
"dot",
"(",
"tfs",
")",
"return",
"tfidfs"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_tfidf.py#L169-L183 |
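
A worked instance of the weighting formula in the docstring, on an invented 2-term-by-3-document count matrix: the first term appears in one of three documents, so its idf is log(2.5 / 1.5) ≈ 0.51; the second appears in every document, so its negative idf is clipped to 0.

import numpy as np
import scipy.sparse as sp

cnts = sp.csr_matrix(np.array([[2, 0, 0],
                               [1, 3, 1]]))                 # rows = terms, cols = documents (toy values)
Ns = np.array((cnts > 0).astype(int).sum(1)).squeeze()      # Nt for each term: [1, 3]
idfs = np.log((cnts.shape[1] - Ns + 0.5) / (Ns + 0.5))      # [~0.51, negative]
idfs[idfs < 0] = 0                                          # clip, as get_tfidf_matrix does
tfidfs = sp.diags(idfs, 0).dot(cnts.log1p())                # log(tf + 1) scaled by idf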
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_tfidf.py | python | get_doc_freqs | (cnts) | return freqs | Return word --> # of docs it appears in. | Return word --> # of docs it appears in. | [
"Return",
"word",
"--",
">",
"#",
"of",
"docs",
"it",
"appears",
"in",
"."
] | def get_doc_freqs(cnts):
"""Return word --> # of docs it appears in."""
binary = (cnts > 0).astype(int)
freqs = np.array(binary.sum(1)).squeeze()
return freqs | [
"def",
"get_doc_freqs",
"(",
"cnts",
")",
":",
"binary",
"=",
"(",
"cnts",
">",
"0",
")",
".",
"astype",
"(",
"int",
")",
"freqs",
"=",
"np",
".",
"array",
"(",
"binary",
".",
"sum",
"(",
"1",
")",
")",
".",
"squeeze",
"(",
")",
"return",
"freqs"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_tfidf.py#L186-L190 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.close | (self) | Close the connection to the database. | Close the connection to the database. | [
"Close",
"the",
"connection",
"to",
"the",
"database",
"."
] | def close(self):
"""Close the connection to the database."""
self.connection.close() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"connection",
".",
"close",
"(",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L25-L27 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.get_doc_ids | (self) | return results | Fetch all ids of docs stored in the db. | Fetch all ids of docs stored in the db. | [
"Fetch",
"all",
"ids",
"of",
"docs",
"stored",
"in",
"the",
"db",
"."
] | def get_doc_ids(self):
"""Fetch all ids of docs stored in the db."""
cursor = self.connection.cursor()
cursor.execute("SELECT id FROM documents")
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results | [
"def",
"get_doc_ids",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT id FROM documents\"",
")",
"results",
"=",
"[",
"r",
"[",
"0",
"]",
"for",
"r",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"]",
"cursor",
".",
"close",
"(",
")",
"return",
"results"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L29-L35 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.get_doc_text | (self, doc_id) | return result if result is None else result[0] | Fetch the raw text of the doc for 'doc_id'. | Fetch the raw text of the doc for 'doc_id'. | [
"Fetch",
"the",
"raw",
"text",
"of",
"the",
"doc",
"for",
"doc_id",
"."
] | def get_doc_text(self, doc_id):
"""Fetch the raw text of the doc for 'doc_id'."""
cursor = self.connection.cursor()
cursor.execute(
"SELECT text FROM documents WHERE id = ?",
(doc_id,)
)
result = cursor.fetchone()
cursor.close()
return result if result is None else result[0] | [
"def",
"get_doc_text",
"(",
"self",
",",
"doc_id",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT text FROM documents WHERE id = ?\"",
",",
"(",
"doc_id",
",",
")",
")",
"result",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"return",
"result",
"if",
"result",
"is",
"None",
"else",
"result",
"[",
"0",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L37-L46 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.get_hyper_linked | (self, doc_id) | return result if (result is None or len(result[0]) == 0) else [normalize(title) for title in result[0].split("\t")] | Fetch the hyper-linked titles of the doc for 'doc_id'. | Fetch the hyper-linked titles of the doc for 'doc_id'. | [
"Fetch",
"the",
"hyper",
"-",
"linked",
"titles",
"of",
"the",
"doc",
"for",
"doc_id",
"."
] | def get_hyper_linked(self, doc_id):
"""Fetch the hyper-linked titles of the doc for 'doc_id'."""
cursor = self.connection.cursor()
cursor.execute(
"SELECT linked_title FROM documents WHERE id = ?",
(doc_id,)
)
result = cursor.fetchone()
cursor.close()
return result if (result is None or len(result[0]) == 0) else [normalize(title) for title in result[0].split("\t")] | [
"def",
"get_hyper_linked",
"(",
"self",
",",
"doc_id",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT linked_title FROM documents WHERE id = ?\"",
",",
"(",
"doc_id",
",",
")",
")",
"result",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"return",
"result",
"if",
"(",
"result",
"is",
"None",
"or",
"len",
"(",
"result",
"[",
"0",
"]",
")",
"==",
"0",
")",
"else",
"[",
"normalize",
"(",
"title",
")",
"for",
"title",
"in",
"result",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\t\"",
")",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L48-L57 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.get_original_title | (self, doc_id) | return result if result is None else result[0] | Fetch the original title name of the doc. | Fetch the original title name of the doc. | [
"Fetch",
"the",
"original",
"title",
"name",
"of",
"the",
"doc",
"."
] | def get_original_title(self, doc_id):
"""Fetch the original title name of the doc."""
cursor = self.connection.cursor()
cursor.execute(
"SELECT original_title FROM documents WHERE id = ?",
(doc_id,)
)
result = cursor.fetchone()
cursor.close()
return result if result is None else result[0] | [
"def",
"get_original_title",
"(",
"self",
",",
"doc_id",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT original_title FROM documents WHERE id = ?\"",
",",
"(",
"doc_id",
",",
")",
")",
"result",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"return",
"result",
"if",
"result",
"is",
"None",
"else",
"result",
"[",
"0",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L59-L68 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/doc_db.py | python | DocDB.get_doc_text_hyper_linked_titles_for_articles | (self, doc_id) | fetch all of the paragraphs with their corresponding hyperlink titles.
e.g.,
>>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")
>>> paras[2]
'It is built on the site of the old Edo Castle. The total area including the gardens is . During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'
>>> links[2]
['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California'] | fetch all of the paragraphs with their corresponding hyperlink titles.
e.g.,
>>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")
>>> paras[2]
'It is built on the site of the old Edo Castle. The total area including the gardens is . During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'
>>> links[2]
['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California'] | [
"fetch",
"all",
"of",
"the",
"paragraphs",
"with",
"their",
"corresponding",
"hyperlink",
"titles",
".",
"e",
".",
"g",
".",
">>>",
"paras",
"links",
"=",
"db",
".",
"get_doc_text_hyper_linked_titles_for_articles",
"(",
"Tokyo",
"Imperial",
"Palace_0",
")",
">>>",
"paras",
"[",
"2",
"]",
"It",
"is",
"built",
"on",
"the",
"site",
"of",
"the",
"old",
"Edo",
"Castle",
".",
"The",
"total",
"area",
"including",
"the",
"gardens",
"is",
".",
"During",
"the",
"height",
"of",
"the",
"1980s",
"Japanese",
"property",
"bubble",
"the",
"palace",
"grounds",
"were",
"valued",
"by",
"some",
"to",
"be",
"more",
"than",
"the",
"value",
"of",
"all",
"of",
"the",
"real",
"estate",
"in",
"the",
"state",
"of",
"California",
".",
">>>",
"links",
"[",
"2",
"]",
"[",
"Edo",
"Castle",
"Japanese",
"asset",
"price",
"bubble",
"Real",
"estate",
"California",
"]"
] | def get_doc_text_hyper_linked_titles_for_articles(self, doc_id):
"""
fetch all of the paragraphs with their corresponding hyperlink titles.
e.g.,
>>> paras, links = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")
>>> paras[2]
'It is built on the site of the old Edo Castle. The total area including the gardens is . During the height of the 1980s Japanese property bubble, the palace grounds were valued by some to be more than the value of all of the real estate in the state of California.'
>>> links[2]
['Edo Castle', 'Japanese asset price bubble', 'Real estate', 'California']
"""
cursor = self.connection.cursor()
cursor.execute(
"SELECT text FROM documents WHERE id = ?",
(doc_id,)
)
result = cursor.fetchone()
cursor.close()
if result is None:
return [], []
else:
hyper_linked_paragraphs = result[0].split("\n\n")
paragraphs, hyper_linked_titles = [], []
for hyper_linked_paragraph in hyper_linked_paragraphs:
paragraphs.append(remove_tags(hyper_linked_paragraph))
hyper_linked_titles.append([normalize(title) for title in find_hyper_linked_titles(
hyper_linked_paragraph)])
return paragraphs, hyper_linked_titles | [
"def",
"get_doc_text_hyper_linked_titles_for_articles",
"(",
"self",
",",
"doc_id",
")",
":",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT text FROM documents WHERE id = ?\"",
",",
"(",
"doc_id",
",",
")",
")",
"result",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"cursor",
".",
"close",
"(",
")",
"if",
"result",
"is",
"None",
":",
"return",
"[",
"]",
",",
"[",
"]",
"else",
":",
"hyper_linked_paragraphs",
"=",
"result",
"[",
"0",
"]",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"paragraphs",
",",
"hyper_linked_titles",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"hyper_linked_paragraph",
"in",
"hyper_linked_paragraphs",
":",
"paragraphs",
".",
"append",
"(",
"remove_tags",
"(",
"hyper_linked_paragraph",
")",
")",
"hyper_linked_titles",
".",
"append",
"(",
"[",
"normalize",
"(",
"title",
")",
"for",
"title",
"in",
"find_hyper_linked_titles",
"(",
"hyper_linked_paragraph",
")",
"]",
")",
"return",
"paragraphs",
",",
"hyper_linked_titles"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/doc_db.py#L70-L98 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_db.py | python | import_module | (filename) | return module | Import a module given a full path to the file. | Import a module given a full path to the file. | [
"Import",
"a",
"module",
"given",
"a",
"full",
"path",
"to",
"the",
"file",
"."
] | def import_module(filename):
"""Import a module given a full path to the file."""
spec = importlib.util.spec_from_file_location('doc_filter', filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module | [
"def",
"import_module",
"(",
"filename",
")",
":",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"'doc_filter'",
",",
"filename",
")",
"module",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"module",
")",
"return",
"module"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_db.py#L43-L48 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_db.py | python | iter_files | (path) | Walk through all files located under a root path. | Walk through all files located under a root path. | [
"Walk",
"through",
"all",
"files",
"located",
"under",
"a",
"root",
"path",
"."
] | def iter_files(path):
"""Walk through all files located under a root path."""
if os.path.isfile(path):
yield path
elif os.path.isdir(path):
for dirpath, _, filenames in os.walk(path):
for f in filenames:
yield os.path.join(dirpath, f)
else:
raise RuntimeError('Path %s is invalid' % path) | [
"def",
"iter_files",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"yield",
"path",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"for",
"dirpath",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"f",
"in",
"filenames",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"dirpath",
",",
"f",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Path %s is invalid'",
"%",
"path",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_db.py#L56-L65 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_db.py | python | get_contents_hotpotqa | (filename) | return documents | Parse the contents of a file. Each line is a JSON encoded document. | Parse the contents of a file. Each line is a JSON encoded document. | [
"Parse",
"the",
"contents",
"of",
"a",
"file",
".",
"Each",
"line",
"is",
"a",
"JSON",
"encoded",
"document",
"."
] | def get_contents_hotpotqa(filename):
"""Parse the contents of a file. Each line is a JSON encoded document."""
global PREPROCESS_FN
documents = []
extracted_items = process_jsonlines_hotpotqa(filename)
for extracted_item in extracted_items:
title = extracted_item["title"]
text = extracted_item["plain_text"]
original_title = extracted_item["original_title"]
hyper_linked_titles = extracted_item["hyper_linked_titles"]
documents.append((title, text,
hyper_linked_titles, original_title))
return documents | [
"def",
"get_contents_hotpotqa",
"(",
"filename",
")",
":",
"global",
"PREPROCESS_FN",
"documents",
"=",
"[",
"]",
"extracted_items",
"=",
"process_jsonlines_hotpotqa",
"(",
"filename",
")",
"for",
"extracted_item",
"in",
"extracted_items",
":",
"title",
"=",
"extracted_item",
"[",
"\"title\"",
"]",
"text",
"=",
"extracted_item",
"[",
"\"plain_text\"",
"]",
"original_title",
"=",
"extracted_item",
"[",
"\"original_title\"",
"]",
"hyper_linked_titles",
"=",
"extracted_item",
"[",
"\"hyper_linked_titles\"",
"]",
"documents",
".",
"append",
"(",
"(",
"title",
",",
"text",
",",
"hyper_linked_titles",
",",
"original_title",
")",
")",
"return",
"documents"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_db.py#L68-L81 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_db.py | python | get_contents | (filename) | return documents | Parse the contents of a file. Each line is a JSON encoded document. | Parse the contents of a file. Each line is a JSON encoded document. | [
"Parse",
"the",
"contents",
"of",
"a",
"file",
".",
"Each",
"line",
"is",
"a",
"JSON",
"encoded",
"document",
"."
] | def get_contents(filename):
"""Parse the contents of a file. Each line is a JSON encoded document."""
global PREPROCESS_FN
documents = []
extracted_items = process_jsonlines(filename)
for extracted_item in extracted_items:
title = extracted_item["title"]
text = extracted_item["plain_text"]
original_title = extracted_item["original_title"]
hyper_linked_titles = extracted_item["hyper_linked_titles"]
documents.append((title, text,
hyper_linked_titles, original_title))
return documents | [
"def",
"get_contents",
"(",
"filename",
")",
":",
"global",
"PREPROCESS_FN",
"documents",
"=",
"[",
"]",
"extracted_items",
"=",
"process_jsonlines",
"(",
"filename",
")",
"for",
"extracted_item",
"in",
"extracted_items",
":",
"title",
"=",
"extracted_item",
"[",
"\"title\"",
"]",
"text",
"=",
"extracted_item",
"[",
"\"plain_text\"",
"]",
"original_title",
"=",
"extracted_item",
"[",
"\"original_title\"",
"]",
"hyper_linked_titles",
"=",
"extracted_item",
"[",
"\"hyper_linked_titles\"",
"]",
"documents",
".",
"append",
"(",
"(",
"title",
",",
"text",
",",
"hyper_linked_titles",
",",
"original_title",
")",
")",
"return",
"documents"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_db.py#L83-L96 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/build_db.py | python | store_contents | (wiki_dir, save_path, preprocess, num_workers=None, hotpotqa_format=False) | Preprocess and store a corpus of documents in sqlite.
Args:
wiki_dir: Root path to directory (or directory of directories) of files
containing json encoded documents (must have `id` and `text` fields).
save_path: Path to output sqlite db.
preprocess: Path to file defining a custom `preprocess` function. Takes
in and outputs a structured doc.
num_workers: Number of parallel processes to use when reading docs. | Preprocess and store a corpus of documents in sqlite. | [
"Preprocess",
"and",
"store",
"a",
"corpus",
"of",
"documents",
"in",
"sqlite",
"."
] | def store_contents(wiki_dir, save_path, preprocess, num_workers=None, hotpotqa_format=False):
"""Preprocess and store a corpus of documents in sqlite.
Args:
        wiki_dir: Root path to directory (or directory of directories) of files
containing json encoded documents (must have `id` and `text` fields).
save_path: Path to output sqlite db.
preprocess: Path to file defining a custom `preprocess` function. Takes
in and outputs a structured doc.
num_workers: Number of parallel processes to use when reading docs.
"""
filenames = [f for f in glob.glob(
wiki_dir + "/*/wiki_*", recursive=True) if ".bz2" not in f]
if os.path.isfile(save_path):
raise RuntimeError('%s already exists! Not overwriting.' % save_path)
logger.info('Reading into database...')
conn = sqlite3.connect(save_path)
c = conn.cursor()
c.execute(
"CREATE TABLE documents (id PRIMARY KEY, text, linked_title, original_title);")
workers = ProcessPool(num_workers, initializer=init,
initargs=(preprocess,))
count = 0
    # Due to the slight difference of input format between preprocessed HotpotQA wikipedia data and
    # the ones produced by Wikiextractor, we call different functions for data processing.
if hotpotqa_format is True:
content_processing_method = get_contents_hotpotqa
else:
content_processing_method = get_contents
with tqdm(total=len(filenames)) as pbar:
for pairs in tqdm(workers.imap_unordered(content_processing_method, filenames)):
count += len(pairs)
c.executemany(
"INSERT OR REPLACE INTO documents VALUES (?,?,?,?)", pairs)
pbar.update()
logger.info('Read %d docs.' % count)
logger.info('Committing...')
conn.commit()
conn.close() | [
"def",
"store_contents",
"(",
"wiki_dir",
",",
"save_path",
",",
"preprocess",
",",
"num_workers",
"=",
"None",
",",
"hotpotqa_format",
"=",
"False",
")",
":",
"filenames",
"=",
"[",
"f",
"for",
"f",
"in",
"glob",
".",
"glob",
"(",
"wiki_dir",
"+",
"\"/*/wiki_*\"",
",",
"recursive",
"=",
"True",
")",
"if",
"\".bz2\"",
"not",
"in",
"f",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"save_path",
")",
":",
"raise",
"RuntimeError",
"(",
"'%s already exists! Not overwriting.'",
"%",
"save_path",
")",
"logger",
".",
"info",
"(",
"'Reading into database...'",
")",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"save_path",
")",
"c",
"=",
"conn",
".",
"cursor",
"(",
")",
"c",
".",
"execute",
"(",
"\"CREATE TABLE documents (id PRIMARY KEY, text, linked_title, original_title);\"",
")",
"workers",
"=",
"ProcessPool",
"(",
"num_workers",
",",
"initializer",
"=",
"init",
",",
"initargs",
"=",
"(",
"preprocess",
",",
")",
")",
"count",
"=",
"0",
"# Due to the slight difference of input format between preprocessed HotpotQA wikipedia data and ",
"# the ones by Wikiextractor, we call different functions for data process.",
"if",
"hotpotqa_format",
"is",
"True",
":",
"content_processing_method",
"=",
"get_contents_hotpotqa",
"else",
":",
"content_processing_method",
"=",
"get_contents",
"with",
"tqdm",
"(",
"total",
"=",
"len",
"(",
"filenames",
")",
")",
"as",
"pbar",
":",
"for",
"pairs",
"in",
"tqdm",
"(",
"workers",
".",
"imap_unordered",
"(",
"content_processing_method",
",",
"filenames",
")",
")",
":",
"count",
"+=",
"len",
"(",
"pairs",
")",
"c",
".",
"executemany",
"(",
"\"INSERT OR REPLACE INTO documents VALUES (?,?,?,?)\"",
",",
"pairs",
")",
"pbar",
".",
"update",
"(",
")",
"logger",
".",
"info",
"(",
"'Read %d docs.'",
"%",
"count",
")",
"logger",
".",
"info",
"(",
"'Committing...'",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/build_db.py#L98-L139 |
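
Once store_contents has finished, the output is an ordinary SQLite file with the four-column documents table created above, so rows can be inspected directly; the path and doc id below are placeholders, not values from the repository.

import sqlite3

conn = sqlite3.connect("enwiki_docs.db")              # hypothetical save_path
c = conn.cursor()
c.execute("SELECT text, linked_title FROM documents WHERE id = ?",
          ("Tokyo Imperial Palace_0",))               # hypothetical doc id
row = c.fetchone()                                    # None if the id is absent
conn.close()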
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.__len__ | (self) | return len(self.data) | The number of tokens. | The number of tokens. | [
"The",
"number",
"of",
"tokens",
"."
] | def __len__(self):
"""The number of tokens."""
return len(self.data) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"data",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L32-L34 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.slice | (self, i=None, j=None) | return new_tokens | Return a view of the list of tokens from [i, j). | Return a view of the list of tokens from [i, j). | [
"Return",
"a",
"view",
"of",
"the",
"list",
"of",
"tokens",
"from",
"[",
"i",
"j",
")",
"."
] | def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens | [
"def",
"slice",
"(",
"self",
",",
"i",
"=",
"None",
",",
"j",
"=",
"None",
")",
":",
"new_tokens",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"new_tokens",
".",
"data",
"=",
"self",
".",
"data",
"[",
"i",
":",
"j",
"]",
"return",
"new_tokens"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L36-L40 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.untokenize | (self) | return ''.join([t[self.TEXT_WS] for t in self.data]).strip() | Returns the original text (with whitespace reinserted). | Returns the original text (with whitespace reinserted). | [
"Returns",
"the",
"original",
"text",
"(",
"with",
"whitespace",
"reinserted",
")",
"."
] | def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip() | [
"def",
"untokenize",
"(",
"self",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"t",
"[",
"self",
".",
"TEXT_WS",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]",
")",
".",
"strip",
"(",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L42-L44 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.words | (self, uncased=False) | Returns a list of the text of each token
Args:
uncased: lower cases text | Returns a list of the text of each token
Args:
uncased: lower cases text | [
"Returns",
"a",
"list",
"of",
"the",
"text",
"of",
"each",
"token",
"Args",
":",
"uncased",
":",
"lower",
"cases",
"text"
] | def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data] | [
"def",
"words",
"(",
"self",
",",
"uncased",
"=",
"False",
")",
":",
"if",
"uncased",
":",
"return",
"[",
"t",
"[",
"self",
".",
"TEXT",
"]",
".",
"lower",
"(",
")",
"for",
"t",
"in",
"self",
".",
"data",
"]",
"else",
":",
"return",
"[",
"t",
"[",
"self",
".",
"TEXT",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L46-L54 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.offsets | (self) | return [t[self.SPAN] for t in self.data] | Returns a list of [start, end) character offsets of each token. | Returns a list of [start, end) character offsets of each token. | [
"Returns",
"a",
"list",
"of",
"[",
"start",
"end",
")",
"character",
"offsets",
"of",
"each",
"token",
"."
] | def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data] | [
"def",
"offsets",
"(",
"self",
")",
":",
"return",
"[",
"t",
"[",
"self",
".",
"SPAN",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L56-L58 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.pos | (self) | return [t[self.POS] for t in self.data] | Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included. | Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included. | [
"Returns",
"a",
"list",
"of",
"part",
"-",
"of",
"-",
"speech",
"tags",
"of",
"each",
"token",
".",
"Returns",
"None",
"if",
"this",
"annotation",
"was",
"not",
"included",
"."
] | def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data] | [
"def",
"pos",
"(",
"self",
")",
":",
"if",
"'pos'",
"not",
"in",
"self",
".",
"annotators",
":",
"return",
"None",
"return",
"[",
"t",
"[",
"self",
".",
"POS",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L60-L66 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.lemmas | (self) | return [t[self.LEMMA] for t in self.data] | Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included. | Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included. | [
"Returns",
"a",
"list",
"of",
"the",
"lemmatized",
"text",
"of",
"each",
"token",
".",
"Returns",
"None",
"if",
"this",
"annotation",
"was",
"not",
"included",
"."
] | def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data] | [
"def",
"lemmas",
"(",
"self",
")",
":",
"if",
"'lemma'",
"not",
"in",
"self",
".",
"annotators",
":",
"return",
"None",
"return",
"[",
"t",
"[",
"self",
".",
"LEMMA",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L68-L74 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.entities | (self) | return [t[self.NER] for t in self.data] | Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included. | Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included. | [
"Returns",
"a",
"list",
"of",
"named",
"-",
"entity",
"-",
"recognition",
"tags",
"of",
"each",
"token",
".",
"Returns",
"None",
"if",
"this",
"annotation",
"was",
"not",
"included",
"."
] | def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data] | [
"def",
"entities",
"(",
"self",
")",
":",
"if",
"'ner'",
"not",
"in",
"self",
".",
"annotators",
":",
"return",
"None",
"return",
"[",
"t",
"[",
"self",
".",
"NER",
"]",
"for",
"t",
"in",
"self",
".",
"data",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L76-L82 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.ngrams | (self, n=1, uncased=False, filter_fn=None, as_strings=True) | return ngrams | Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_strings: return the ngram as a string vs list | Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_strings: return the ngram as a string vs list | [
"Returns",
"a",
"list",
"of",
"all",
"ngrams",
"from",
"length",
"1",
"to",
"n",
".",
"Args",
":",
"n",
":",
"upper",
"limit",
"of",
"ngram",
"length",
"uncased",
":",
"lower",
"cases",
"text",
"filter_fn",
":",
"user",
"function",
"that",
"takes",
"in",
"an",
"ngram",
"list",
"and",
"returns",
"True",
"or",
"False",
"to",
"keep",
"or",
"not",
"keep",
"the",
"ngram",
"as_string",
":",
"return",
"the",
"ngram",
"as",
"a",
"string",
"vs",
"list"
] | def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
            as_strings: return the ngram as a string vs list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams | [
"def",
"ngrams",
"(",
"self",
",",
"n",
"=",
"1",
",",
"uncased",
"=",
"False",
",",
"filter_fn",
"=",
"None",
",",
"as_strings",
"=",
"True",
")",
":",
"def",
"_skip",
"(",
"gram",
")",
":",
"if",
"not",
"filter_fn",
":",
"return",
"False",
"return",
"filter_fn",
"(",
"gram",
")",
"words",
"=",
"self",
".",
"words",
"(",
"uncased",
")",
"ngrams",
"=",
"[",
"(",
"s",
",",
"e",
"+",
"1",
")",
"for",
"s",
"in",
"range",
"(",
"len",
"(",
"words",
")",
")",
"for",
"e",
"in",
"range",
"(",
"s",
",",
"min",
"(",
"s",
"+",
"n",
",",
"len",
"(",
"words",
")",
")",
")",
"if",
"not",
"_skip",
"(",
"words",
"[",
"s",
":",
"e",
"+",
"1",
"]",
")",
"]",
"# Concatenate into strings",
"if",
"as_strings",
":",
"ngrams",
"=",
"[",
"'{}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"words",
"[",
"s",
":",
"e",
"]",
")",
")",
"for",
"(",
"s",
",",
"e",
")",
"in",
"ngrams",
"]",
"return",
"ngrams"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L84-L108 |
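
A minimal trace of the span enumeration inside ngrams(), assuming n = 2, no filtering, and a three-word token list; the comments show the values the two comprehensions produce.

words = ["academy", "award", "for"]                   # toy word list
n = 2
spans = [(s, e + 1)
         for s in range(len(words))
         for e in range(s, min(s + n, len(words)))]
# spans  -> [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
ngrams = [" ".join(words[s:e]) for s, e in spans]
# ngrams -> ['academy', 'academy award', 'award', 'award for', 'for']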
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | Tokens.entity_groups | (self) | return groups | Group consecutive entity tokens with the same NER tag. | Group consecutive entity tokens with the same NER tag. | [
"Group",
"consecutive",
"entity",
"tokens",
"with",
"the",
"same",
"NER",
"tag",
"."
] | def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups | [
"def",
"entity_groups",
"(",
"self",
")",
":",
"entities",
"=",
"self",
".",
"entities",
"(",
")",
"if",
"not",
"entities",
":",
"return",
"None",
"non_ent",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'non_ent'",
",",
"'O'",
")",
"groups",
"=",
"[",
"]",
"idx",
"=",
"0",
"while",
"idx",
"<",
"len",
"(",
"entities",
")",
":",
"ner_tag",
"=",
"entities",
"[",
"idx",
"]",
"# Check for entity tag",
"if",
"ner_tag",
"!=",
"non_ent",
":",
"# Chomp the sequence",
"start",
"=",
"idx",
"while",
"(",
"idx",
"<",
"len",
"(",
"entities",
")",
"and",
"entities",
"[",
"idx",
"]",
"==",
"ner_tag",
")",
":",
"idx",
"+=",
"1",
"groups",
".",
"append",
"(",
"(",
"self",
".",
"slice",
"(",
"start",
",",
"idx",
")",
".",
"untokenize",
"(",
")",
",",
"ner_tag",
")",
")",
"else",
":",
"idx",
"+=",
"1",
"return",
"groups"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L110-L129 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tokenizers.py | python | SimpleTokenizer.__init__ | (self, **kwargs) | Args:
annotators: None or empty set (only tokenizes). | Args:
annotators: None or empty set (only tokenizes). | [
"Args",
":",
"annotators",
":",
"None",
"or",
"empty",
"set",
"(",
"only",
"tokenizes",
")",
"."
] | def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set() | [
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_regexp",
"=",
"regex",
".",
"compile",
"(",
"'(%s)|(%s)'",
"%",
"(",
"self",
".",
"ALPHA_NUM",
",",
"self",
".",
"NON_WS",
")",
",",
"flags",
"=",
"regex",
".",
"IGNORECASE",
"+",
"regex",
".",
"UNICODE",
"+",
"regex",
".",
"MULTILINE",
")",
"if",
"len",
"(",
"kwargs",
".",
"get",
"(",
"'annotators'",
",",
"{",
"}",
")",
")",
">",
"0",
":",
"logger",
".",
"warning",
"(",
"'%s only tokenizes! Skipping annotators: %s'",
"%",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"kwargs",
".",
"get",
"(",
"'annotators'",
")",
")",
")",
"self",
".",
"annotators",
"=",
"set",
"(",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tokenizers.py#L151-L163 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | normalize | (text) | return text[0].capitalize() + text[1:] | Resolve different types of unicode encodings / capitalization in HotpotQA data. | Resolve different types of unicode encodings / capitalization in HotpotQA data. | [
"Resolve",
"different",
"type",
"of",
"unicode",
"encodings",
"/",
"capitarization",
"in",
"HotpotQA",
"data",
"."
] | def normalize(text):
"""Resolve different type of unicode encodings / capitarization in HotpotQA data."""
text = unicodedata.normalize('NFD', text)
return text[0].capitalize() + text[1:] | [
"def",
"normalize",
"(",
"text",
")",
":",
"text",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFD'",
",",
"text",
")",
"return",
"text",
"[",
"0",
"]",
".",
"capitalize",
"(",
")",
"+",
"text",
"[",
"1",
":",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L17-L20 |
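
A small sketch of the two steps normalize() performs, on an invented lowercase title containing an accented character:

import unicodedata

text = "café wednesday"                                # hypothetical title
text = unicodedata.normalize("NFD", text)              # decompose accents into combining marks
text = text[0].capitalize() + text[1:]                 # upper-case only the first character
# -> "Café wednesday" (the accent is now stored as a separate combining character)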
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | process_jsonlines | (filename) | return extracted_items | This is process_jsonlines method for extracted Wikipedia file.
After extracting items by using Wikiextractor (with `--json` and `--links` options),
you will get the files named with wiki_xx, where each line contains the information of each article.
e.g.,
{"id": "316", "url": "https://en.wikipedia.org/wiki?curid=316", "title": "Academy Award for Best Production Design",
"text": "Academy Award for Best Production Design\n\nThe <a href=\"Academy%20Awards\">Academy Award</a> for
Best Production Design recognizes achievement for <a href=\"art%20direction\">art direction</a> \n\n"}
This function takes this input and extracts items.
Each article contains one or more paragraphs, and paragraphs are separated by \n\n. | This is process_jsonlines method for extracted Wikipedia file.
After extracting items by using Wikiextractor (with `--json` and `--links` options),
you will get the files named with wiki_xx, where each line contains the information of each article.
e.g.,
{"id": "316", "url": "https://en.wikipedia.org/wiki?curid=316", "title": "Academy Award for Best Production Design",
"text": "Academy Award for Best Production Design\n\nThe <a href=\"Academy%20Awards\">Academy Award</a> for
Best Production Design recognizes achievement for <a href=\"art%20direction\">art direction</a> \n\n"}
This function takes this input and extracts items.
Each article contains one or more paragraphs, and paragraphs are separated by \n\n. | [
"This",
"is",
"process_jsonlines",
"method",
"for",
"extracted",
"Wikipedia",
"file",
".",
"After",
"extracting",
"items",
"by",
"using",
"Wikiextractor",
"(",
"with",
"--",
"json",
"and",
"--",
"links",
"options",
")",
"you",
"will",
"get",
"the",
"files",
"named",
"with",
"wiki_xx",
"where",
"each",
"line",
"contains",
"the",
"information",
"of",
"each",
"article",
".",
"e",
".",
"g",
".",
"{",
"id",
":",
"316",
"url",
":",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki?curid",
"=",
"316",
"title",
":",
"Academy",
"Award",
"for",
"Best",
"Production",
"Design",
"text",
":",
"Academy",
"Award",
"for",
"Best",
"Production",
"Design",
"\\",
"n",
"\\",
"nThe",
"<a",
"href",
"=",
"\\",
"Academy%20Awards",
"\\",
">",
"Academy",
"Award<",
"/",
"a",
">",
"for",
"Best",
"Production",
"Design",
"recognizes",
"achievement",
"for",
"<a",
"href",
"=",
"\\",
"art%20direction",
"\\",
">",
"art",
"direction<",
"/",
"a",
">",
"\\",
"n",
"\\",
"n",
"}",
"This",
"function",
"takes",
"these",
"input",
"and",
"extract",
"items",
".",
"Each",
"article",
"contains",
"one",
"or",
"more",
"than",
"one",
"paragraphs",
"and",
"each",
"paragraphs",
"are",
"separeated",
"by",
"\\",
"n",
"\\",
"n",
"."
] | def process_jsonlines(filename):
"""
This is process_jsonlines method for extracted Wikipedia file.
After extracting items by using Wikiextractor (with `--json` and `--links` options),
you will get the files named with wiki_xx, where each line contains the information of each article.
e.g.,
{"id": "316", "url": "https://en.wikipedia.org/wiki?curid=316", "title": "Academy Award for Best Production Design",
"text": "Academy Award for Best Production Design\n\nThe <a href=\"Academy%20Awards\">Academy Award</a> for
Best Production Design recognizes achievement for <a href=\"art%20direction\">art direction</a> \n\n"}
    This function takes this input and extracts items.
    Each article contains one or more paragraphs, and paragraphs are separated by \n\n.
"""
# item should be nested list
extracted_items = []
with jsonlines.open(filename) as reader:
for obj in reader:
wiki_id = obj["id"]
title = obj["title"]
title_id = make_wiki_id(title, 0)
text_with_links = obj["text"]
hyper_linked_titles_text = ""
# When we consider the whole article as a document unit (e.g., SQuAD Open, Natural Questions Open)
# we'll keep the links with the original articles, and dynamically process and extract the links
# when we process with our selector.
extracted_items.append({"wiki_id": wiki_id, "title": title_id,
"plain_text": text_with_links,
"hyper_linked_titles": hyper_linked_titles_text,
"original_title": title})
return extracted_items | [
"def",
"process_jsonlines",
"(",
"filename",
")",
":",
"# item should be nested list",
"extracted_items",
"=",
"[",
"]",
"with",
"jsonlines",
".",
"open",
"(",
"filename",
")",
"as",
"reader",
":",
"for",
"obj",
"in",
"reader",
":",
"wiki_id",
"=",
"obj",
"[",
"\"id\"",
"]",
"title",
"=",
"obj",
"[",
"\"title\"",
"]",
"title_id",
"=",
"make_wiki_id",
"(",
"title",
",",
"0",
")",
"text_with_links",
"=",
"obj",
"[",
"\"text\"",
"]",
"hyper_linked_titles_text",
"=",
"\"\"",
"# When we consider the whole article as a document unit (e.g., SQuAD Open, Natural Questions Open)",
"# we'll keep the links with the original articles, and dynamically process and extract the links",
"# when we process with our selector.",
"extracted_items",
".",
"append",
"(",
"{",
"\"wiki_id\"",
":",
"wiki_id",
",",
"\"title\"",
":",
"title_id",
",",
"\"plain_text\"",
":",
"text_with_links",
",",
"\"hyper_linked_titles\"",
":",
"hyper_linked_titles_text",
",",
"\"original_title\"",
":",
"title",
"}",
")",
"return",
"extracted_items"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L42-L72 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | process_jsonlines_hotpotqa | (filename) | return extracted_items | This is process_jsonlines method for intro-only processed_wikipedia file.
The item example:
{"id": "45668011", "url": "https://en.wikipedia.org/wiki?curid=45668011", "title": "Flouch Roundabout",
"text": ["Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616."],
"charoffset": [[[0, 6],...]]
"text_with_links" : ["Flouch Roundabout is a roundabout near <a href=\"Penistone\">Penistone</a>,
<a href=\"South%20Yorkshire\">South Yorkshire</a>, England, where the <a href=\"A628%20road\">A628</a>
meets the <a href=\"A616%20road\">A616</a>."],
"charoffset_with_links": [[[0, 6], ... [213, 214]]]} | This is process_jsonlines method for intro-only processed_wikipedia file.
The item example:
{"id": "45668011", "url": "https://en.wikipedia.org/wiki?curid=45668011", "title": "Flouch Roundabout",
"text": ["Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616."],
"charoffset": [[[0, 6],...]]
"text_with_links" : ["Flouch Roundabout is a roundabout near <a href=\"Penistone\">Penistone</a>,
<a href=\"South%20Yorkshire\">South Yorkshire</a>, England, where the <a href=\"A628%20road\">A628</a>
meets the <a href=\"A616%20road\">A616</a>."],
"charoffset_with_links": [[[0, 6], ... [213, 214]]]} | [
"This",
"is",
"process_jsonlines",
"method",
"for",
"intro",
"-",
"only",
"processed_wikipedia",
"file",
".",
"The",
"item",
"example",
":",
"{",
"id",
":",
"45668011",
"url",
":",
"https",
":",
"//",
"en",
".",
"wikipedia",
".",
"org",
"/",
"wiki?curid",
"=",
"45668011",
"title",
":",
"Flouch",
"Roundabout",
"text",
":",
"[",
"Flouch",
"Roundabout",
"is",
"a",
"roundabout",
"near",
"Penistone",
"South",
"Yorkshire",
"England",
"where",
"the",
"A628",
"meets",
"the",
"A616",
".",
"]",
"charoffset",
":",
"[[[",
"0",
"6",
"]",
"...",
"]]",
"text_with_links",
":",
"[",
"Flouch",
"Roundabout",
"is",
"a",
"roundabout",
"near",
"<a",
"href",
"=",
"\\",
"Penistone",
"\\",
">",
"Penistone<",
"/",
"a",
">",
"<a",
"href",
"=",
"\\",
"South%20Yorkshire",
"\\",
">",
"South",
"Yorkshire<",
"/",
"a",
">",
"England",
"where",
"the",
"<a",
"href",
"=",
"\\",
"A628%20road",
"\\",
">",
"A628<",
"/",
"a",
">",
"meets",
"the",
"<a",
"href",
"=",
"\\",
"A616%20road",
"\\",
">",
"A616<",
"/",
"a",
">",
".",
"]",
"charoffset_with_links",
":",
"[[[",
"0",
"6",
"]",
"...",
"[",
"213",
"214",
"]]]",
"}"
] | def process_jsonlines_hotpotqa(filename):
"""
This is process_jsonlines method for intro-only processed_wikipedia file.
The item example:
{"id": "45668011", "url": "https://en.wikipedia.org/wiki?curid=45668011", "title": "Flouch Roundabout",
"text": ["Flouch Roundabout is a roundabout near Penistone, South Yorkshire, England, where the A628 meets the A616."],
"charoffset": [[[0, 6],...]]
"text_with_links" : ["Flouch Roundabout is a roundabout near <a href=\"Penistone\">Penistone</a>,
<a href=\"South%20Yorkshire\">South Yorkshire</a>, England, where the <a href=\"A628%20road\">A628</a>
meets the <a href=\"A616%20road\">A616</a>."],
"charoffset_with_links": [[[0, 6], ... [213, 214]]]}
"""
# item should be nested list
extracted_items = []
with jsonlines.open(filename) as reader:
for obj in reader:
wiki_id = obj["id"]
title = obj["title"]
title_id = make_wiki_id(title, 0)
plain_text = "\t".join(obj["text"])
text_with_links = "\t".join(obj["text_with_links"])
hyper_linked_titles = []
hyper_linked_titles = find_hyper_linked_titles(text_with_links)
if len(hyper_linked_titles) > 0:
hyper_linked_titles_text = "\t".join(hyper_linked_titles)
else:
hyper_linked_titles_text = ""
extracted_items.append({"wiki_id": wiki_id, "title": title_id,
"plain_text": plain_text,
"hyper_linked_titles": hyper_linked_titles_text,
"original_title": title})
return extracted_items | [
"def",
"process_jsonlines_hotpotqa",
"(",
"filename",
")",
":",
"# item should be nested list",
"extracted_items",
"=",
"[",
"]",
"with",
"jsonlines",
".",
"open",
"(",
"filename",
")",
"as",
"reader",
":",
"for",
"obj",
"in",
"reader",
":",
"wiki_id",
"=",
"obj",
"[",
"\"id\"",
"]",
"title",
"=",
"obj",
"[",
"\"title\"",
"]",
"title_id",
"=",
"make_wiki_id",
"(",
"title",
",",
"0",
")",
"plain_text",
"=",
"\"\\t\"",
".",
"join",
"(",
"obj",
"[",
"\"text\"",
"]",
")",
"text_with_links",
"=",
"\"\\t\"",
".",
"join",
"(",
"obj",
"[",
"\"text_with_links\"",
"]",
")",
"hyper_linked_titles",
"=",
"[",
"]",
"hyper_linked_titles",
"=",
"find_hyper_linked_titles",
"(",
"text_with_links",
")",
"if",
"len",
"(",
"hyper_linked_titles",
")",
">",
"0",
":",
"hyper_linked_titles_text",
"=",
"\"\\t\"",
".",
"join",
"(",
"hyper_linked_titles",
")",
"else",
":",
"hyper_linked_titles_text",
"=",
"\"\"",
"extracted_items",
".",
"append",
"(",
"{",
"\"wiki_id\"",
":",
"wiki_id",
",",
"\"title\"",
":",
"title_id",
",",
"\"plain_text\"",
":",
"plain_text",
",",
"\"hyper_linked_titles\"",
":",
"hyper_linked_titles_text",
",",
"\"original_title\"",
":",
"title",
"}",
")",
"return",
"extracted_items"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L74-L107 |
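The record above delegates link extraction to a find_hyper_linked_titles helper that is not included in this excerpt. The sketch below shows one plausible way such a helper could pull titles out of the text_with_links field, assuming anchors follow the <a href="URL-encoded title">...</a> pattern visible in the example; the regex and the underscore handling are illustrative assumptions, not the repository's actual implementation.

import re
from urllib.parse import unquote

def find_hyper_linked_titles_sketch(text_with_links):
    # Illustrative only: grab each anchor's href target and URL-decode it,
    # e.g. "South%20Yorkshire" -> "South Yorkshire".
    targets = re.findall(r'<a href="([^"]+)">', text_with_links)
    return [unquote(t).replace("_", " ") for t in targets]

example = ('Flouch Roundabout is a roundabout near '
           '<a href="Penistone">Penistone</a>, '
           '<a href="South%20Yorkshire">South Yorkshire</a>, England.')
print(find_hyper_linked_titles_sketch(example))
# ['Penistone', 'South Yorkshire']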
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | hash | (token, num_buckets) | return murmurhash3_32(token, positive=True) % num_buckets | Unsigned 32 bit murmurhash for feature hashing. | Unsigned 32 bit murmurhash for feature hashing. | [
"Unsigned",
"32",
"bit",
"murmurhash",
"for",
"feature",
"hashing",
"."
] | def hash(token, num_buckets):
"""Unsigned 32 bit murmurhash for feature hashing."""
return murmurhash3_32(token, positive=True) % num_buckets | [
"def",
"hash",
"(",
"token",
",",
"num_buckets",
")",
":",
"return",
"murmurhash3_32",
"(",
"token",
",",
"positive",
"=",
"True",
")",
"%",
"num_buckets"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L137-L139 |
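The hash function above is plain feature hashing: each n-gram is mapped to one of num_buckets columns with scikit-learn's murmurhash3_32. A short usage sketch (the n-grams and the bucket count are made up for illustration):

from sklearn.utils import murmurhash3_32

def hash(token, num_buckets):
    """Unsigned 32 bit murmurhash for feature hashing."""
    return murmurhash3_32(token, positive=True) % num_buckets

num_buckets = 2 ** 24
for ngram in ["flouch roundabout", "south yorkshire", "a628"]:
    print(ngram, "->", hash(ngram, num_buckets))

Distinct n-grams can collide in the same bucket; that is the usual trade-off for keeping the matrix width fixed.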
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | filter_word | (text) | return False | Take out english stopwords, punctuation, and compound endings. | Take out english stopwords, punctuation, and compound endings. | [
"Take",
"out",
"english",
"stopwords",
"punctuation",
"and",
"compound",
"endings",
"."
] | def filter_word(text):
"""Take out english stopwords, punctuation, and compound endings."""
text = normalize(text)
if regex.match(r'^\p{P}+$', text):
return True
if text.lower() in STOPWORDS:
return True
return False | [
"def",
"filter_word",
"(",
"text",
")",
":",
"text",
"=",
"normalize",
"(",
"text",
")",
"if",
"regex",
".",
"match",
"(",
"r'^\\p{P}+$'",
",",
"text",
")",
":",
"return",
"True",
"if",
"text",
".",
"lower",
"(",
")",
"in",
"STOPWORDS",
":",
"return",
"True",
"return",
"False"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L167-L174 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | filter_ngram | (gram, mode='any') | Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens | Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens | [
"Decide",
"whether",
"to",
"keep",
"or",
"discard",
"an",
"n",
"-",
"gram",
".",
"Args",
":",
"gram",
":",
"list",
"of",
"tokens",
"(",
"length",
"N",
")",
"mode",
":",
"Option",
"to",
"throw",
"out",
"ngram",
"if",
"any",
":",
"any",
"single",
"token",
"passes",
"filter_word",
"all",
":",
"all",
"tokens",
"pass",
"filter_word",
"ends",
":",
"book",
"-",
"ended",
"by",
"filterable",
"tokens"
] | def filter_ngram(gram, mode='any'):
"""Decide whether to keep or discard an n-gram.
Args:
gram: list of tokens (length N)
mode: Option to throw out ngram if
'any': any single token passes filter_word
'all': all tokens pass filter_word
'ends': book-ended by filterable tokens
"""
filtered = [filter_word(w) for w in gram]
if mode == 'any':
return any(filtered)
elif mode == 'all':
return all(filtered)
elif mode == 'ends':
return filtered[0] or filtered[-1]
else:
raise ValueError('Invalid mode: %s' % mode) | [
"def",
"filter_ngram",
"(",
"gram",
",",
"mode",
"=",
"'any'",
")",
":",
"filtered",
"=",
"[",
"filter_word",
"(",
"w",
")",
"for",
"w",
"in",
"gram",
"]",
"if",
"mode",
"==",
"'any'",
":",
"return",
"any",
"(",
"filtered",
")",
"elif",
"mode",
"==",
"'all'",
":",
"return",
"all",
"(",
"filtered",
")",
"elif",
"mode",
"==",
"'ends'",
":",
"return",
"filtered",
"[",
"0",
"]",
"or",
"filtered",
"[",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid mode: %s'",
"%",
"mode",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L177-L194 |
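The three filter_ngram modes differ only in how per-token decisions are combined. A self-contained toy run, with a stand-in word filter instead of the real filter_word (which also Unicode-normalizes the token and checks the full stopword list):

STOP = {"the", "of", "a"}

def toy_filter_word(w):
    # stand-in for filter_word: flag stopwords and pure punctuation
    return w.lower() in STOP or not any(c.isalnum() for c in w)

def filter_ngram(gram, mode='any'):
    filtered = [toy_filter_word(w) for w in gram]
    if mode == 'any':
        return any(filtered)
    elif mode == 'all':
        return all(filtered)
    elif mode == 'ends':
        return filtered[0] or filtered[-1]
    else:
        raise ValueError('Invalid mode: %s' % mode)

gram = ["bank", "of", "england"]
print(filter_ngram(gram, 'any'))   # True  -> discarded: one token is a stopword
print(filter_ngram(gram, 'all'))   # False -> kept: not every token is filterable
print(filter_ngram(gram, 'ends'))  # False -> kept: neither end token is filterable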
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | get_field | (d, field_list) | get the subfield associated to a list of elastic fields
E.g. ['file', 'filename'] to d['file']['filename'] | get the subfield associated to a list of elastic fields
E.g. ['file', 'filename'] to d['file']['filename'] | [
"get",
"the",
"subfield",
"associated",
"to",
"a",
"list",
"of",
"elastic",
"fields",
"E",
".",
"g",
".",
"[",
"file",
"filename",
"]",
"to",
"d",
"[",
"file",
"]",
"[",
"filename",
"]"
] | def get_field(d, field_list):
"""get the subfield associated to a list of elastic fields
E.g. ['file', 'filename'] to d['file']['filename']
"""
if isinstance(field_list, str):
return d[field_list]
else:
idx = d.copy()
for field in field_list:
idx = idx[field]
return idx | [
"def",
"get_field",
"(",
"d",
",",
"field_list",
")",
":",
"if",
"isinstance",
"(",
"field_list",
",",
"str",
")",
":",
"return",
"d",
"[",
"field_list",
"]",
"else",
":",
"idx",
"=",
"d",
".",
"copy",
"(",
")",
"for",
"field",
"in",
"field_list",
":",
"idx",
"=",
"idx",
"[",
"field",
"]",
"return",
"idx"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L197-L207 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/utils.py | python | load_para_and_linked_titles_dict_from_tfidf_id | (tfidf_id, db) | return paras_dict, linked_titles_dict | load paragraphs and hyperlinked titles from DB.
This method is mainly for Natural Questions Open benchmark. | load paragraphs and hyperlinked titles from DB.
This method is mainly for Natural Questions Open benchmark. | [
"load",
"paragraphs",
"and",
"hyperlinked",
"titles",
"from",
"DB",
".",
"This",
"method",
"is",
"mainly",
"for",
"Natural",
"Questions",
"Open",
"benchmark",
"."
] | def load_para_and_linked_titles_dict_from_tfidf_id(tfidf_id, db):
"""
load paragraphs and hyperlinked titles from DB.
This method is mainly for Natural Questions Open benchmark.
"""
# will be fixed in the later version; current tfidf weights use indexed titles as keys.
if "_0" not in tfidf_id:
tfidf_id = "{0}_0".format(tfidf_id)
paras, linked_titles = db.get_doc_text_hyper_linked_titles_for_articles(
tfidf_id)
if len(paras) == 0:
logger.warning("{0} is missing".format(tfidf_id))
return [], []
paras_dict = {}
linked_titles_dict = {}
article_name = tfidf_id.split("_0")[0]
# store the para_dict and linked_titles_dict; skip the first para (title)
for para_idx, (para, linked_title_list) in enumerate(zip(paras[1:], linked_titles[1:])):
paras_dict["{0}_{1}".format(article_name, para_idx)] = para
linked_titles_dict["{0}_{1}".format(
article_name, para_idx)] = linked_title_list
return paras_dict, linked_titles_dict | [
"def",
"load_para_and_linked_titles_dict_from_tfidf_id",
"(",
"tfidf_id",
",",
"db",
")",
":",
"# will be fixed in the later version; current tfidf weights use indexed titles as keys.",
"if",
"\"_0\"",
"not",
"in",
"tfidf_id",
":",
"tfidf_id",
"=",
"\"{0}_0\"",
".",
"format",
"(",
"tfidf_id",
")",
"paras",
",",
"linked_titles",
"=",
"db",
".",
"get_doc_text_hyper_linked_titles_for_articles",
"(",
"tfidf_id",
")",
"if",
"len",
"(",
"paras",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"{0} is missing\"",
".",
"format",
"(",
"tfidf_id",
")",
")",
"return",
"[",
"]",
",",
"[",
"]",
"paras_dict",
"=",
"{",
"}",
"linked_titles_dict",
"=",
"{",
"}",
"article_name",
"=",
"tfidf_id",
".",
"split",
"(",
"\"_0\"",
")",
"[",
"0",
"]",
"# store the para_dict and linked_titles_dict; skip the first para (title)",
"for",
"para_idx",
",",
"(",
"para",
",",
"linked_title_list",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"paras",
"[",
"1",
":",
"]",
",",
"linked_titles",
"[",
"1",
":",
"]",
")",
")",
":",
"paras_dict",
"[",
"\"{0}_{1}\"",
".",
"format",
"(",
"article_name",
",",
"para_idx",
")",
"]",
"=",
"para",
"linked_titles_dict",
"[",
"\"{0}_{1}\"",
".",
"format",
"(",
"article_name",
",",
"para_idx",
")",
"]",
"=",
"linked_title_list",
"return",
"paras_dict",
",",
"linked_titles_dict"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/utils.py#L227-L250 |
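The keys built here follow an "{article_title}_{paragraph_index}" pattern, with indexing starting after the title paragraph that the loop skips. A toy illustration of that loop (the article name, paragraphs, and links are made up):

article_name = "Flouch Roundabout"
paras = ["Flouch Roundabout",                       # title paragraph, skipped
         "First paragraph, which mentions Penistone.",
         "Second paragraph."]
linked_titles = [[], ["Penistone"], []]

paras_dict, linked_titles_dict = {}, {}
for para_idx, (para, links) in enumerate(zip(paras[1:], linked_titles[1:])):
    paras_dict["{0}_{1}".format(article_name, para_idx)] = para
    linked_titles_dict["{0}_{1}".format(article_name, para_idx)] = links

print(sorted(paras_dict))
# ['Flouch Roundabout_0', 'Flouch Roundabout_1']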
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.__init__ | (self, tfidf_path=None, strict=True) | Args:
tfidf_path: path to saved model file
strict: fail on empty queries or continue (and return empty result) | Args:
tfidf_path: path to saved model file
strict: fail on empty queries or continue (and return empty result) | [
"Args",
":",
"tfidf_path",
":",
"path",
"to",
"saved",
"model",
"file",
"strict",
":",
"fail",
"on",
"empty",
"queries",
"or",
"continue",
"(",
"and",
"return",
"empty",
"result",
")"
] | def __init__(self, tfidf_path=None, strict=True):
"""
Args:
tfidf_path: path to saved model file
strict: fail on empty queries or continue (and return empty result)
"""
# Load from disk
tfidf_path = tfidf_path
logger.info('Loading %s' % tfidf_path)
matrix, metadata = load_sparse_csr(tfidf_path)
self.doc_mat = matrix
self.ngrams = metadata['ngram']
self.hash_size = metadata['hash_size']
self.tokenizer = SimpleTokenizer()
self.doc_freqs = metadata['doc_freqs'].squeeze()
self.doc_dict = metadata['doc_dict']
self.num_docs = len(self.doc_dict[0])
self.strict = strict | [
"def",
"__init__",
"(",
"self",
",",
"tfidf_path",
"=",
"None",
",",
"strict",
"=",
"True",
")",
":",
"# Load from disk",
"tfidf_path",
"=",
"tfidf_path",
"logger",
".",
"info",
"(",
"'Loading %s'",
"%",
"tfidf_path",
")",
"matrix",
",",
"metadata",
"=",
"load_sparse_csr",
"(",
"tfidf_path",
")",
"self",
".",
"doc_mat",
"=",
"matrix",
"self",
".",
"ngrams",
"=",
"metadata",
"[",
"'ngram'",
"]",
"self",
".",
"hash_size",
"=",
"metadata",
"[",
"'hash_size'",
"]",
"self",
".",
"tokenizer",
"=",
"SimpleTokenizer",
"(",
")",
"self",
".",
"doc_freqs",
"=",
"metadata",
"[",
"'doc_freqs'",
"]",
".",
"squeeze",
"(",
")",
"self",
".",
"doc_dict",
"=",
"metadata",
"[",
"'doc_dict'",
"]",
"self",
".",
"num_docs",
"=",
"len",
"(",
"self",
".",
"doc_dict",
"[",
"0",
"]",
")",
"self",
".",
"strict",
"=",
"strict"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L31-L48 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.get_doc_index | (self, doc_id) | return self.doc_dict[0][doc_id] | Convert doc_id --> doc_index | Convert doc_id --> doc_index | [
"Convert",
"doc_id",
"--",
">",
"doc_index"
] | def get_doc_index(self, doc_id):
"""Convert doc_id --> doc_index"""
return self.doc_dict[0][doc_id] | [
"def",
"get_doc_index",
"(",
"self",
",",
"doc_id",
")",
":",
"return",
"self",
".",
"doc_dict",
"[",
"0",
"]",
"[",
"doc_id",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L50-L52 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.get_doc_id | (self, doc_index) | return self.doc_dict[1][doc_index] | Convert doc_index --> doc_id | Convert doc_index --> doc_id | [
"Convert",
"doc_index",
"--",
">",
"doc_id"
] | def get_doc_id(self, doc_index):
"""Convert doc_index --> doc_id"""
return self.doc_dict[1][doc_index] | [
"def",
"get_doc_id",
"(",
"self",
",",
"doc_index",
")",
":",
"return",
"self",
".",
"doc_dict",
"[",
"1",
"]",
"[",
"doc_index",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L54-L56 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.closest_docs | (self, query, k=1) | return doc_ids, doc_scores | Closest docs by dot product between query and documents
in tfidf weighted word vector space. | Closest docs by dot product between query and documents
in tfidf weighted word vector space. | [
"Closest",
"docs",
"by",
"dot",
"product",
"between",
"query",
"and",
"documents",
"in",
"tfidf",
"weighted",
"word",
"vector",
"space",
"."
] | def closest_docs(self, query, k=1):
"""Closest docs by dot product between query and documents
in tfidf weighted word vector space.
"""
spvec = self.text2spvec(query)
res = spvec * self.doc_mat
if len(res.data) <= k:
o_sort = np.argsort(-res.data)
else:
o = np.argpartition(-res.data, k)[0:k]
o_sort = o[np.argsort(-res.data[o])]
doc_scores = res.data[o_sort]
doc_ids = [self.get_doc_id(i) for i in res.indices[o_sort]]
return doc_ids, doc_scores | [
"def",
"closest_docs",
"(",
"self",
",",
"query",
",",
"k",
"=",
"1",
")",
":",
"spvec",
"=",
"self",
".",
"text2spvec",
"(",
"query",
")",
"res",
"=",
"spvec",
"*",
"self",
".",
"doc_mat",
"if",
"len",
"(",
"res",
".",
"data",
")",
"<=",
"k",
":",
"o_sort",
"=",
"np",
".",
"argsort",
"(",
"-",
"res",
".",
"data",
")",
"else",
":",
"o",
"=",
"np",
".",
"argpartition",
"(",
"-",
"res",
".",
"data",
",",
"k",
")",
"[",
"0",
":",
"k",
"]",
"o_sort",
"=",
"o",
"[",
"np",
".",
"argsort",
"(",
"-",
"res",
".",
"data",
"[",
"o",
"]",
")",
"]",
"doc_scores",
"=",
"res",
".",
"data",
"[",
"o_sort",
"]",
"doc_ids",
"=",
"[",
"self",
".",
"get_doc_id",
"(",
"i",
")",
"for",
"i",
"in",
"res",
".",
"indices",
"[",
"o_sort",
"]",
"]",
"return",
"doc_ids",
",",
"doc_scores"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L58-L73 |
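The only subtle step in closest_docs is the top-k selection over the sparse scores: np.argpartition isolates the k largest entries in linear time, and a second argsort orders just those k. The same pattern on a small dense array (the scores are made up):

import numpy as np

scores = np.array([0.1, 2.7, 0.0, 1.3, 0.9, 3.2])
k = 3

if len(scores) <= k:
    top = np.argsort(-scores)
else:
    o = np.argpartition(-scores, k)[0:k]    # indices of the k best, unordered
    top = o[np.argsort(-scores[o])]         # order those k by descending score

print(top, scores[top])
# [5 1 3] [3.2 2.7 1.3]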
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.batch_closest_docs | (self, queries, k=1, num_workers=None) | return results | Process a batch of closest_docs requests multithreaded.
Note: we can use plain threads here as scipy is outside of the GIL. | Process a batch of closest_docs requests multithreaded.
Note: we can use plain threads here as scipy is outside of the GIL. | [
"Process",
"a",
"batch",
"of",
"closest_docs",
"requests",
"multithreaded",
".",
"Note",
":",
"we",
"can",
"use",
"plain",
"threads",
"here",
"as",
"scipy",
"is",
"outside",
"of",
"the",
"GIL",
"."
] | def batch_closest_docs(self, queries, k=1, num_workers=None):
"""Process a batch of closest_docs requests multithreaded.
Note: we can use plain threads here as scipy is outside of the GIL.
"""
with ThreadPool(num_workers) as threads:
closest_docs = partial(self.closest_docs, k=k)
results = threads.map(closest_docs, queries)
return results | [
"def",
"batch_closest_docs",
"(",
"self",
",",
"queries",
",",
"k",
"=",
"1",
",",
"num_workers",
"=",
"None",
")",
":",
"with",
"ThreadPool",
"(",
"num_workers",
")",
"as",
"threads",
":",
"closest_docs",
"=",
"partial",
"(",
"self",
".",
"closest_docs",
",",
"k",
"=",
"k",
")",
"results",
"=",
"threads",
".",
"map",
"(",
"closest_docs",
",",
"queries",
")",
"return",
"results"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L75-L82 |
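batch_closest_docs fans per-query scoring out over a thread pool; since the heavy work is a scipy sparse product, the threads are not serialized by the GIL. The same partial + ThreadPool.map pattern in isolation, with a stand-in scoring function:

from functools import partial
from multiprocessing.pool import ThreadPool

def closest_docs(query, k=1):
    # stand-in for the real sparse-matrix scoring
    return ["{0}-doc{1}".format(query, i) for i in range(k)]

queries = ["who built the a628", "where is penistone"]
with ThreadPool(4) as threads:
    fn = partial(closest_docs, k=2)
    results = threads.map(fn, queries)
print(results)
# [['who built the a628-doc0', 'who built the a628-doc1'],
#  ['where is penistone-doc0', 'where is penistone-doc1']]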
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.parse | (self, query) | return tokens.ngrams(n=self.ngrams, uncased=True,
filter_fn=filter_ngram) | Parse the query into tokens (either ngrams or tokens). | Parse the query into tokens (either ngrams or tokens). | [
"Parse",
"the",
"query",
"into",
"tokens",
"(",
"either",
"ngrams",
"or",
"tokens",
")",
"."
] | def parse(self, query):
"""Parse the query into tokens (either ngrams or tokens)."""
tokens = self.tokenizer.tokenize(query)
return tokens.ngrams(n=self.ngrams, uncased=True,
filter_fn=filter_ngram) | [
"def",
"parse",
"(",
"self",
",",
"query",
")",
":",
"tokens",
"=",
"self",
".",
"tokenizer",
".",
"tokenize",
"(",
"query",
")",
"return",
"tokens",
".",
"ngrams",
"(",
"n",
"=",
"self",
".",
"ngrams",
",",
"uncased",
"=",
"True",
",",
"filter_fn",
"=",
"filter_ngram",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L84-L88 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | retriever/tfidf_doc_ranker.py | python | TfidfDocRanker.text2spvec | (self, query) | return spvec | Create a sparse tfidf-weighted word vector from query.
tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5)) | Create a sparse tfidf-weighted word vector from query. | [
"Create",
"a",
"sparse",
"tfidf",
"-",
"weighted",
"word",
"vector",
"from",
"query",
"."
] | def text2spvec(self, query):
"""Create a sparse tfidf-weighted word vector from query.
tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5))
"""
# Get hashed ngrams
# TODO: do we need to have normalize?
words = self.parse(normalize(query))
wids = [hash(w, self.hash_size) for w in words]
if len(wids) == 0:
if self.strict:
raise RuntimeError('No valid word in: %s' % query)
else:
logger.warning('No valid word in: %s' % query)
return sp.csr_matrix((1, self.hash_size))
# Count TF
wids_unique, wids_counts = np.unique(wids, return_counts=True)
tfs = np.log1p(wids_counts)
# Count IDF
Ns = self.doc_freqs[wids_unique]
idfs = np.log((self.num_docs - Ns + 0.5) / (Ns + 0.5))
idfs[idfs < 0] = 0
# TF-IDF
data = np.multiply(tfs, idfs)
# One row, sparse csr matrix
indptr = np.array([0, len(wids_unique)])
spvec = sp.csr_matrix(
(data, wids_unique, indptr), shape=(1, self.hash_size)
)
return spvec | [
"def",
"text2spvec",
"(",
"self",
",",
"query",
")",
":",
"# Get hashed ngrams",
"# TODO: do we need to have normalize?",
"words",
"=",
"self",
".",
"parse",
"(",
"normalize",
"(",
"query",
")",
")",
"wids",
"=",
"[",
"hash",
"(",
"w",
",",
"self",
".",
"hash_size",
")",
"for",
"w",
"in",
"words",
"]",
"if",
"len",
"(",
"wids",
")",
"==",
"0",
":",
"if",
"self",
".",
"strict",
":",
"raise",
"RuntimeError",
"(",
"'No valid word in: %s'",
"%",
"query",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'No valid word in: %s'",
"%",
"query",
")",
"return",
"sp",
".",
"csr_matrix",
"(",
"(",
"1",
",",
"self",
".",
"hash_size",
")",
")",
"# Count TF",
"wids_unique",
",",
"wids_counts",
"=",
"np",
".",
"unique",
"(",
"wids",
",",
"return_counts",
"=",
"True",
")",
"tfs",
"=",
"np",
".",
"log1p",
"(",
"wids_counts",
")",
"# Count IDF",
"Ns",
"=",
"self",
".",
"doc_freqs",
"[",
"wids_unique",
"]",
"idfs",
"=",
"np",
".",
"log",
"(",
"(",
"self",
".",
"num_docs",
"-",
"Ns",
"+",
"0.5",
")",
"/",
"(",
"Ns",
"+",
"0.5",
")",
")",
"idfs",
"[",
"idfs",
"<",
"0",
"]",
"=",
"0",
"# TF-IDF",
"data",
"=",
"np",
".",
"multiply",
"(",
"tfs",
",",
"idfs",
")",
"# One row, sparse csr matrix",
"indptr",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"len",
"(",
"wids_unique",
")",
"]",
")",
"spvec",
"=",
"sp",
".",
"csr_matrix",
"(",
"(",
"data",
",",
"wids_unique",
",",
"indptr",
")",
",",
"shape",
"=",
"(",
"1",
",",
"self",
".",
"hash_size",
")",
")",
"return",
"spvec"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/retriever/tfidf_doc_ranker.py#L90-L125 |
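text2spvec applies the weighting tfidf = log(tf + 1) * log((N - Nt + 0.5) / (Nt + 0.5)) over hashed query n-grams and packs the result into a one-row CSR matrix. A reduced sketch of the same arithmetic with toy numbers (the bucket count, corpus size, document frequencies, and hashed ids are all made up):

import numpy as np
import scipy.sparse as sp

hash_size = 16                         # toy bucket count
num_docs = 1000                        # toy corpus size
doc_freqs = np.zeros(hash_size)
doc_freqs[[3, 7]] = [5, 400]           # bucket 3 is rare, bucket 7 is common

wids = [3, 3, 7]                       # hashed query n-grams; bucket 3 occurs twice
wids_unique, wids_counts = np.unique(wids, return_counts=True)
tfs = np.log1p(wids_counts)            # log(tf + 1)
Ns = doc_freqs[wids_unique]
idfs = np.log((num_docs - Ns + 0.5) / (Ns + 0.5))
idfs[idfs < 0] = 0                     # clamp negative idf for very common buckets
data = tfs * idfs

indptr = np.array([0, len(wids_unique)])
spvec = sp.csr_matrix((data, wids_unique, indptr), shape=(1, hash_size))
print(spvec.toarray().round(3))        # non-zeros only in columns 3 and 7

Multiplying this 1 x hash_size row against the hash_size x num_docs doc_mat then yields one relevance score per document, which is what closest_docs ranks.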
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | read_squad_examples | (input_file, is_training, version_2_with_negative, max_answer_len=100000, skip_negatives=False) | return examples | Read a SQuAD json file into a list of SquadExample. | Read a SQuAD json file into a list of SquadExample. | [
"Read",
"a",
"SQuAD",
"json",
"file",
"into",
"a",
"list",
"of",
"SquadExample",
"."
] | def read_squad_examples(input_file, is_training, version_2_with_negative, max_answer_len=100000, skip_negatives=False):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = str(qa["id"])
# this is temporary added to see whether reducing the negatives
# improves the performance.
if skip_negatives is True and "_NEGATIVE_" in qas_id:
continue
if "FAKE2" in qas_id:
continue
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if is_impossible is False:
switch = 0
else:
switch = 1
# if (len(qa["answers"]) != 1) and (not is_impossible):
# raise ValueError(
# "For training, each question should have exactly 1 answer.")
if not is_impossible:
answers = qa['answers']
if type(answers) == list:
answer = qa["answers"][0]
else:
answer = answers
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset +
answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
if len(orig_answer_text.split()) > max_answer_len:
logger.info(
"Omitting a long answer: '%s'", orig_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
if len(qa["answers"]) > 0:
answer = qa["answers"][0]
# Make sure that answer text will be preserved for
# yes/no.
if answer["text"] in ["yes", "no"]:
orig_answer_text = answer["text"]
if not is_training:
switch = None
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
switch=switch,
is_impossible=is_impossible)
examples.append(example)
return examples | [
"def",
"read_squad_examples",
"(",
"input_file",
",",
"is_training",
",",
"version_2_with_negative",
",",
"max_answer_len",
"=",
"100000",
",",
"skip_negatives",
"=",
"False",
")",
":",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"reader",
":",
"input_data",
"=",
"json",
".",
"load",
"(",
"reader",
")",
"[",
"\"data\"",
"]",
"def",
"is_whitespace",
"(",
"c",
")",
":",
"if",
"c",
"==",
"\" \"",
"or",
"c",
"==",
"\"\\t\"",
"or",
"c",
"==",
"\"\\r\"",
"or",
"c",
"==",
"\"\\n\"",
"or",
"ord",
"(",
"c",
")",
"==",
"0x202F",
":",
"return",
"True",
"return",
"False",
"examples",
"=",
"[",
"]",
"for",
"entry",
"in",
"input_data",
":",
"for",
"paragraph",
"in",
"entry",
"[",
"\"paragraphs\"",
"]",
":",
"paragraph_text",
"=",
"paragraph",
"[",
"\"context\"",
"]",
"doc_tokens",
"=",
"[",
"]",
"char_to_word_offset",
"=",
"[",
"]",
"prev_is_whitespace",
"=",
"True",
"for",
"c",
"in",
"paragraph_text",
":",
"if",
"is_whitespace",
"(",
"c",
")",
":",
"prev_is_whitespace",
"=",
"True",
"else",
":",
"if",
"prev_is_whitespace",
":",
"doc_tokens",
".",
"append",
"(",
"c",
")",
"else",
":",
"doc_tokens",
"[",
"-",
"1",
"]",
"+=",
"c",
"prev_is_whitespace",
"=",
"False",
"char_to_word_offset",
".",
"append",
"(",
"len",
"(",
"doc_tokens",
")",
"-",
"1",
")",
"for",
"qa",
"in",
"paragraph",
"[",
"\"qas\"",
"]",
":",
"qas_id",
"=",
"str",
"(",
"qa",
"[",
"\"id\"",
"]",
")",
"# this is temporary added to see whether reducing the negatives",
"# improves the performance.",
"if",
"skip_negatives",
"is",
"True",
"and",
"\"_NEGATIVE_\"",
"in",
"qas_id",
":",
"continue",
"if",
"\"FAKE2\"",
"in",
"qas_id",
":",
"continue",
"question_text",
"=",
"qa",
"[",
"\"question\"",
"]",
"start_position",
"=",
"None",
"end_position",
"=",
"None",
"orig_answer_text",
"=",
"None",
"is_impossible",
"=",
"False",
"if",
"is_training",
":",
"if",
"version_2_with_negative",
":",
"is_impossible",
"=",
"qa",
"[",
"\"is_impossible\"",
"]",
"if",
"is_impossible",
"is",
"False",
":",
"switch",
"=",
"0",
"else",
":",
"switch",
"=",
"1",
"# if (len(qa[\"answers\"]) != 1) and (not is_impossible):",
"# raise ValueError(",
"# \"For training, each question should have exactly 1 answer.\")",
"if",
"not",
"is_impossible",
":",
"answers",
"=",
"qa",
"[",
"'answers'",
"]",
"if",
"type",
"(",
"answers",
")",
"==",
"list",
":",
"answer",
"=",
"qa",
"[",
"\"answers\"",
"]",
"[",
"0",
"]",
"else",
":",
"answer",
"=",
"answers",
"orig_answer_text",
"=",
"answer",
"[",
"\"text\"",
"]",
"answer_offset",
"=",
"answer",
"[",
"\"answer_start\"",
"]",
"answer_length",
"=",
"len",
"(",
"orig_answer_text",
")",
"start_position",
"=",
"char_to_word_offset",
"[",
"answer_offset",
"]",
"end_position",
"=",
"char_to_word_offset",
"[",
"answer_offset",
"+",
"answer_length",
"-",
"1",
"]",
"# Only add answers where the text can be exactly recovered from the",
"# document. If this CAN'T happen it's likely due to weird Unicode",
"# stuff so we will just skip the example.",
"#",
"# Note that this means for training mode, every example is NOT",
"# guaranteed to be preserved.",
"actual_text",
"=",
"\" \"",
".",
"join",
"(",
"doc_tokens",
"[",
"start_position",
":",
"(",
"end_position",
"+",
"1",
")",
"]",
")",
"cleaned_answer_text",
"=",
"\" \"",
".",
"join",
"(",
"whitespace_tokenize",
"(",
"orig_answer_text",
")",
")",
"if",
"actual_text",
".",
"find",
"(",
"cleaned_answer_text",
")",
"==",
"-",
"1",
":",
"logger",
".",
"warning",
"(",
"\"Could not find answer: '%s' vs. '%s'\"",
",",
"actual_text",
",",
"cleaned_answer_text",
")",
"continue",
"if",
"len",
"(",
"orig_answer_text",
".",
"split",
"(",
")",
")",
">",
"max_answer_len",
":",
"logger",
".",
"info",
"(",
"\"Omitting a long answer: '%s'\"",
",",
"orig_answer_text",
")",
"continue",
"else",
":",
"start_position",
"=",
"-",
"1",
"end_position",
"=",
"-",
"1",
"orig_answer_text",
"=",
"\"\"",
"if",
"len",
"(",
"qa",
"[",
"\"answers\"",
"]",
")",
">",
"0",
":",
"answer",
"=",
"qa",
"[",
"\"answers\"",
"]",
"[",
"0",
"]",
"# Make sure that answer text will be preserved for",
"# yes/no.",
"if",
"answer",
"[",
"\"text\"",
"]",
"in",
"[",
"\"yes\"",
",",
"\"no\"",
"]",
":",
"orig_answer_text",
"=",
"answer",
"[",
"\"text\"",
"]",
"if",
"not",
"is_training",
":",
"switch",
"=",
"None",
"example",
"=",
"SquadExample",
"(",
"qas_id",
"=",
"qas_id",
",",
"question_text",
"=",
"question_text",
",",
"doc_tokens",
"=",
"doc_tokens",
",",
"orig_answer_text",
"=",
"orig_answer_text",
",",
"start_position",
"=",
"start_position",
",",
"end_position",
"=",
"end_position",
",",
"switch",
"=",
"switch",
",",
"is_impossible",
"=",
"is_impossible",
")",
"examples",
".",
"append",
"(",
"example",
")",
"return",
"examples"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L101-L206 |
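Most of read_squad_examples is the char_to_word_offset bookkeeping, which turns a character-level answer_start into word-level start/end positions. The mapping in isolation, slightly simplified (the 0x202F narrow no-break space check is dropped, and the context and answer are made up):

def char_to_word_offsets(paragraph_text):
    doc_tokens, char_to_word_offset = [], []
    prev_is_whitespace = True
    for c in paragraph_text:
        if c in " \t\r\n":
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[-1] += c
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset

context = "Flouch Roundabout is near Penistone"
tokens, offsets = char_to_word_offsets(context)
answer_start, answer_text = context.index("Penistone"), "Penistone"
start = offsets[answer_start]
end = offsets[answer_start + len(answer_text) - 1]
print(tokens[start:end + 1])   # ['Penistone']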
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | convert_examples_to_features | (examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True,
quiet=False) | return features | Loads a data file into a list of `InputBatch`s. | Loads a data file into a list of `InputBatch`s. | [
"Loads",
"a",
"data",
"file",
"into",
"a",
"list",
"of",
"InputBatch",
"s",
"."
] | def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True,
quiet=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# Query
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
switch = None
if is_training and not span_is_impossible:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
switch = 1
elif is_training and not span_is_impossible:
switch = 0
# The questions whose ``is_impossible'' are originally True should
# be 1.
if example.is_impossible is True:
switch = 1
if example_index < 20 and not quiet:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(
tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
switch=switch,
is_impossible=span_is_impossible))
unique_id += 1
return features | [
"def",
"convert_examples_to_features",
"(",
"examples",
",",
"tokenizer",
",",
"max_seq_length",
",",
"doc_stride",
",",
"max_query_length",
",",
"is_training",
",",
"cls_token_at_end",
"=",
"False",
",",
"cls_token",
"=",
"'[CLS]'",
",",
"sep_token",
"=",
"'[SEP]'",
",",
"pad_token",
"=",
"0",
",",
"sequence_a_segment_id",
"=",
"0",
",",
"sequence_b_segment_id",
"=",
"1",
",",
"cls_token_segment_id",
"=",
"0",
",",
"pad_token_segment_id",
"=",
"0",
",",
"mask_padding_with_zero",
"=",
"True",
",",
"quiet",
"=",
"False",
")",
":",
"unique_id",
"=",
"1000000000",
"features",
"=",
"[",
"]",
"for",
"(",
"example_index",
",",
"example",
")",
"in",
"enumerate",
"(",
"examples",
")",
":",
"query_tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"example",
".",
"question_text",
")",
"if",
"len",
"(",
"query_tokens",
")",
">",
"max_query_length",
":",
"query_tokens",
"=",
"query_tokens",
"[",
"0",
":",
"max_query_length",
"]",
"tok_to_orig_index",
"=",
"[",
"]",
"orig_to_tok_index",
"=",
"[",
"]",
"all_doc_tokens",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"token",
")",
"in",
"enumerate",
"(",
"example",
".",
"doc_tokens",
")",
":",
"orig_to_tok_index",
".",
"append",
"(",
"len",
"(",
"all_doc_tokens",
")",
")",
"sub_tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"token",
")",
"for",
"sub_token",
"in",
"sub_tokens",
":",
"tok_to_orig_index",
".",
"append",
"(",
"i",
")",
"all_doc_tokens",
".",
"append",
"(",
"sub_token",
")",
"tok_start_position",
"=",
"None",
"tok_end_position",
"=",
"None",
"if",
"is_training",
"and",
"example",
".",
"is_impossible",
":",
"tok_start_position",
"=",
"-",
"1",
"tok_end_position",
"=",
"-",
"1",
"if",
"is_training",
"and",
"not",
"example",
".",
"is_impossible",
":",
"tok_start_position",
"=",
"orig_to_tok_index",
"[",
"example",
".",
"start_position",
"]",
"if",
"example",
".",
"end_position",
"<",
"len",
"(",
"example",
".",
"doc_tokens",
")",
"-",
"1",
":",
"tok_end_position",
"=",
"orig_to_tok_index",
"[",
"example",
".",
"end_position",
"+",
"1",
"]",
"-",
"1",
"else",
":",
"tok_end_position",
"=",
"len",
"(",
"all_doc_tokens",
")",
"-",
"1",
"(",
"tok_start_position",
",",
"tok_end_position",
")",
"=",
"_improve_answer_span",
"(",
"all_doc_tokens",
",",
"tok_start_position",
",",
"tok_end_position",
",",
"tokenizer",
",",
"example",
".",
"orig_answer_text",
")",
"# The -3 accounts for [CLS], [SEP] and [SEP]",
"max_tokens_for_doc",
"=",
"max_seq_length",
"-",
"len",
"(",
"query_tokens",
")",
"-",
"3",
"# We can have documents that are longer than the maximum sequence length.",
"# To deal with this we do a sliding window approach, where we take chunks",
"# of the up to our max length with a stride of `doc_stride`.",
"_DocSpan",
"=",
"collections",
".",
"namedtuple",
"(",
"# pylint: disable=invalid-name",
"\"DocSpan\"",
",",
"[",
"\"start\"",
",",
"\"length\"",
"]",
")",
"doc_spans",
"=",
"[",
"]",
"start_offset",
"=",
"0",
"while",
"start_offset",
"<",
"len",
"(",
"all_doc_tokens",
")",
":",
"length",
"=",
"len",
"(",
"all_doc_tokens",
")",
"-",
"start_offset",
"if",
"length",
">",
"max_tokens_for_doc",
":",
"length",
"=",
"max_tokens_for_doc",
"doc_spans",
".",
"append",
"(",
"_DocSpan",
"(",
"start",
"=",
"start_offset",
",",
"length",
"=",
"length",
")",
")",
"if",
"start_offset",
"+",
"length",
"==",
"len",
"(",
"all_doc_tokens",
")",
":",
"break",
"start_offset",
"+=",
"min",
"(",
"length",
",",
"doc_stride",
")",
"for",
"(",
"doc_span_index",
",",
"doc_span",
")",
"in",
"enumerate",
"(",
"doc_spans",
")",
":",
"tokens",
"=",
"[",
"]",
"token_to_orig_map",
"=",
"{",
"}",
"token_is_max_context",
"=",
"{",
"}",
"segment_ids",
"=",
"[",
"]",
"p_mask",
"=",
"[",
"]",
"# CLS token at the beginning",
"if",
"not",
"cls_token_at_end",
":",
"tokens",
".",
"append",
"(",
"cls_token",
")",
"segment_ids",
".",
"append",
"(",
"cls_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"cls_index",
"=",
"0",
"# Query",
"for",
"token",
"in",
"query_tokens",
":",
"tokens",
".",
"append",
"(",
"token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_a_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# SEP token",
"tokens",
".",
"append",
"(",
"sep_token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_a_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# Paragraph",
"for",
"i",
"in",
"range",
"(",
"doc_span",
".",
"length",
")",
":",
"split_token_index",
"=",
"doc_span",
".",
"start",
"+",
"i",
"token_to_orig_map",
"[",
"len",
"(",
"tokens",
")",
"]",
"=",
"tok_to_orig_index",
"[",
"split_token_index",
"]",
"is_max_context",
"=",
"_check_is_max_context",
"(",
"doc_spans",
",",
"doc_span_index",
",",
"split_token_index",
")",
"token_is_max_context",
"[",
"len",
"(",
"tokens",
")",
"]",
"=",
"is_max_context",
"tokens",
".",
"append",
"(",
"all_doc_tokens",
"[",
"split_token_index",
"]",
")",
"segment_ids",
".",
"append",
"(",
"sequence_b_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"paragraph_len",
"=",
"doc_span",
".",
"length",
"# SEP token",
"tokens",
".",
"append",
"(",
"sep_token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_b_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# CLS token at the end",
"if",
"cls_token_at_end",
":",
"tokens",
".",
"append",
"(",
"cls_token",
")",
"segment_ids",
".",
"append",
"(",
"cls_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"cls_index",
"=",
"len",
"(",
"tokens",
")",
"-",
"1",
"# Index of classification token",
"input_ids",
"=",
"tokenizer",
".",
"convert_tokens_to_ids",
"(",
"tokens",
")",
"# The mask has 1 for real tokens and 0 for padding tokens. Only real",
"# tokens are attended to.",
"input_mask",
"=",
"[",
"1",
"if",
"mask_padding_with_zero",
"else",
"0",
"]",
"*",
"len",
"(",
"input_ids",
")",
"# Zero-pad up to the sequence length.",
"while",
"len",
"(",
"input_ids",
")",
"<",
"max_seq_length",
":",
"input_ids",
".",
"append",
"(",
"pad_token",
")",
"input_mask",
".",
"append",
"(",
"0",
"if",
"mask_padding_with_zero",
"else",
"1",
")",
"segment_ids",
".",
"append",
"(",
"pad_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"assert",
"len",
"(",
"input_ids",
")",
"==",
"max_seq_length",
"assert",
"len",
"(",
"input_mask",
")",
"==",
"max_seq_length",
"assert",
"len",
"(",
"segment_ids",
")",
"==",
"max_seq_length",
"span_is_impossible",
"=",
"example",
".",
"is_impossible",
"start_position",
"=",
"None",
"end_position",
"=",
"None",
"switch",
"=",
"None",
"if",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"doc_start",
"=",
"doc_span",
".",
"start",
"doc_end",
"=",
"doc_span",
".",
"start",
"+",
"doc_span",
".",
"length",
"-",
"1",
"out_of_span",
"=",
"False",
"if",
"not",
"(",
"tok_start_position",
">=",
"doc_start",
"and",
"tok_end_position",
"<=",
"doc_end",
")",
":",
"out_of_span",
"=",
"True",
"if",
"out_of_span",
":",
"start_position",
"=",
"0",
"end_position",
"=",
"0",
"span_is_impossible",
"=",
"True",
"else",
":",
"doc_offset",
"=",
"len",
"(",
"query_tokens",
")",
"+",
"2",
"start_position",
"=",
"tok_start_position",
"-",
"doc_start",
"+",
"doc_offset",
"end_position",
"=",
"tok_end_position",
"-",
"doc_start",
"+",
"doc_offset",
"if",
"is_training",
"and",
"span_is_impossible",
":",
"start_position",
"=",
"cls_index",
"end_position",
"=",
"cls_index",
"switch",
"=",
"1",
"elif",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"switch",
"=",
"0",
"# The questions whose ``is_impossible'' are originally True should",
"# be 1.",
"if",
"example",
".",
"is_impossible",
"is",
"True",
":",
"switch",
"=",
"1",
"if",
"example_index",
"<",
"20",
"and",
"not",
"quiet",
":",
"logger",
".",
"info",
"(",
"\"*** Example ***\"",
")",
"logger",
".",
"info",
"(",
"\"unique_id: %s\"",
"%",
"(",
"unique_id",
")",
")",
"logger",
".",
"info",
"(",
"\"example_index: %s\"",
"%",
"(",
"example_index",
")",
")",
"logger",
".",
"info",
"(",
"\"doc_span_index: %s\"",
"%",
"(",
"doc_span_index",
")",
")",
"logger",
".",
"info",
"(",
"\"tokens: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"tokens",
")",
")",
"logger",
".",
"info",
"(",
"\"token_to_orig_map: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"\"%d:%d\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"token_to_orig_map",
".",
"items",
"(",
")",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"token_is_max_context: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"\"%d:%s\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"token_is_max_context",
".",
"items",
"(",
")",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_ids",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_mask: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_mask",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"segment_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"segment_ids",
"]",
")",
")",
"if",
"is_training",
"and",
"span_is_impossible",
":",
"logger",
".",
"info",
"(",
"\"impossible example\"",
")",
"if",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"answer_text",
"=",
"\" \"",
".",
"join",
"(",
"tokens",
"[",
"start_position",
":",
"(",
"end_position",
"+",
"1",
")",
"]",
")",
"logger",
".",
"info",
"(",
"\"start_position: %d\"",
"%",
"(",
"start_position",
")",
")",
"logger",
".",
"info",
"(",
"\"end_position: %d\"",
"%",
"(",
"end_position",
")",
")",
"logger",
".",
"info",
"(",
"\"answer: %s\"",
"%",
"(",
"answer_text",
")",
")",
"features",
".",
"append",
"(",
"InputFeatures",
"(",
"unique_id",
"=",
"unique_id",
",",
"example_index",
"=",
"example_index",
",",
"doc_span_index",
"=",
"doc_span_index",
",",
"tokens",
"=",
"tokens",
",",
"token_to_orig_map",
"=",
"token_to_orig_map",
",",
"token_is_max_context",
"=",
"token_is_max_context",
",",
"input_ids",
"=",
"input_ids",
",",
"input_mask",
"=",
"input_mask",
",",
"segment_ids",
"=",
"segment_ids",
",",
"cls_index",
"=",
"cls_index",
",",
"p_mask",
"=",
"p_mask",
",",
"paragraph_len",
"=",
"paragraph_len",
",",
"start_position",
"=",
"start_position",
",",
"end_position",
"=",
"end_position",
",",
"switch",
"=",
"switch",
",",
"is_impossible",
"=",
"span_is_impossible",
")",
")",
"unique_id",
"+=",
"1",
"return",
"features"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L209-L420 |
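The sliding-window logic in convert_examples_to_features splits a long document into overlapping spans of at most max_tokens_for_doc sub-tokens, advancing doc_stride tokens at a time. The span computation on its own, with made-up sizes:

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    doc_spans, start_offset = [], 0
    while start_offset < num_doc_tokens:
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

print(make_doc_spans(num_doc_tokens=10, max_tokens_for_doc=4, doc_stride=2))
# [DocSpan(start=0, length=4), DocSpan(start=2, length=4),
#  DocSpan(start=4, length=4), DocSpan(start=6, length=4)]

Tokens that fall into several spans are later deduplicated by _check_is_max_context, which keeps each token's prediction only in the span where it has the most surrounding context.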
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | convert_examples_to_features_yes_no | (examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True) | return features | Loads a data file into a list of `InputBatch`s. | Loads a data file into a list of `InputBatch`s. | [
"Loads",
"a",
"data",
"file",
"into",
"a",
"list",
"of",
"InputBatch",
"s",
"."
] | def convert_examples_to_features_yes_no(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
            # p_mask: mask with 1 for token that cannot be in the answer (0 for token which can be in an answer)
# Original TF implem also keep the classification token (set to 0)
# (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# Query
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
switch = None
if is_training and not span_is_impossible:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
switch = 1
elif is_training and not span_is_impossible:
switch = 0
# The questions whose ``is_impossible'' are originally True should
# be 1. Change switch to 2 or 3 if the answer is yes/no.
if example.is_impossible is True:
if example.orig_answer_text == "yes":
switch = 2
elif example.orig_answer_text == "no":
switch = 3
else:
switch = 1
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(
tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
switch=switch,
is_impossible=span_is_impossible))
unique_id += 1
return features | [
"def",
"convert_examples_to_features_yes_no",
"(",
"examples",
",",
"tokenizer",
",",
"max_seq_length",
",",
"doc_stride",
",",
"max_query_length",
",",
"is_training",
",",
"cls_token_at_end",
"=",
"False",
",",
"cls_token",
"=",
"'[CLS]'",
",",
"sep_token",
"=",
"'[SEP]'",
",",
"pad_token",
"=",
"0",
",",
"sequence_a_segment_id",
"=",
"0",
",",
"sequence_b_segment_id",
"=",
"1",
",",
"cls_token_segment_id",
"=",
"0",
",",
"pad_token_segment_id",
"=",
"0",
",",
"mask_padding_with_zero",
"=",
"True",
")",
":",
"unique_id",
"=",
"1000000000",
"features",
"=",
"[",
"]",
"for",
"(",
"example_index",
",",
"example",
")",
"in",
"enumerate",
"(",
"examples",
")",
":",
"query_tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"example",
".",
"question_text",
")",
"if",
"len",
"(",
"query_tokens",
")",
">",
"max_query_length",
":",
"query_tokens",
"=",
"query_tokens",
"[",
"0",
":",
"max_query_length",
"]",
"tok_to_orig_index",
"=",
"[",
"]",
"orig_to_tok_index",
"=",
"[",
"]",
"all_doc_tokens",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"token",
")",
"in",
"enumerate",
"(",
"example",
".",
"doc_tokens",
")",
":",
"orig_to_tok_index",
".",
"append",
"(",
"len",
"(",
"all_doc_tokens",
")",
")",
"sub_tokens",
"=",
"tokenizer",
".",
"tokenize",
"(",
"token",
")",
"for",
"sub_token",
"in",
"sub_tokens",
":",
"tok_to_orig_index",
".",
"append",
"(",
"i",
")",
"all_doc_tokens",
".",
"append",
"(",
"sub_token",
")",
"tok_start_position",
"=",
"None",
"tok_end_position",
"=",
"None",
"if",
"is_training",
"and",
"example",
".",
"is_impossible",
":",
"tok_start_position",
"=",
"-",
"1",
"tok_end_position",
"=",
"-",
"1",
"if",
"is_training",
"and",
"not",
"example",
".",
"is_impossible",
":",
"tok_start_position",
"=",
"orig_to_tok_index",
"[",
"example",
".",
"start_position",
"]",
"if",
"example",
".",
"end_position",
"<",
"len",
"(",
"example",
".",
"doc_tokens",
")",
"-",
"1",
":",
"tok_end_position",
"=",
"orig_to_tok_index",
"[",
"example",
".",
"end_position",
"+",
"1",
"]",
"-",
"1",
"else",
":",
"tok_end_position",
"=",
"len",
"(",
"all_doc_tokens",
")",
"-",
"1",
"(",
"tok_start_position",
",",
"tok_end_position",
")",
"=",
"_improve_answer_span",
"(",
"all_doc_tokens",
",",
"tok_start_position",
",",
"tok_end_position",
",",
"tokenizer",
",",
"example",
".",
"orig_answer_text",
")",
"# The -3 accounts for [CLS], [SEP] and [SEP]",
"max_tokens_for_doc",
"=",
"max_seq_length",
"-",
"len",
"(",
"query_tokens",
")",
"-",
"3",
"# We can have documents that are longer than the maximum sequence length.",
"# To deal with this we do a sliding window approach, where we take chunks",
"# of the up to our max length with a stride of `doc_stride`.",
"_DocSpan",
"=",
"collections",
".",
"namedtuple",
"(",
"# pylint: disable=invalid-name",
"\"DocSpan\"",
",",
"[",
"\"start\"",
",",
"\"length\"",
"]",
")",
"doc_spans",
"=",
"[",
"]",
"start_offset",
"=",
"0",
"while",
"start_offset",
"<",
"len",
"(",
"all_doc_tokens",
")",
":",
"length",
"=",
"len",
"(",
"all_doc_tokens",
")",
"-",
"start_offset",
"if",
"length",
">",
"max_tokens_for_doc",
":",
"length",
"=",
"max_tokens_for_doc",
"doc_spans",
".",
"append",
"(",
"_DocSpan",
"(",
"start",
"=",
"start_offset",
",",
"length",
"=",
"length",
")",
")",
"if",
"start_offset",
"+",
"length",
"==",
"len",
"(",
"all_doc_tokens",
")",
":",
"break",
"start_offset",
"+=",
"min",
"(",
"length",
",",
"doc_stride",
")",
"for",
"(",
"doc_span_index",
",",
"doc_span",
")",
"in",
"enumerate",
"(",
"doc_spans",
")",
":",
"tokens",
"=",
"[",
"]",
"token_to_orig_map",
"=",
"{",
"}",
"token_is_max_context",
"=",
"{",
"}",
"segment_ids",
"=",
"[",
"]",
"# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)",
"# Original TF implem also keep the classification token (set to 0)",
"# (not sure why...)",
"p_mask",
"=",
"[",
"]",
"# CLS token at the beginning",
"if",
"not",
"cls_token_at_end",
":",
"tokens",
".",
"append",
"(",
"cls_token",
")",
"segment_ids",
".",
"append",
"(",
"cls_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"cls_index",
"=",
"0",
"# Query",
"for",
"token",
"in",
"query_tokens",
":",
"tokens",
".",
"append",
"(",
"token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_a_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# SEP token",
"tokens",
".",
"append",
"(",
"sep_token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_a_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# Paragraph",
"for",
"i",
"in",
"range",
"(",
"doc_span",
".",
"length",
")",
":",
"split_token_index",
"=",
"doc_span",
".",
"start",
"+",
"i",
"token_to_orig_map",
"[",
"len",
"(",
"tokens",
")",
"]",
"=",
"tok_to_orig_index",
"[",
"split_token_index",
"]",
"is_max_context",
"=",
"_check_is_max_context",
"(",
"doc_spans",
",",
"doc_span_index",
",",
"split_token_index",
")",
"token_is_max_context",
"[",
"len",
"(",
"tokens",
")",
"]",
"=",
"is_max_context",
"tokens",
".",
"append",
"(",
"all_doc_tokens",
"[",
"split_token_index",
"]",
")",
"segment_ids",
".",
"append",
"(",
"sequence_b_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"paragraph_len",
"=",
"doc_span",
".",
"length",
"# SEP token",
"tokens",
".",
"append",
"(",
"sep_token",
")",
"segment_ids",
".",
"append",
"(",
"sequence_b_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"# CLS token at the end",
"if",
"cls_token_at_end",
":",
"tokens",
".",
"append",
"(",
"cls_token",
")",
"segment_ids",
".",
"append",
"(",
"cls_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"0",
")",
"cls_index",
"=",
"len",
"(",
"tokens",
")",
"-",
"1",
"# Index of classification token",
"input_ids",
"=",
"tokenizer",
".",
"convert_tokens_to_ids",
"(",
"tokens",
")",
"# The mask has 1 for real tokens and 0 for padding tokens. Only real",
"# tokens are attended to.",
"input_mask",
"=",
"[",
"1",
"if",
"mask_padding_with_zero",
"else",
"0",
"]",
"*",
"len",
"(",
"input_ids",
")",
"# Zero-pad up to the sequence length.",
"while",
"len",
"(",
"input_ids",
")",
"<",
"max_seq_length",
":",
"input_ids",
".",
"append",
"(",
"pad_token",
")",
"input_mask",
".",
"append",
"(",
"0",
"if",
"mask_padding_with_zero",
"else",
"1",
")",
"segment_ids",
".",
"append",
"(",
"pad_token_segment_id",
")",
"p_mask",
".",
"append",
"(",
"1",
")",
"assert",
"len",
"(",
"input_ids",
")",
"==",
"max_seq_length",
"assert",
"len",
"(",
"input_mask",
")",
"==",
"max_seq_length",
"assert",
"len",
"(",
"segment_ids",
")",
"==",
"max_seq_length",
"span_is_impossible",
"=",
"example",
".",
"is_impossible",
"start_position",
"=",
"None",
"end_position",
"=",
"None",
"switch",
"=",
"None",
"if",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"doc_start",
"=",
"doc_span",
".",
"start",
"doc_end",
"=",
"doc_span",
".",
"start",
"+",
"doc_span",
".",
"length",
"-",
"1",
"out_of_span",
"=",
"False",
"if",
"not",
"(",
"tok_start_position",
">=",
"doc_start",
"and",
"tok_end_position",
"<=",
"doc_end",
")",
":",
"out_of_span",
"=",
"True",
"if",
"out_of_span",
":",
"start_position",
"=",
"0",
"end_position",
"=",
"0",
"span_is_impossible",
"=",
"True",
"else",
":",
"doc_offset",
"=",
"len",
"(",
"query_tokens",
")",
"+",
"2",
"start_position",
"=",
"tok_start_position",
"-",
"doc_start",
"+",
"doc_offset",
"end_position",
"=",
"tok_end_position",
"-",
"doc_start",
"+",
"doc_offset",
"if",
"is_training",
"and",
"span_is_impossible",
":",
"start_position",
"=",
"cls_index",
"end_position",
"=",
"cls_index",
"switch",
"=",
"1",
"elif",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"switch",
"=",
"0",
"# The questions whose ``is_impossible'' are originally True should",
"# be 1. Change switch to 2 or 3 if the answer is yes/no.",
"if",
"example",
".",
"is_impossible",
"is",
"True",
":",
"if",
"example",
".",
"orig_answer_text",
"==",
"\"yes\"",
":",
"switch",
"=",
"2",
"elif",
"example",
".",
"orig_answer_text",
"==",
"\"no\"",
":",
"switch",
"=",
"3",
"else",
":",
"switch",
"=",
"1",
"if",
"example_index",
"<",
"20",
":",
"logger",
".",
"info",
"(",
"\"*** Example ***\"",
")",
"logger",
".",
"info",
"(",
"\"unique_id: %s\"",
"%",
"(",
"unique_id",
")",
")",
"logger",
".",
"info",
"(",
"\"example_index: %s\"",
"%",
"(",
"example_index",
")",
")",
"logger",
".",
"info",
"(",
"\"doc_span_index: %s\"",
"%",
"(",
"doc_span_index",
")",
")",
"logger",
".",
"info",
"(",
"\"tokens: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"tokens",
")",
")",
"logger",
".",
"info",
"(",
"\"token_to_orig_map: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"\"%d:%d\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"token_to_orig_map",
".",
"items",
"(",
")",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"token_is_max_context: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"\"%d:%s\"",
"%",
"(",
"x",
",",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"token_is_max_context",
".",
"items",
"(",
")",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_ids",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"input_mask: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"input_mask",
"]",
")",
")",
"logger",
".",
"info",
"(",
"\"segment_ids: %s\"",
"%",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"segment_ids",
"]",
")",
")",
"if",
"is_training",
"and",
"span_is_impossible",
":",
"logger",
".",
"info",
"(",
"\"impossible example\"",
")",
"if",
"is_training",
"and",
"not",
"span_is_impossible",
":",
"answer_text",
"=",
"\" \"",
".",
"join",
"(",
"tokens",
"[",
"start_position",
":",
"(",
"end_position",
"+",
"1",
")",
"]",
")",
"logger",
".",
"info",
"(",
"\"start_position: %d\"",
"%",
"(",
"start_position",
")",
")",
"logger",
".",
"info",
"(",
"\"end_position: %d\"",
"%",
"(",
"end_position",
")",
")",
"logger",
".",
"info",
"(",
"\"answer: %s\"",
"%",
"(",
"answer_text",
")",
")",
"features",
".",
"append",
"(",
"InputFeatures",
"(",
"unique_id",
"=",
"unique_id",
",",
"example_index",
"=",
"example_index",
",",
"doc_span_index",
"=",
"doc_span_index",
",",
"tokens",
"=",
"tokens",
",",
"token_to_orig_map",
"=",
"token_to_orig_map",
",",
"token_is_max_context",
"=",
"token_is_max_context",
",",
"input_ids",
"=",
"input_ids",
",",
"input_mask",
"=",
"input_mask",
",",
"segment_ids",
"=",
"segment_ids",
",",
"cls_index",
"=",
"cls_index",
",",
"p_mask",
"=",
"p_mask",
",",
"paragraph_len",
"=",
"paragraph_len",
",",
"start_position",
"=",
"start_position",
",",
"end_position",
"=",
"end_position",
",",
"switch",
"=",
"switch",
",",
"is_impossible",
"=",
"span_is_impossible",
")",
")",
"unique_id",
"+=",
"1",
"return",
"features"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L424-L643 |
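
The sliding-window chunking buried in the token list above (the `_DocSpan` / `doc_stride` loop of convert_examples_to_features_yes_no) is easier to follow in isolation. A minimal sketch of just that loop; the helper name make_doc_spans and the example numbers are illustrative and not part of the source file.

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])  # same shape as in the function

def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
    # Walk the paragraph in windows of at most max_tokens_for_doc tokens,
    # advancing by doc_stride until the last token is covered.
    doc_spans, start_offset = [], 0
    while start_offset < num_doc_tokens:
        length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
        doc_spans.append(_DocSpan(start=start_offset, length=length))
        if start_offset + length == num_doc_tokens:
            break
        start_offset += min(length, doc_stride)
    return doc_spans

# 1000 document tokens, a 384-token budget left after the query and special tokens, stride 128:
# [DocSpan(start=0, length=384), DocSpan(start=128, length=384), ..., DocSpan(start=640, length=360)]
print(make_doc_spans(1000, 384, 128))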
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | _improve_answer_span | (doc_tokens, input_start, input_end, tokenizer,
orig_answer_text) | return (input_start, input_end) | Returns tokenized answer spans that better match the annotated answer. | Returns tokenized answer spans that better match the annotated answer. | [
"Returns",
"tokenized",
"answer",
"spans",
"that",
"better",
"match",
"the",
"annotated",
"answer",
"."
] | def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end) | [
"def",
"_improve_answer_span",
"(",
"doc_tokens",
",",
"input_start",
",",
"input_end",
",",
"tokenizer",
",",
"orig_answer_text",
")",
":",
"# The SQuAD annotations are character based. We first project them to",
"# whitespace-tokenized words. But then after WordPiece tokenization, we can",
"# often find a \"better match\". For example:",
"#",
"# Question: What year was John Smith born?",
"# Context: The leader was John Smith (1895-1943).",
"# Answer: 1895",
"#",
"# The original whitespace-tokenized answer will be \"(1895-1943).\". However",
"# after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match",
"# the exact answer, 1895.",
"#",
"# However, this is not always possible. Consider the following:",
"#",
"# Question: What country is the top exporter of electornics?",
"# Context: The Japanese electronics industry is the lagest in the world.",
"# Answer: Japan",
"#",
"# In this case, the annotator chose \"Japan\" as a character sub-span of",
"# the word \"Japanese\". Since our WordPiece tokenizer does not split",
"# \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare",
"# in SQuAD, but does happen.",
"tok_answer_text",
"=",
"\" \"",
".",
"join",
"(",
"tokenizer",
".",
"tokenize",
"(",
"orig_answer_text",
")",
")",
"for",
"new_start",
"in",
"range",
"(",
"input_start",
",",
"input_end",
"+",
"1",
")",
":",
"for",
"new_end",
"in",
"range",
"(",
"input_end",
",",
"new_start",
"-",
"1",
",",
"-",
"1",
")",
":",
"text_span",
"=",
"\" \"",
".",
"join",
"(",
"doc_tokens",
"[",
"new_start",
":",
"(",
"new_end",
"+",
"1",
")",
"]",
")",
"if",
"text_span",
"==",
"tok_answer_text",
":",
"return",
"(",
"new_start",
",",
"new_end",
")",
"return",
"(",
"input_start",
",",
"input_end",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L646-L680 |
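
A minimal illustration of the span-tightening idea in _improve_answer_span, reusing the "1895" example from its own comments; here a plain whitespace token list stands in for the WordPiece tokenizer, so the helper name and data below are assumptions for demonstration only.

def tighten_span(doc_tokens, input_start, input_end, tok_answer_text):
    # Same nested scan as _improve_answer_span: return the first sub-span
    # whose joined tokens exactly equal the tokenized answer text.
    for new_start in range(input_start, input_end + 1):
        for new_end in range(input_end, new_start - 1, -1):
            if " ".join(doc_tokens[new_start:new_end + 1]) == tok_answer_text:
                return (new_start, new_end)
    return (input_start, input_end)

doc_tokens = ["The", "leader", "was", "John", "Smith", "(", "1895", "-", "1943", ")", "."]
print(tighten_span(doc_tokens, 5, 10, "1895"))  # (6, 6): the span shrinks to the single token "1895"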
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | _check_is_max_context | (doc_spans, cur_span_index, position) | return cur_span_index == best_span_index | Check if this is the 'max context' doc span for the token. | Check if this is the 'max context' doc span for the token. | [
"Check",
"if",
"this",
"is",
"the",
"max",
"context",
"doc",
"span",
"for",
"the",
"token",
"."
] | def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index | [
"def",
"_check_is_max_context",
"(",
"doc_spans",
",",
"cur_span_index",
",",
"position",
")",
":",
"# Because of the sliding window approach taken to scoring documents, a single",
"# token can appear in multiple documents. E.g.",
"# Doc: the man went to the store and bought a gallon of milk",
"# Span A: the man went to the",
"# Span B: to the store and bought",
"# Span C: and bought a gallon of",
"# ...",
"#",
"# Now the word 'bought' will have two scores from spans B and C. We only",
"# want to consider the score with \"maximum context\", which we define as",
"# the *minimum* of its left and right context (the *sum* of left and",
"# right context will always be the same, of course).",
"#",
"# In the example the maximum context for 'bought' would be span C since",
"# it has 1 left context and 3 right context, while span B has 4 left context",
"# and 0 right context.",
"best_score",
"=",
"None",
"best_span_index",
"=",
"None",
"for",
"(",
"span_index",
",",
"doc_span",
")",
"in",
"enumerate",
"(",
"doc_spans",
")",
":",
"end",
"=",
"doc_span",
".",
"start",
"+",
"doc_span",
".",
"length",
"-",
"1",
"if",
"position",
"<",
"doc_span",
".",
"start",
":",
"continue",
"if",
"position",
">",
"end",
":",
"continue",
"num_left_context",
"=",
"position",
"-",
"doc_span",
".",
"start",
"num_right_context",
"=",
"end",
"-",
"position",
"score",
"=",
"min",
"(",
"num_left_context",
",",
"num_right_context",
")",
"+",
"0.01",
"*",
"doc_span",
".",
"length",
"if",
"best_score",
"is",
"None",
"or",
"score",
">",
"best_score",
":",
"best_score",
"=",
"score",
"best_span_index",
"=",
"span_index",
"return",
"cur_span_index",
"==",
"best_span_index"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L683-L718 |
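
The scoring rule described in the comments of _check_is_max_context (minimum of left and right context plus a small length bonus) can be checked by hand; the spans and position below are made-up values for illustration.

import collections

_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

spans = [_DocSpan(start=0, length=12), _DocSpan(start=8, length=12)]
position = 10  # a token covered by both overlapping windows

for index, span in enumerate(spans):
    end = span.start + span.length - 1
    num_left = position - span.start
    num_right = end - position
    score = min(num_left, num_right) + 0.01 * span.length
    print(index, score)

# span 0: min(10, 1) + 0.12 = 1.12
# span 1: min(2, 9) + 0.12 = 2.12  -> span 1 is the "max context" span for position 10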
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | write_predictions_yes_no_no_empty_answer | (all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold, no_masking=False) | Write final predictions to the json file and log-odds of null if needed. | Write final predictions to the json file and log-odds of null if needed. | [
"Write",
"final",
"predictions",
"to",
"the",
"json",
"file",
"and",
"log",
"-",
"odds",
"of",
"null",
"if",
"needed",
"."
] | def write_predictions_yes_no_no_empty_answer(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold, no_masking=False):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
if no_masking is True:
feature_null_score = result.start_logits[0] + \
result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
# if we could have irrelevant answers, get the min score of
# irrelevant
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if no_masking is True:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit", "no_answer_logit", "switch", "switch_logits"])
no_answer_logit = result.switch_logits[1]
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(
pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(
orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(
tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit,
no_answer_logit=no_answer_logit,
switch=np.argmax(result.switch_logits),
switch_logits=result.switch_logits
))
# if we didn't include the empty option in the n-best, include it
if no_masking is True:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit,
no_answer_logit=no_answer_logit,
switch=np.argmax(result.switch_logits),
switch_logits=result.switch_logits
))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid
# failure.
if no_masking is True:
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))
else:
if len(nbest) == 0:
nbest.insert(0,
_NbestPrediction(text="", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="", start_logit=0.0, end_logit=0.0, no_answer_logit=1.0, switch=1, switch_logits=[0.0, 0.0, 0.0, 0.0]))
assert len(nbest) >= 1
total_scores = []
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
output["no_answer_prob"] = entry.no_answer_logit
output["switch"] = entry.switch
output["switch_scores"] = entry.switch_logits
nbest_json.append(output)
assert len(nbest_json) >= 1
# if the n-best is high enough, pick up no answer.
possible_answers = np.argsort(
nbest_json[0]["switch_scores"])[::-1]
if possible_answers[0] == 1:
all_predictions[example.qas_id] = switch_answers(
possible_answers[1], nbest_json[0]["text"])
else:
all_predictions[example.qas_id] = switch_answers(
possible_answers[0], nbest_json[0]["text"])
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n") | [
"def",
"write_predictions_yes_no_no_empty_answer",
"(",
"all_examples",
",",
"all_features",
",",
"all_results",
",",
"n_best_size",
",",
"max_answer_length",
",",
"do_lower_case",
",",
"output_prediction_file",
",",
"output_nbest_file",
",",
"output_null_log_odds_file",
",",
"verbose_logging",
",",
"version_2_with_negative",
",",
"null_score_diff_threshold",
",",
"no_masking",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"\"Writing predictions to: %s\"",
"%",
"(",
"output_prediction_file",
")",
")",
"logger",
".",
"info",
"(",
"\"Writing nbest to: %s\"",
"%",
"(",
"output_nbest_file",
")",
")",
"example_index_to_features",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"feature",
"in",
"all_features",
":",
"example_index_to_features",
"[",
"feature",
".",
"example_index",
"]",
".",
"append",
"(",
"feature",
")",
"unique_id_to_result",
"=",
"{",
"}",
"for",
"result",
"in",
"all_results",
":",
"unique_id_to_result",
"[",
"result",
".",
"unique_id",
"]",
"=",
"result",
"_PrelimPrediction",
"=",
"collections",
".",
"namedtuple",
"(",
"# pylint: disable=invalid-name",
"\"PrelimPrediction\"",
",",
"[",
"\"feature_index\"",
",",
"\"start_index\"",
",",
"\"end_index\"",
",",
"\"start_logit\"",
",",
"\"end_logit\"",
"]",
")",
"all_predictions",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"all_nbest_json",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"(",
"example_index",
",",
"example",
")",
"in",
"enumerate",
"(",
"all_examples",
")",
":",
"features",
"=",
"example_index_to_features",
"[",
"example_index",
"]",
"prelim_predictions",
"=",
"[",
"]",
"score_null",
"=",
"1000000",
"# large and positive",
"min_null_feature_index",
"=",
"0",
"# the paragraph slice with min null score",
"null_start_logit",
"=",
"0",
"# the start logit at the slice with min null score",
"null_end_logit",
"=",
"0",
"# the end logit at the slice with min null score",
"for",
"(",
"feature_index",
",",
"feature",
")",
"in",
"enumerate",
"(",
"features",
")",
":",
"result",
"=",
"unique_id_to_result",
"[",
"feature",
".",
"unique_id",
"]",
"start_indexes",
"=",
"_get_best_indexes",
"(",
"result",
".",
"start_logits",
",",
"n_best_size",
")",
"end_indexes",
"=",
"_get_best_indexes",
"(",
"result",
".",
"end_logits",
",",
"n_best_size",
")",
"if",
"no_masking",
"is",
"True",
":",
"feature_null_score",
"=",
"result",
".",
"start_logits",
"[",
"0",
"]",
"+",
"result",
".",
"end_logits",
"[",
"0",
"]",
"if",
"feature_null_score",
"<",
"score_null",
":",
"score_null",
"=",
"feature_null_score",
"min_null_feature_index",
"=",
"feature_index",
"null_start_logit",
"=",
"result",
".",
"start_logits",
"[",
"0",
"]",
"null_end_logit",
"=",
"result",
".",
"end_logits",
"[",
"0",
"]",
"# if we could have irrelevant answers, get the min score of",
"# irrelevant",
"for",
"start_index",
"in",
"start_indexes",
":",
"for",
"end_index",
"in",
"end_indexes",
":",
"# We could hypothetically create invalid predictions, e.g., predict",
"# that the start of the span is in the question. We throw out all",
"# invalid predictions.",
"if",
"start_index",
">=",
"len",
"(",
"feature",
".",
"tokens",
")",
":",
"continue",
"if",
"end_index",
">=",
"len",
"(",
"feature",
".",
"tokens",
")",
":",
"continue",
"if",
"start_index",
"not",
"in",
"feature",
".",
"token_to_orig_map",
":",
"continue",
"if",
"end_index",
"not",
"in",
"feature",
".",
"token_to_orig_map",
":",
"continue",
"if",
"not",
"feature",
".",
"token_is_max_context",
".",
"get",
"(",
"start_index",
",",
"False",
")",
":",
"continue",
"if",
"end_index",
"<",
"start_index",
":",
"continue",
"length",
"=",
"end_index",
"-",
"start_index",
"+",
"1",
"if",
"length",
">",
"max_answer_length",
":",
"continue",
"prelim_predictions",
".",
"append",
"(",
"_PrelimPrediction",
"(",
"feature_index",
"=",
"feature_index",
",",
"start_index",
"=",
"start_index",
",",
"end_index",
"=",
"end_index",
",",
"start_logit",
"=",
"result",
".",
"start_logits",
"[",
"start_index",
"]",
",",
"end_logit",
"=",
"result",
".",
"end_logits",
"[",
"end_index",
"]",
")",
")",
"if",
"no_masking",
"is",
"True",
":",
"prelim_predictions",
".",
"append",
"(",
"_PrelimPrediction",
"(",
"feature_index",
"=",
"min_null_feature_index",
",",
"start_index",
"=",
"0",
",",
"end_index",
"=",
"0",
",",
"start_logit",
"=",
"null_start_logit",
",",
"end_logit",
"=",
"null_end_logit",
")",
")",
"prelim_predictions",
"=",
"sorted",
"(",
"prelim_predictions",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"start_logit",
"+",
"x",
".",
"end_logit",
")",
",",
"reverse",
"=",
"True",
")",
"_NbestPrediction",
"=",
"collections",
".",
"namedtuple",
"(",
"# pylint: disable=invalid-name",
"\"NbestPrediction\"",
",",
"[",
"\"text\"",
",",
"\"start_logit\"",
",",
"\"end_logit\"",
",",
"\"no_answer_logit\"",
",",
"\"switch\"",
",",
"\"switch_logits\"",
"]",
")",
"no_answer_logit",
"=",
"result",
".",
"switch_logits",
"[",
"1",
"]",
"seen_predictions",
"=",
"{",
"}",
"nbest",
"=",
"[",
"]",
"for",
"pred",
"in",
"prelim_predictions",
":",
"if",
"len",
"(",
"nbest",
")",
">=",
"n_best_size",
":",
"break",
"feature",
"=",
"features",
"[",
"pred",
".",
"feature_index",
"]",
"if",
"pred",
".",
"start_index",
">",
"0",
":",
"# this is a non-null prediction",
"tok_tokens",
"=",
"feature",
".",
"tokens",
"[",
"pred",
".",
"start_index",
":",
"(",
"pred",
".",
"end_index",
"+",
"1",
")",
"]",
"orig_doc_start",
"=",
"feature",
".",
"token_to_orig_map",
"[",
"pred",
".",
"start_index",
"]",
"orig_doc_end",
"=",
"feature",
".",
"token_to_orig_map",
"[",
"pred",
".",
"end_index",
"]",
"orig_tokens",
"=",
"example",
".",
"doc_tokens",
"[",
"orig_doc_start",
":",
"(",
"orig_doc_end",
"+",
"1",
")",
"]",
"tok_text",
"=",
"\" \"",
".",
"join",
"(",
"tok_tokens",
")",
"# De-tokenize WordPieces that have been split off.",
"tok_text",
"=",
"tok_text",
".",
"replace",
"(",
"\" ##\"",
",",
"\"\"",
")",
"tok_text",
"=",
"tok_text",
".",
"replace",
"(",
"\"##\"",
",",
"\"\"",
")",
"# Clean whitespace",
"tok_text",
"=",
"tok_text",
".",
"strip",
"(",
")",
"tok_text",
"=",
"\" \"",
".",
"join",
"(",
"tok_text",
".",
"split",
"(",
")",
")",
"orig_text",
"=",
"\" \"",
".",
"join",
"(",
"orig_tokens",
")",
"final_text",
"=",
"get_final_text",
"(",
"tok_text",
",",
"orig_text",
",",
"do_lower_case",
",",
"verbose_logging",
")",
"if",
"final_text",
"in",
"seen_predictions",
":",
"continue",
"seen_predictions",
"[",
"final_text",
"]",
"=",
"True",
"else",
":",
"final_text",
"=",
"\"\"",
"seen_predictions",
"[",
"final_text",
"]",
"=",
"True",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"final_text",
",",
"start_logit",
"=",
"pred",
".",
"start_logit",
",",
"end_logit",
"=",
"pred",
".",
"end_logit",
",",
"no_answer_logit",
"=",
"no_answer_logit",
",",
"switch",
"=",
"np",
".",
"argmax",
"(",
"result",
".",
"switch_logits",
")",
",",
"switch_logits",
"=",
"result",
".",
"switch_logits",
")",
")",
"# if we didn't include the empty option in the n-best, include it",
"if",
"no_masking",
"is",
"True",
":",
"if",
"\"\"",
"not",
"in",
"seen_predictions",
":",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"\"\"",
",",
"start_logit",
"=",
"null_start_logit",
",",
"end_logit",
"=",
"null_end_logit",
",",
"no_answer_logit",
"=",
"no_answer_logit",
",",
"switch",
"=",
"np",
".",
"argmax",
"(",
"result",
".",
"switch_logits",
")",
",",
"switch_logits",
"=",
"result",
".",
"switch_logits",
")",
")",
"# In very rare edge cases we could only have single null prediction.",
"# So we just create a nonce prediction in this case to avoid",
"# failure.",
"if",
"no_masking",
"is",
"True",
":",
"if",
"len",
"(",
"nbest",
")",
"==",
"1",
":",
"nbest",
".",
"insert",
"(",
"0",
",",
"_NbestPrediction",
"(",
"text",
"=",
"\"\"",
",",
"start_logit",
"=",
"0.0",
",",
"end_logit",
"=",
"0.0",
",",
"no_answer_logit",
"=",
"1.0",
",",
"switch",
"=",
"1",
",",
"switch_logits",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
")",
"else",
":",
"if",
"len",
"(",
"nbest",
")",
"==",
"0",
":",
"nbest",
".",
"insert",
"(",
"0",
",",
"_NbestPrediction",
"(",
"text",
"=",
"\"\"",
",",
"start_logit",
"=",
"0.0",
",",
"end_logit",
"=",
"0.0",
",",
"no_answer_logit",
"=",
"1.0",
",",
"switch",
"=",
"1",
",",
"switch_logits",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
")",
"# In very rare edge cases we could have no valid predictions. So we",
"# just create a nonce prediction in this case to avoid failure.",
"if",
"not",
"nbest",
":",
"nbest",
".",
"append",
"(",
"_NbestPrediction",
"(",
"text",
"=",
"\"\"",
",",
"start_logit",
"=",
"0.0",
",",
"end_logit",
"=",
"0.0",
",",
"no_answer_logit",
"=",
"1.0",
",",
"switch",
"=",
"1",
",",
"switch_logits",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
")",
")",
"assert",
"len",
"(",
"nbest",
")",
">=",
"1",
"total_scores",
"=",
"[",
"]",
"for",
"entry",
"in",
"nbest",
":",
"total_scores",
".",
"append",
"(",
"entry",
".",
"start_logit",
"+",
"entry",
".",
"end_logit",
")",
"probs",
"=",
"_compute_softmax",
"(",
"total_scores",
")",
"nbest_json",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"entry",
")",
"in",
"enumerate",
"(",
"nbest",
")",
":",
"output",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"output",
"[",
"\"text\"",
"]",
"=",
"entry",
".",
"text",
"output",
"[",
"\"probability\"",
"]",
"=",
"probs",
"[",
"i",
"]",
"output",
"[",
"\"start_logit\"",
"]",
"=",
"entry",
".",
"start_logit",
"output",
"[",
"\"end_logit\"",
"]",
"=",
"entry",
".",
"end_logit",
"output",
"[",
"\"no_answer_prob\"",
"]",
"=",
"entry",
".",
"no_answer_logit",
"output",
"[",
"\"switch\"",
"]",
"=",
"entry",
".",
"switch",
"output",
"[",
"\"switch_scores\"",
"]",
"=",
"entry",
".",
"switch_logits",
"nbest_json",
".",
"append",
"(",
"output",
")",
"assert",
"len",
"(",
"nbest_json",
")",
">=",
"1",
"# if the n-best is high enough, pick up no answer.",
"possible_answers",
"=",
"np",
".",
"argsort",
"(",
"nbest_json",
"[",
"0",
"]",
"[",
"\"switch_scores\"",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"if",
"possible_answers",
"[",
"0",
"]",
"==",
"1",
":",
"all_predictions",
"[",
"example",
".",
"qas_id",
"]",
"=",
"switch_answers",
"(",
"possible_answers",
"[",
"1",
"]",
",",
"nbest_json",
"[",
"0",
"]",
"[",
"\"text\"",
"]",
")",
"else",
":",
"all_predictions",
"[",
"example",
".",
"qas_id",
"]",
"=",
"switch_answers",
"(",
"possible_answers",
"[",
"0",
"]",
",",
"nbest_json",
"[",
"0",
"]",
"[",
"\"text\"",
"]",
")",
"all_nbest_json",
"[",
"example",
".",
"qas_id",
"]",
"=",
"nbest_json",
"with",
"open",
"(",
"output_prediction_file",
",",
"\"w\"",
")",
"as",
"writer",
":",
"writer",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"all_predictions",
",",
"indent",
"=",
"4",
")",
"+",
"\"\\n\"",
")"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L724-L923 |
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | get_final_text | (pred_text, orig_text, do_lower_case, verbose_logging=False) | return output_text | Project the tokenized prediction back to the original text. | Project the tokenized prediction back to the original text. | [
"Project",
"the",
"tokenized",
"prediction",
"back",
"to",
"the",
"original",
"text",
"."
] | def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text | [
"def",
"get_final_text",
"(",
"pred_text",
",",
"orig_text",
",",
"do_lower_case",
",",
"verbose_logging",
"=",
"False",
")",
":",
"# When we created the data, we kept track of the alignment between original",
"# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So",
"# now `orig_text` contains the span of our original text corresponding to the",
"# span that we predicted.",
"#",
"# However, `orig_text` may contain extra characters that we don't want in",
"# our prediction.",
"#",
"# For example, let's say:",
"# pred_text = steve smith",
"# orig_text = Steve Smith's",
"#",
"# We don't want to return `orig_text` because it contains the extra \"'s\".",
"#",
"# We don't want to return `pred_text` because it's already been normalized",
"# (the SQuAD eval script also does punctuation stripping/lower casing but",
"# our tokenizer does additional normalization like stripping accent",
"# characters).",
"#",
"# What we really want to return is \"Steve Smith\".",
"#",
"# Therefore, we have to apply a semi-complicated alignment heuristic between",
"# `pred_text` and `orig_text` to get a character-to-character alignment. This",
"# can fail in certain cases in which case we just return `orig_text`.",
"def",
"_strip_spaces",
"(",
"text",
")",
":",
"ns_chars",
"=",
"[",
"]",
"ns_to_s_map",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"for",
"(",
"i",
",",
"c",
")",
"in",
"enumerate",
"(",
"text",
")",
":",
"if",
"c",
"==",
"\" \"",
":",
"continue",
"ns_to_s_map",
"[",
"len",
"(",
"ns_chars",
")",
"]",
"=",
"i",
"ns_chars",
".",
"append",
"(",
"c",
")",
"ns_text",
"=",
"\"\"",
".",
"join",
"(",
"ns_chars",
")",
"return",
"(",
"ns_text",
",",
"ns_to_s_map",
")",
"# We first tokenize `orig_text`, strip whitespace from the result",
"# and `pred_text`, and check if they are the same length. If they are",
"# NOT the same length, the heuristic has failed. If they are the same",
"# length, we assume the characters are one-to-one aligned.",
"tokenizer",
"=",
"BasicTokenizer",
"(",
"do_lower_case",
"=",
"do_lower_case",
")",
"tok_text",
"=",
"\" \"",
".",
"join",
"(",
"tokenizer",
".",
"tokenize",
"(",
"orig_text",
")",
")",
"start_position",
"=",
"tok_text",
".",
"find",
"(",
"pred_text",
")",
"if",
"start_position",
"==",
"-",
"1",
":",
"if",
"verbose_logging",
":",
"logger",
".",
"info",
"(",
"\"Unable to find text: '%s' in '%s'\"",
"%",
"(",
"pred_text",
",",
"orig_text",
")",
")",
"return",
"orig_text",
"end_position",
"=",
"start_position",
"+",
"len",
"(",
"pred_text",
")",
"-",
"1",
"(",
"orig_ns_text",
",",
"orig_ns_to_s_map",
")",
"=",
"_strip_spaces",
"(",
"orig_text",
")",
"(",
"tok_ns_text",
",",
"tok_ns_to_s_map",
")",
"=",
"_strip_spaces",
"(",
"tok_text",
")",
"if",
"len",
"(",
"orig_ns_text",
")",
"!=",
"len",
"(",
"tok_ns_text",
")",
":",
"if",
"verbose_logging",
":",
"logger",
".",
"info",
"(",
"\"Length not equal after stripping spaces: '%s' vs '%s'\"",
",",
"orig_ns_text",
",",
"tok_ns_text",
")",
"return",
"orig_text",
"# We then project the characters in `pred_text` back to `orig_text` using",
"# the character-to-character alignment.",
"tok_s_to_ns_map",
"=",
"{",
"}",
"for",
"(",
"i",
",",
"tok_index",
")",
"in",
"tok_ns_to_s_map",
".",
"items",
"(",
")",
":",
"tok_s_to_ns_map",
"[",
"tok_index",
"]",
"=",
"i",
"orig_start_position",
"=",
"None",
"if",
"start_position",
"in",
"tok_s_to_ns_map",
":",
"ns_start_position",
"=",
"tok_s_to_ns_map",
"[",
"start_position",
"]",
"if",
"ns_start_position",
"in",
"orig_ns_to_s_map",
":",
"orig_start_position",
"=",
"orig_ns_to_s_map",
"[",
"ns_start_position",
"]",
"if",
"orig_start_position",
"is",
"None",
":",
"if",
"verbose_logging",
":",
"logger",
".",
"info",
"(",
"\"Couldn't map start position\"",
")",
"return",
"orig_text",
"orig_end_position",
"=",
"None",
"if",
"end_position",
"in",
"tok_s_to_ns_map",
":",
"ns_end_position",
"=",
"tok_s_to_ns_map",
"[",
"end_position",
"]",
"if",
"ns_end_position",
"in",
"orig_ns_to_s_map",
":",
"orig_end_position",
"=",
"orig_ns_to_s_map",
"[",
"ns_end_position",
"]",
"if",
"orig_end_position",
"is",
"None",
":",
"if",
"verbose_logging",
":",
"logger",
".",
"info",
"(",
"\"Couldn't map end position\"",
")",
"return",
"orig_text",
"output_text",
"=",
"orig_text",
"[",
"orig_start_position",
":",
"(",
"orig_end_position",
"+",
"1",
")",
"]",
"return",
"output_text"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L1160-L1253 |
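
The character-to-character alignment in get_final_text hinges on its _strip_spaces helper, which records where each non-space character of the stripped string came from. A small stand-alone sketch with an invented input string:

import collections

def strip_spaces(text):
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()  # index in stripped text -> index in original text
    for i, c in enumerate(text):
        if c == " ":
            continue
        ns_to_s_map[len(ns_chars)] = i
        ns_chars.append(c)
    return "".join(ns_chars), ns_to_s_map

ns_text, mapping = strip_spaces("Steve  Smith")
print(ns_text)     # SteveSmith
print(mapping[5])  # 7: the "S" of "Smith" sits at index 7 once the double space is counted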
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | _get_best_indexes | (logits, n_best_size) | return best_indexes | Get the n-best logits from a list. | Get the n-best logits from a list. | [
"Get",
"the",
"n",
"-",
"best",
"logits",
"from",
"a",
"list",
"."
] | def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(
enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes | [
"def",
"_get_best_indexes",
"(",
"logits",
",",
"n_best_size",
")",
":",
"index_and_score",
"=",
"sorted",
"(",
"enumerate",
"(",
"logits",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"best_indexes",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"index_and_score",
")",
")",
":",
"if",
"i",
">=",
"n_best_size",
":",
"break",
"best_indexes",
".",
"append",
"(",
"index_and_score",
"[",
"i",
"]",
"[",
"0",
"]",
")",
"return",
"best_indexes"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L1256-L1266 |
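
Behaviourally, _get_best_indexes is equivalent to sorting positions by logit and keeping the first n; a quick check with made-up logits:

logits = [0.1, 2.3, -1.0, 1.7]
n_best_size = 2
print(sorted(range(len(logits)), key=lambda i: logits[i], reverse=True)[:n_best_size])  # [1, 3]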
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/rc_utils.py | python | _compute_softmax | (scores) | return probs | Compute softmax probability over raw logits. | Compute softmax probability over raw logits. | [
"Compute",
"softmax",
"probability",
"over",
"raw",
"logits",
"."
] | def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs | [
"def",
"_compute_softmax",
"(",
"scores",
")",
":",
"if",
"not",
"scores",
":",
"return",
"[",
"]",
"max_score",
"=",
"None",
"for",
"score",
"in",
"scores",
":",
"if",
"max_score",
"is",
"None",
"or",
"score",
">",
"max_score",
":",
"max_score",
"=",
"score",
"exp_scores",
"=",
"[",
"]",
"total_sum",
"=",
"0.0",
"for",
"score",
"in",
"scores",
":",
"x",
"=",
"math",
".",
"exp",
"(",
"score",
"-",
"max_score",
")",
"exp_scores",
".",
"append",
"(",
"x",
")",
"total_sum",
"+=",
"x",
"probs",
"=",
"[",
"]",
"for",
"score",
"in",
"exp_scores",
":",
"probs",
".",
"append",
"(",
"score",
"/",
"total_sum",
")",
"return",
"probs"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/rc_utils.py#L1269-L1289 |
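
The max-subtraction inside _compute_softmax is the usual overflow guard: shifting every score by the maximum leaves the probabilities unchanged but keeps math.exp in range. A small check with illustrative scores:

import math

scores = [1000.0, 1001.0]          # naive math.exp(1000.0) would overflow
max_score = max(scores)
exp_scores = [math.exp(s - max_score) for s in scores]
total = sum(exp_scores)
print([x / total for x in exp_scores])  # [0.2689..., 0.7310...]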
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | reader/modeling_reader.py | python | BERTLayerNorm.__init__ | (self, config, variance_epsilon=1e-12) | Construct a layernorm module in the TF style (epsilon inside the square root). | Construct a layernorm module in the TF style (epsilon inside the square root). | [
"Construct",
"a",
"layernorm",
"module",
"in",
"the",
"TF",
"style",
"(",
"epsilon",
"inside",
"the",
"square",
"root",
")",
"."
] | def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BERTLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon | [
"def",
"__init__",
"(",
"self",
",",
"config",
",",
"variance_epsilon",
"=",
"1e-12",
")",
":",
"super",
"(",
"BERTLayerNorm",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"gamma",
"=",
"nn",
".",
"Parameter",
"(",
"torch",
".",
"ones",
"(",
"config",
".",
"hidden_size",
")",
")",
"self",
".",
"beta",
"=",
"nn",
".",
"Parameter",
"(",
"torch",
".",
"zeros",
"(",
"config",
".",
"hidden_size",
")",
")",
"self",
".",
"variance_epsilon",
"=",
"variance_epsilon"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/reader/modeling_reader.py#L8-L14 |
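
Only the constructor of BERTLayerNorm appears in this record; the sketch below is an assumed forward pass based on the TF-style formulation named in its docstring (epsilon inside the square root), normalizing over the last dimension with the gamma and beta parameters defined above.

import torch

def bert_layer_norm_forward(x, gamma, beta, variance_epsilon=1e-12):
    u = x.mean(-1, keepdim=True)                     # per-position mean over hidden_size
    s = (x - u).pow(2).mean(-1, keepdim=True)        # biased variance
    x = (x - u) / torch.sqrt(s + variance_epsilon)   # epsilon inside the square root
    return gamma * x + beta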
||
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | pipeline/graph_retriever.py | python | create_examples | (jsn, graph_retriever_config) | return examples | Find the maximum size of the initial context (links are not included) | Find the maximum size of the initial context (links are not included) | [
"Find",
"the",
"mximum",
"size",
"of",
"the",
"initial",
"context",
"(",
"links",
"are",
"not",
"included",
")"
] | def create_examples(jsn, graph_retriever_config):
task = graph_retriever_config.task
examples = []
'''
Find the maximum size of the initial context (links are not included)
'''
graph_retriever_config.max_context_size = 0
for data in jsn:
guid = data['q_id']
question = data['question']
context = data['context'] # {context title: paragraph}
all_linked_paras_dic = {} # {context title: {linked title: paragraph}}
'''
Use TagMe-based context at test time.
'''
if graph_retriever_config.tagme:
assert 'tagged_context' in data
'''
Reformat "tagged_context" if needed (c.f. the "context" case above)
'''
if type(data['tagged_context']) == list:
tagged_context = {c[0]: c[1] for c in data['tagged_context']}
data['tagged_context'] = tagged_context
'''
Append valid paragraphs from "tagged_context" to "context"
'''
for tagged_title in data['tagged_context']:
tagged_text = data['tagged_context'][tagged_title]
if tagged_title not in context and tagged_title is not None and tagged_title.strip() != '' and tagged_text is not None and tagged_text.strip() != '':
context[tagged_title] = tagged_text
'''
Clean "context" by removing invalid paragraphs
'''
removed_keys = []
for title in context:
if title is None or title.strip() == '' or context[title] is None or context[title].strip() == '':
removed_keys.append(title)
for key in removed_keys:
context.pop(key)
all_paras = {}
for title in context:
all_paras[title] = context[title]
if graph_retriever_config.expand_links:
expand_links(context, all_linked_paras_dic, all_paras)
graph_retriever_config.max_context_size = max(graph_retriever_config.max_context_size, len(context))
examples.append(InputExample(guid = guid,
q = question,
c = context,
para_dic = all_linked_paras_dic,
s_g = None, r_g = None, all_r_g = None,
all_paras = all_paras))
return examples | [
"def",
"create_examples",
"(",
"jsn",
",",
"graph_retriever_config",
")",
":",
"task",
"=",
"graph_retriever_config",
".",
"task",
"examples",
"=",
"[",
"]",
"graph_retriever_config",
".",
"max_context_size",
"=",
"0",
"for",
"data",
"in",
"jsn",
":",
"guid",
"=",
"data",
"[",
"'q_id'",
"]",
"question",
"=",
"data",
"[",
"'question'",
"]",
"context",
"=",
"data",
"[",
"'context'",
"]",
"# {context title: paragraph}",
"all_linked_paras_dic",
"=",
"{",
"}",
"# {context title: {linked title: paragraph}}",
"'''\n Use TagMe-based context at test time.\n '''",
"if",
"graph_retriever_config",
".",
"tagme",
":",
"assert",
"'tagged_context'",
"in",
"data",
"'''\n Reformat \"tagged_context\" if needed (c.f. the \"context\" case above)\n '''",
"if",
"type",
"(",
"data",
"[",
"'tagged_context'",
"]",
")",
"==",
"list",
":",
"tagged_context",
"=",
"{",
"c",
"[",
"0",
"]",
":",
"c",
"[",
"1",
"]",
"for",
"c",
"in",
"data",
"[",
"'tagged_context'",
"]",
"}",
"data",
"[",
"'tagged_context'",
"]",
"=",
"tagged_context",
"'''\n Append valid paragraphs from \"tagged_context\" to \"context\"\n '''",
"for",
"tagged_title",
"in",
"data",
"[",
"'tagged_context'",
"]",
":",
"tagged_text",
"=",
"data",
"[",
"'tagged_context'",
"]",
"[",
"tagged_title",
"]",
"if",
"tagged_title",
"not",
"in",
"context",
"and",
"tagged_title",
"is",
"not",
"None",
"and",
"tagged_title",
".",
"strip",
"(",
")",
"!=",
"''",
"and",
"tagged_text",
"is",
"not",
"None",
"and",
"tagged_text",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"context",
"[",
"tagged_title",
"]",
"=",
"tagged_text",
"'''\n Clean \"context\" by removing invalid paragraphs\n '''",
"removed_keys",
"=",
"[",
"]",
"for",
"title",
"in",
"context",
":",
"if",
"title",
"is",
"None",
"or",
"title",
".",
"strip",
"(",
")",
"==",
"''",
"or",
"context",
"[",
"title",
"]",
"is",
"None",
"or",
"context",
"[",
"title",
"]",
".",
"strip",
"(",
")",
"==",
"''",
":",
"removed_keys",
".",
"append",
"(",
"title",
")",
"for",
"key",
"in",
"removed_keys",
":",
"context",
".",
"pop",
"(",
"key",
")",
"all_paras",
"=",
"{",
"}",
"for",
"title",
"in",
"context",
":",
"all_paras",
"[",
"title",
"]",
"=",
"context",
"[",
"title",
"]",
"if",
"graph_retriever_config",
".",
"expand_links",
":",
"expand_links",
"(",
"context",
",",
"all_linked_paras_dic",
",",
"all_paras",
")",
"graph_retriever_config",
".",
"max_context_size",
"=",
"max",
"(",
"graph_retriever_config",
".",
"max_context_size",
",",
"len",
"(",
"context",
")",
")",
"examples",
".",
"append",
"(",
"InputExample",
"(",
"guid",
"=",
"guid",
",",
"q",
"=",
"question",
",",
"c",
"=",
"context",
",",
"para_dic",
"=",
"all_linked_paras_dic",
",",
"s_g",
"=",
"None",
",",
"r_g",
"=",
"None",
",",
"all_r_g",
"=",
"None",
",",
"all_paras",
"=",
"all_paras",
")",
")",
"return",
"examples"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/pipeline/graph_retriever.py#L24-L89 |
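
The context-cleaning step in create_examples simply drops titles or paragraphs that are missing or whitespace-only; the dictionary below is invented data to show the effect.

context = {
    "Japan": "Japan is an island country in East Asia.",
    "": "paragraph with an empty title",
    "Tokyo": "   ",
}
removed_keys = [
    title for title in context
    if title is None or title.strip() == "" or context[title] is None or context[title].strip() == ""
]
for key in removed_keys:
    context.pop(key)
print(context)  # {'Japan': 'Japan is an island country in East Asia.'}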
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | pipeline/graph_retriever.py | python | convert_examples_to_features | (examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer) | return features | Loads a data file into a list of `InputBatch`s. | Loads a data file into a list of `InputBatch`s. | [
"Loads",
"a",
"data",
"file",
"into",
"a",
"list",
"of",
"InputBatch",
"s",
"."
] | def convert_examples_to_features(examples, max_seq_length, max_para_num, graph_retriever_config, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
max_para_num = graph_retriever_config.max_context_size
graph_retriever_config.max_para_num = max(graph_retriever_config.max_para_num, max_para_num)
max_steps = graph_retriever_config.max_select_num
DUMMY = [0] * max_seq_length
features = []
for (ex_index, example) in enumerate(examples):
tokens_q = tokenize_question(example.question, tokenizer)
title2index = {}
input_ids = []
input_masks = []
segment_ids = []
titles_list = list(example.context.keys())
for p in titles_list:
if len(input_ids) == max_para_num:
break
if p in title2index:
continue
title2index[p] = len(title2index)
example.title_order.append(p)
p = example.context[p]
input_ids_, input_masks_, segment_ids_ = tokenize_paragraph(p, tokens_q, max_seq_length, tokenizer)
input_ids.append(input_ids_)
input_masks.append(input_masks_)
segment_ids.append(segment_ids_)
num_paragraphs_no_links = len(input_ids)
assert len(input_ids) <= max_para_num
num_paragraphs = len(input_ids)
output_masks = [([1.0] * len(input_ids) + [0.0] * (max_para_num - len(input_ids) + 1)) for _ in range(max_para_num + 2)]
assert len(example.context) == num_paragraphs_no_links
for i in range(len(output_masks[0])):
if i >= num_paragraphs_no_links:
output_masks[0][i] = 0.0
for i in range(len(input_ids)):
output_masks[i+1][i] = 0.0
padding = [DUMMY] * (max_para_num - len(input_ids))
input_ids += padding
input_masks += padding
segment_ids += padding
features.append(
InputFeatures(input_ids=input_ids,
input_masks=input_masks,
segment_ids=segment_ids,
output_masks = output_masks,
num_paragraphs = num_paragraphs,
num_steps = -1,
ex_index = ex_index))
return features | [
"def",
"convert_examples_to_features",
"(",
"examples",
",",
"max_seq_length",
",",
"max_para_num",
",",
"graph_retriever_config",
",",
"tokenizer",
")",
":",
"max_para_num",
"=",
"graph_retriever_config",
".",
"max_context_size",
"graph_retriever_config",
".",
"max_para_num",
"=",
"max",
"(",
"graph_retriever_config",
".",
"max_para_num",
",",
"max_para_num",
")",
"max_steps",
"=",
"graph_retriever_config",
".",
"max_select_num",
"DUMMY",
"=",
"[",
"0",
"]",
"*",
"max_seq_length",
"features",
"=",
"[",
"]",
"for",
"(",
"ex_index",
",",
"example",
")",
"in",
"enumerate",
"(",
"examples",
")",
":",
"tokens_q",
"=",
"tokenize_question",
"(",
"example",
".",
"question",
",",
"tokenizer",
")",
"title2index",
"=",
"{",
"}",
"input_ids",
"=",
"[",
"]",
"input_masks",
"=",
"[",
"]",
"segment_ids",
"=",
"[",
"]",
"titles_list",
"=",
"list",
"(",
"example",
".",
"context",
".",
"keys",
"(",
")",
")",
"for",
"p",
"in",
"titles_list",
":",
"if",
"len",
"(",
"input_ids",
")",
"==",
"max_para_num",
":",
"break",
"if",
"p",
"in",
"title2index",
":",
"continue",
"title2index",
"[",
"p",
"]",
"=",
"len",
"(",
"title2index",
")",
"example",
".",
"title_order",
".",
"append",
"(",
"p",
")",
"p",
"=",
"example",
".",
"context",
"[",
"p",
"]",
"input_ids_",
",",
"input_masks_",
",",
"segment_ids_",
"=",
"tokenize_paragraph",
"(",
"p",
",",
"tokens_q",
",",
"max_seq_length",
",",
"tokenizer",
")",
"input_ids",
".",
"append",
"(",
"input_ids_",
")",
"input_masks",
".",
"append",
"(",
"input_masks_",
")",
"segment_ids",
".",
"append",
"(",
"segment_ids_",
")",
"num_paragraphs_no_links",
"=",
"len",
"(",
"input_ids",
")",
"assert",
"len",
"(",
"input_ids",
")",
"<=",
"max_para_num",
"num_paragraphs",
"=",
"len",
"(",
"input_ids",
")",
"output_masks",
"=",
"[",
"(",
"[",
"1.0",
"]",
"*",
"len",
"(",
"input_ids",
")",
"+",
"[",
"0.0",
"]",
"*",
"(",
"max_para_num",
"-",
"len",
"(",
"input_ids",
")",
"+",
"1",
")",
")",
"for",
"_",
"in",
"range",
"(",
"max_para_num",
"+",
"2",
")",
"]",
"assert",
"len",
"(",
"example",
".",
"context",
")",
"==",
"num_paragraphs_no_links",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"output_masks",
"[",
"0",
"]",
")",
")",
":",
"if",
"i",
">=",
"num_paragraphs_no_links",
":",
"output_masks",
"[",
"0",
"]",
"[",
"i",
"]",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"input_ids",
")",
")",
":",
"output_masks",
"[",
"i",
"+",
"1",
"]",
"[",
"i",
"]",
"=",
"0.0",
"padding",
"=",
"[",
"DUMMY",
"]",
"*",
"(",
"max_para_num",
"-",
"len",
"(",
"input_ids",
")",
")",
"input_ids",
"+=",
"padding",
"input_masks",
"+=",
"padding",
"segment_ids",
"+=",
"padding",
"features",
".",
"append",
"(",
"InputFeatures",
"(",
"input_ids",
"=",
"input_ids",
",",
"input_masks",
"=",
"input_masks",
",",
"segment_ids",
"=",
"segment_ids",
",",
"output_masks",
"=",
"output_masks",
",",
"num_paragraphs",
"=",
"num_paragraphs",
",",
"num_steps",
"=",
"-",
"1",
",",
"ex_index",
"=",
"ex_index",
")",
")",
"return",
"features"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/pipeline/graph_retriever.py#L91-L158 |
|
AkariAsai/learning_to_retrieve_reasoning_paths | a020d52cfbbb7d7fca9fa25361e549c85e81875c | pipeline/tfidf_retriever.py | python | TfidfRetriever.get_article_tfidf_with_hyperlinked_titles | (self, q_id,question, args) | Retrieve articles with their corresponding hyperlinked titles.
Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article.
(so, eventually we get k*l paragraphs with tfidf-based pruning.)
We also store the hyperlinked titles for each paragraph. | Retrieve articles with their corresponding hyperlinked titles.
Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article.
(so, eventually we get k*l paragraphs with tfidf-based pruning.)
We also store the hyperlinked titles for each paragraph. | [
"Retrieve",
"articles",
"with",
"their",
"corresponding",
"hyperlinked",
"titles",
".",
"Due",
"to",
"efficiency",
"we",
"sample",
"top",
"k",
"articles",
"and",
"then",
"sample",
"top",
"l",
"paragraphs",
"from",
"each",
"article",
".",
"(",
"so",
"eventually",
"we",
"get",
"k",
"*",
"l",
"paragraphs",
"with",
"tfidf",
"-",
"based",
"pruning",
".",
")",
"We",
"also",
"store",
"the",
"hyperlinked",
"titles",
"for",
"each",
"paragraph",
"."
] | def get_article_tfidf_with_hyperlinked_titles(self, q_id,question, args):
"""
Retrieve articles with their corresponding hyperlinked titles.
Due to efficiency, we sample top k articles, and then sample top l paragraphs from each article.
(so, eventually we get k*l paragraphs with tfidf-based pruning.)
We also store the hyperlinked titles for each paragraph.
"""
tfidf_limit, pruning_l, prune_after_agg = args.tfidf_limit, args.pruning_l, args.prune_after_agg
doc_names, _ = self.ranker.closest_docs(question, k=tfidf_limit)
context, hyper_linked_titles = self.load_sampled_para_text_and_linked_titles(
doc_names, question, pruning_l, prune_after_agg)
if args.tagme is True and args.tagme_api_key is not None:
# if add TagMe
tagged_context = self.load_sampled_tagged_para_text(
question, pruning_l, args.tagme_api_key)
return [{"question": question,
"context": context,
"tagged_context": tagged_context,
"all_linked_para_title_dic": hyper_linked_titles,
"q_id": q_id}]
else:
return [{"question": question,
"context": context,
"all_linked_para_title_dic": hyper_linked_titles,
"q_id": q_id}] | [
"def",
"get_article_tfidf_with_hyperlinked_titles",
"(",
"self",
",",
"q_id",
",",
"question",
",",
"args",
")",
":",
"tfidf_limit",
",",
"pruning_l",
",",
"prune_after_agg",
"=",
"args",
".",
"tfidf_limit",
",",
"args",
".",
"pruning_l",
",",
"args",
".",
"prune_after_agg",
"doc_names",
",",
"_",
"=",
"self",
".",
"ranker",
".",
"closest_docs",
"(",
"question",
",",
"k",
"=",
"tfidf_limit",
")",
"context",
",",
"hyper_linked_titles",
"=",
"self",
".",
"load_sampled_para_text_and_linked_titles",
"(",
"doc_names",
",",
"question",
",",
"pruning_l",
",",
"prune_after_agg",
")",
"if",
"args",
".",
"tagme",
"is",
"True",
"and",
"args",
".",
"tagme_api_key",
"is",
"not",
"None",
":",
"# if add TagMe",
"tagged_context",
"=",
"self",
".",
"load_sampled_tagged_para_text",
"(",
"question",
",",
"pruning_l",
",",
"args",
".",
"tagme_api_key",
")",
"return",
"[",
"{",
"\"question\"",
":",
"question",
",",
"\"context\"",
":",
"context",
",",
"\"tagged_context\"",
":",
"tagged_context",
",",
"\"all_linked_para_title_dic\"",
":",
"hyper_linked_titles",
",",
"\"q_id\"",
":",
"q_id",
"}",
"]",
"else",
":",
"return",
"[",
"{",
"\"question\"",
":",
"question",
",",
"\"context\"",
":",
"context",
",",
"\"all_linked_para_title_dic\"",
":",
"hyper_linked_titles",
",",
"\"q_id\"",
":",
"q_id",
"}",
"]"
] | https://github.com/AkariAsai/learning_to_retrieve_reasoning_paths/blob/a020d52cfbbb7d7fca9fa25361e549c85e81875c/pipeline/tfidf_retriever.py#L132-L159 |
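The docstring above describes a two-stage pruning scheme: keep the top-k articles by TF-IDF, then the top-l paragraphs inside each article, so at most k·l paragraphs survive. A minimal sketch of that idea follows; the lexical-overlap scorer and the sample data are stand-ins, not the repository's actual ranker or corpus:

```python
from collections import OrderedDict

def prune_paragraphs(question_terms, articles, k=2, l=2):
    """articles: {title: [paragraph, ...]} -> {title: [top-l paragraphs]} for the top-k articles."""
    def score(text):
        # toy overlap score standing in for a TF-IDF dot product
        return len(set(text.lower().split()) & question_terms)

    # stage 1: rank whole articles by the score of their concatenated text
    top_articles = sorted(articles.items(),
                          key=lambda kv: score(" ".join(kv[1])),
                          reverse=True)[:k]
    # stage 2: inside each surviving article keep only the l best paragraphs
    pruned = OrderedDict()
    for title, paragraphs in top_articles:
        pruned[title] = sorted(paragraphs, key=score, reverse=True)[:l]
    return pruned

question = set("who composed the opera carmen".split())
articles = {
    "Carmen": ["Carmen is an opera composed by Georges Bizet.", "It premiered in 1875 in Paris."],
    "Tosca":  ["Tosca is an opera by Giacomo Puccini.", "It is set in Rome."],
}
print(prune_paragraphs(question, articles, k=1, l=1))
```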
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | check_existing_pt_files | (opt) | Checking if there are existing .pt files to avoid tampering | Checking if there are existing .pt files to avoid tampering | [
"Checking",
"if",
"there",
"are",
"existing",
".",
"pt",
"files",
"to",
"avoid",
"tampering"
] | def check_existing_pt_files(opt):
""" Checking if there are existing .pt files to avoid tampering """
# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt
# when training, so check to avoid tampering with existing pt files
# or mixing them up.
for t in ['train', 'valid', 'vocab']:
pattern = opt.save_data + '.' + t + '*.pt'
if glob.glob(pattern):
sys.stderr.write("Please backup existing pt file: %s, "
"to avoid tampering!\n" % pattern)
sys.exit(1) | [
"def",
"check_existing_pt_files",
"(",
"opt",
")",
":",
"# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt",
"# when training, so check to avoid tampering with existing pt files",
"# or mixing them up.",
"for",
"t",
"in",
"[",
"'train'",
",",
"'valid'",
",",
"'vocab'",
"]",
":",
"pattern",
"=",
"opt",
".",
"save_data",
"+",
"'.'",
"+",
"t",
"+",
"'*.pt'",
"if",
"glob",
".",
"glob",
"(",
"pattern",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Please backup existing pt file: %s, \"",
"\"to avoid tampering!\\n\"",
"%",
"pattern",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L20-L30 |
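The same guard pattern in miniature, aborting before any existing shard under a given prefix would be overwritten; the prefix in the call below is a placeholder:

```python
import glob
import sys

def refuse_to_overwrite(prefix):
    # abort if any train/valid/vocab shard already exists under this prefix
    for t in ("train", "valid", "vocab"):
        pattern = f"{prefix}.{t}*.pt"
        if glob.glob(pattern):
            sys.exit(f"Please back up the existing pt file(s) matching {pattern} first.")

refuse_to_overwrite("data/demo")   # no-op unless data/demo.train*.pt etc. already exist
```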
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | parse_args | () | return opt | Parsing arguments | Parsing arguments | [
"Parsing",
"arguments"
] | def parse_args():
""" Parsing arguments """
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
check_existing_pt_files(opt)
return opt | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'preprocess.py'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"opts",
".",
"add_md_help_argument",
"(",
"parser",
")",
"opts",
".",
"preprocess_opts",
"(",
"parser",
")",
"opt",
"=",
"parser",
".",
"parse_args",
"(",
")",
"torch",
".",
"manual_seed",
"(",
"opt",
".",
"seed",
")",
"check_existing_pt_files",
"(",
"opt",
")",
"return",
"opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L33-L47 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | build_save_in_shards | (src_corpus, tgt_corpus, fields,
corpus_type, opt) | return ret_list | Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
To tackle this, we only read in part of the corpus file of size
`max_shard_size`(actually it is multiples of 64 bytes that equals
or is slightly larger than this size), and process it into dataset,
then write it to disk along the way. By doing this, we only focus on
part of the corpus at any moment, thus effectively reducing memory use.
According to test, this method can reduce memory footprint by ~50%.
Note! As we process along the shards, previous shards might still
stay in memory, but since we are done with them, and no more
reference to them, if there is memory tight situation, the OS could
easily reclaim these memory.
If `max_shard_size` is 0 or is larger than the corpus size, it is
effectively preprocessed into one dataset, i.e. no sharding.
NOTE! `max_shard_size` is measuring the input corpus size, not the
output pt file size. So a shard pt file consists of examples of size
2 * `max_shard_size`(source + target). | Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'. | [
"Divide",
"the",
"big",
"corpus",
"into",
"shards",
"and",
"build",
"dataset",
"separately",
".",
"This",
"is",
"currently",
"only",
"for",
"data_type",
"==",
"text",
"."
] | def build_save_in_shards(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
To tackle this, we only read in part of the corpus file of size
`max_shard_size`(actually it is multiples of 64 bytes that equals
or is slightly larger than this size), and process it into dataset,
then write it to disk along the way. By doing this, we only focus on
part of the corpus at any moment, thus effectively reducing memory use.
According to test, this method can reduce memory footprint by ~50%.
Note! As we process along the shards, previous shards might still
stay in memory, but since we are done with them, and no more
reference to them, if there is memory tight situation, the OS could
easily reclaim these memory.
If `max_shard_size` is 0 or is larger than the corpus size, it is
effectively preprocessed into one dataset, i.e. no sharding.
NOTE! `max_shard_size` is measuring the input corpus size, not the
output pt file size. So a shard pt file consists of examples of size
2 * `max_shard_size`(source + target).
"""
corpus_size = os.path.getsize(src_corpus)
if corpus_size > 10 * (1024 ** 2) and opt.max_shard_size == 0:
logger.info("Warning. The corpus %s is larger than 10M bytes, "
"you can set '-max_shard_size' to process it by "
"small shards to use less memory." % src_corpus)
if opt.max_shard_size != 0:
logger.info(' * divide corpus into shards and build dataset '
'separately (shard_size = %d bytes).'
% opt.max_shard_size)
ret_list = []
src_iter = inputters.ShardedTextCorpusIterator(
src_corpus, opt.src_seq_length_trunc,
"src", opt.max_shard_size)
tgt_iter = inputters.ShardedTextCorpusIterator(
tgt_corpus, opt.tgt_seq_length_trunc,
"tgt", opt.max_shard_size,
assoc_iter=src_iter)
index = 0
while not src_iter.hit_end():
index += 1
dataset = inputters.TextDataset(
fields, src_iter, tgt_iter,
src_iter.num_feats, tgt_iter.num_feats,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
dynamic_dict=opt.dynamic_dict)
# We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
logger.info(" * saving %s data shard to %s."
% (corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
return ret_list | [
"def",
"build_save_in_shards",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
":",
"corpus_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"src_corpus",
")",
"if",
"corpus_size",
">",
"10",
"*",
"(",
"1024",
"**",
"2",
")",
"and",
"opt",
".",
"max_shard_size",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Warning. The corpus %s is larger than 10M bytes, \"",
"\"you can set '-max_shard_size' to process it by \"",
"\"small shards to use less memory.\"",
"%",
"src_corpus",
")",
"if",
"opt",
".",
"max_shard_size",
"!=",
"0",
":",
"logger",
".",
"info",
"(",
"' * divide corpus into shards and build dataset '",
"'separately (shard_size = %d bytes).'",
"%",
"opt",
".",
"max_shard_size",
")",
"ret_list",
"=",
"[",
"]",
"src_iter",
"=",
"inputters",
".",
"ShardedTextCorpusIterator",
"(",
"src_corpus",
",",
"opt",
".",
"src_seq_length_trunc",
",",
"\"src\"",
",",
"opt",
".",
"max_shard_size",
")",
"tgt_iter",
"=",
"inputters",
".",
"ShardedTextCorpusIterator",
"(",
"tgt_corpus",
",",
"opt",
".",
"tgt_seq_length_trunc",
",",
"\"tgt\"",
",",
"opt",
".",
"max_shard_size",
",",
"assoc_iter",
"=",
"src_iter",
")",
"index",
"=",
"0",
"while",
"not",
"src_iter",
".",
"hit_end",
"(",
")",
":",
"index",
"+=",
"1",
"dataset",
"=",
"inputters",
".",
"TextDataset",
"(",
"fields",
",",
"src_iter",
",",
"tgt_iter",
",",
"src_iter",
".",
"num_feats",
",",
"tgt_iter",
".",
"num_feats",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
")",
"# We save fields in vocab.pt separately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"pt_file",
"=",
"\"{:s}.{:s}.{:d}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
",",
"index",
")",
"logger",
".",
"info",
"(",
"\" * saving %s data shard to %s.\"",
"%",
"(",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"ret_list",
".",
"append",
"(",
"pt_file",
")",
"return",
"ret_list"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L50-L120 |
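A simplified sketch of the byte-bounded sharding the docstring above motivates: the corpus is consumed shard by shard, so only one slice of it is ever resident in memory. This is not the `ShardedTextCorpusIterator` itself, just the core idea; the path and shard size in the usage comment are placeholders:

```python
def iter_shards(path, max_shard_size):
    """Yield lists of lines whose combined size is roughly max_shard_size bytes."""
    shard, shard_bytes = [], 0
    with open(path, encoding="utf-8") as f:
        for line in f:
            shard.append(line)
            shard_bytes += len(line.encode("utf-8"))
            if max_shard_size and shard_bytes >= max_shard_size:
                yield shard                      # hand the slice off for processing
                shard, shard_bytes = [], 0
    if shard:
        yield shard                              # final, possibly smaller shard

# usage sketch: each shard becomes its own dataset and is saved immediately,
# mirroring the "{save_data}.{corpus_type}.{index}.pt" naming seen above
# for i, shard in enumerate(iter_shards("train.src.txt", 64 * 1024 * 1024), start=1):
#     torch.save(make_dataset(shard), f"demo.train.{i}.pt")
```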
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | build_save_in_shards_using_shards_size | (src_corpus, tgt_corpus, fields,
corpus_type, opt) | return ret_list | Divide src_corpus and tgt_corpus into smaller multiples
src_copus and tgt corpus files, then build shards, each
shard will have opt.shard_size samples except last shard.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file. | Divide src_corpus and tgt_corpus into smaller multiples
src_copus and tgt corpus files, then build shards, each
shard will have opt.shard_size samples except last shard. | [
"Divide",
"src_corpus",
"and",
"tgt_corpus",
"into",
"smaller",
"multiples",
"src_copus",
"and",
"tgt",
"corpus",
"files",
"then",
"build",
"shards",
"each",
"shard",
"will",
"have",
"opt",
".",
"shard_size",
"samples",
"except",
"last",
"shard",
"."
] | def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
Divide src_corpus and tgt_corpus into smaller multiples
src_copus and tgt corpus files, then build shards, each
shard will have opt.shard_size samples except last shard.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
"""
src_data = open(src_corpus, "r", encoding="utf-8").readlines()
tgt_data = open(tgt_corpus, "r", encoding="utf-8").readlines()
src_corpus = "".join(src_corpus.split(".")[:-1])
tgt_corpus = "".join(tgt_corpus.split(".")[:-1])
for x in range(int(len(src_data) / opt.shard_size)):
open(src_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8").writelines(
src_data[x * opt.shard_size: (x + 1) * opt.shard_size])
open(tgt_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8").writelines(
tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])
src_list = sorted(glob.glob(src_corpus + '.*.txt'))
tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))
ret_list = []
for index, src in enumerate(src_list):
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src,
tgt_path=tgt_list[index],
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size
)
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
# We save fields in vocab.pt seperately, so make it empty.
dataset.fields = []
logger.info(" * saving %sth %s data image shard to %s."
% (index, corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
del dataset.examples
gc.collect()
del dataset
gc.collect()
return ret_list | [
"def",
"build_save_in_shards_using_shards_size",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
":",
"src_data",
"=",
"open",
"(",
"src_corpus",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
"tgt_data",
"=",
"open",
"(",
"tgt_corpus",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
"src_corpus",
"=",
"\"\"",
".",
"join",
"(",
"src_corpus",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"tgt_corpus",
"=",
"\"\"",
".",
"join",
"(",
"tgt_corpus",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"len",
"(",
"src_data",
")",
"/",
"opt",
".",
"shard_size",
")",
")",
":",
"open",
"(",
"src_corpus",
"+",
"\".{0}.txt\"",
".",
"format",
"(",
"x",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"writelines",
"(",
"src_data",
"[",
"x",
"*",
"opt",
".",
"shard_size",
":",
"(",
"x",
"+",
"1",
")",
"*",
"opt",
".",
"shard_size",
"]",
")",
"open",
"(",
"tgt_corpus",
"+",
"\".{0}.txt\"",
".",
"format",
"(",
"x",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"writelines",
"(",
"tgt_data",
"[",
"x",
"*",
"opt",
".",
"shard_size",
":",
"(",
"x",
"+",
"1",
")",
"*",
"opt",
".",
"shard_size",
"]",
")",
"src_list",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"src_corpus",
"+",
"'.*.txt'",
")",
")",
"tgt_list",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"tgt_corpus",
"+",
"'.*.txt'",
")",
")",
"ret_list",
"=",
"[",
"]",
"for",
"index",
",",
"src",
"in",
"enumerate",
"(",
"src_list",
")",
":",
"dataset",
"=",
"inputters",
".",
"build_dataset",
"(",
"fields",
",",
"opt",
".",
"data_type",
",",
"src_path",
"=",
"src",
",",
"tgt_path",
"=",
"tgt_list",
"[",
"index",
"]",
",",
"src_dir",
"=",
"opt",
".",
"src_dir",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"src_seq_length_trunc",
"=",
"opt",
".",
"src_seq_length_trunc",
",",
"tgt_seq_length_trunc",
"=",
"opt",
".",
"tgt_seq_length_trunc",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
",",
"sample_rate",
"=",
"opt",
".",
"sample_rate",
",",
"window_size",
"=",
"opt",
".",
"window_size",
",",
"window_stride",
"=",
"opt",
".",
"window_stride",
",",
"window",
"=",
"opt",
".",
"window",
",",
"image_channel_size",
"=",
"opt",
".",
"image_channel_size",
")",
"pt_file",
"=",
"\"{:s}.{:s}.{:d}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
",",
"index",
")",
"# We save fields in vocab.pt seperately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"logger",
".",
"info",
"(",
"\" * saving %sth %s data image shard to %s.\"",
"%",
"(",
"index",
",",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"ret_list",
".",
"append",
"(",
"pt_file",
")",
"del",
"dataset",
".",
"examples",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
"gc",
".",
"collect",
"(",
")",
"return",
"ret_list"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L123-L188 |
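The same idea expressed with line counts instead of bytes, as in the function above: the source and target files are split into aligned shards of `shard_size` sentence pairs. In this sketch the trailing remainder is written to a final short shard, and the output naming is made up:

```python
def split_parallel(src_path, tgt_path, shard_size, out_prefix="shard"):
    with open(src_path, encoding="utf-8") as f:
        src = f.readlines()
    with open(tgt_path, encoding="utf-8") as f:
        tgt = f.readlines()
    assert len(src) == len(tgt), "source and target corpora must stay line-aligned"

    shard_paths = []
    for n, start in enumerate(range(0, len(src), shard_size)):
        s_out, t_out = f"{out_prefix}.{n}.src", f"{out_prefix}.{n}.tgt"
        with open(s_out, "w", encoding="utf-8") as f:
            f.writelines(src[start:start + shard_size])
        with open(t_out, "w", encoding="utf-8") as f:
            f.writelines(tgt[start:start + shard_size])
        shard_paths.append((s_out, t_out))
    return shard_paths
```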
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | build_save_dataset | (corpus_type, fields, opt) | return [pt_file] | Building and saving the dataset | Building and saving the dataset | [
"Building",
"and",
"saving",
"the",
"dataset"
] | def build_save_dataset(corpus_type, fields, opt):
""" Building and saving the dataset """
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
src_corpus = opt.train_src
tgt_corpus = opt.train_tgt
else:
src_corpus = opt.valid_src
tgt_corpus = opt.valid_tgt
# Currently we only do preprocess sharding for corpus: data_type=='text'.
if opt.data_type == 'text':
return build_save_in_shards(
src_corpus, tgt_corpus, fields,
corpus_type, opt)
if (opt.shard_size > 0):
return build_save_in_shards_using_shards_size(src_corpus,
tgt_corpus,
fields,
corpus_type,
opt)
# For data_type == 'img' or 'audio', currently we don't do
# preprocess sharding. We only build a monolithic dataset.
# But since the interfaces are uniform, it would be not hard
# to do this should users need this feature.
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src_corpus,
tgt_path=tgt_corpus,
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size)
# We save fields in vocab.pt seperately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
torch.save(dataset, pt_file)
return [pt_file] | [
"def",
"build_save_dataset",
"(",
"corpus_type",
",",
"fields",
",",
"opt",
")",
":",
"assert",
"corpus_type",
"in",
"[",
"'train'",
",",
"'valid'",
"]",
"if",
"corpus_type",
"==",
"'train'",
":",
"src_corpus",
"=",
"opt",
".",
"train_src",
"tgt_corpus",
"=",
"opt",
".",
"train_tgt",
"else",
":",
"src_corpus",
"=",
"opt",
".",
"valid_src",
"tgt_corpus",
"=",
"opt",
".",
"valid_tgt",
"# Currently we only do preprocess sharding for corpus: data_type=='text'.",
"if",
"opt",
".",
"data_type",
"==",
"'text'",
":",
"return",
"build_save_in_shards",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
"if",
"(",
"opt",
".",
"shard_size",
">",
"0",
")",
":",
"return",
"build_save_in_shards_using_shards_size",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
"# For data_type == 'img' or 'audio', currently we don't do",
"# preprocess sharding. We only build a monolithic dataset.",
"# But since the interfaces are uniform, it would be not hard",
"# to do this should users need this feature.",
"dataset",
"=",
"inputters",
".",
"build_dataset",
"(",
"fields",
",",
"opt",
".",
"data_type",
",",
"src_path",
"=",
"src_corpus",
",",
"tgt_path",
"=",
"tgt_corpus",
",",
"src_dir",
"=",
"opt",
".",
"src_dir",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"src_seq_length_trunc",
"=",
"opt",
".",
"src_seq_length_trunc",
",",
"tgt_seq_length_trunc",
"=",
"opt",
".",
"tgt_seq_length_trunc",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
",",
"sample_rate",
"=",
"opt",
".",
"sample_rate",
",",
"window_size",
"=",
"opt",
".",
"window_size",
",",
"window_stride",
"=",
"opt",
".",
"window_stride",
",",
"window",
"=",
"opt",
".",
"window",
",",
"image_channel_size",
"=",
"opt",
".",
"image_channel_size",
")",
"# We save fields in vocab.pt seperately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"pt_file",
"=",
"\"{:s}.{:s}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
")",
"logger",
".",
"info",
"(",
"\" * saving %s dataset to %s.\"",
"%",
"(",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"return",
"[",
"pt_file",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L191-L242 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/preprocess.py | python | build_save_vocab | (train_dataset, fields, opt) | Building and saving the vocab | Building and saving the vocab | [
"Building",
"and",
"saving",
"the",
"vocab"
] | def build_save_vocab(train_dataset, fields, opt):
""" Building and saving the vocab """
fields = inputters.build_vocab(train_dataset, fields, opt.data_type,
opt.share_vocab,
opt.src_vocab,
opt.src_vocab_size,
opt.src_words_min_frequency,
opt.tgt_vocab,
opt.tgt_vocab_size,
opt.tgt_words_min_frequency)
# Can't save fields, so remove/reconstruct at training time.
vocab_file = opt.save_data + '.vocab.pt'
torch.save(inputters.save_fields_to_vocab(fields), vocab_file) | [
"def",
"build_save_vocab",
"(",
"train_dataset",
",",
"fields",
",",
"opt",
")",
":",
"fields",
"=",
"inputters",
".",
"build_vocab",
"(",
"train_dataset",
",",
"fields",
",",
"opt",
".",
"data_type",
",",
"opt",
".",
"share_vocab",
",",
"opt",
".",
"src_vocab",
",",
"opt",
".",
"src_vocab_size",
",",
"opt",
".",
"src_words_min_frequency",
",",
"opt",
".",
"tgt_vocab",
",",
"opt",
".",
"tgt_vocab_size",
",",
"opt",
".",
"tgt_words_min_frequency",
")",
"# Can't save fields, so remove/reconstruct at training time.",
"vocab_file",
"=",
"opt",
".",
"save_data",
"+",
"'.vocab.pt'",
"torch",
".",
"save",
"(",
"inputters",
".",
"save_fields_to_vocab",
"(",
"fields",
")",
",",
"vocab_file",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/preprocess.py#L245-L258 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/train.py | python | run | (opt, device_id, error_queue) | run process | run process | [
"run",
"process"
] | def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc())) | [
"def",
"run",
"(",
"opt",
",",
"device_id",
",",
"error_queue",
")",
":",
"try",
":",
"gpu_rank",
"=",
"onmt",
".",
"utils",
".",
"distributed",
".",
"multi_init",
"(",
"opt",
",",
"device_id",
")",
"if",
"gpu_rank",
"!=",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
":",
"raise",
"AssertionError",
"(",
"\"An error occurred in \\\n Distributed initialization\"",
")",
"single_main",
"(",
"opt",
",",
"device_id",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"# killed by parent, do nothing",
"except",
"Exception",
":",
"# propagate exception to parent process, keeping original traceback",
"import",
"traceback",
"error_queue",
".",
"put",
"(",
"(",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/train.py#L56-L69 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/train.py | python | ErrorHandler.__init__ | (self, error_queue) | init error handler | init error handler | [
"init",
"error",
"handler"
] | def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler) | [
"def",
"__init__",
"(",
"self",
",",
"error_queue",
")",
":",
"import",
"signal",
"import",
"threading",
"self",
".",
"error_queue",
"=",
"error_queue",
"self",
".",
"children_pids",
"=",
"[",
"]",
"self",
".",
"error_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"error_listener",
",",
"daemon",
"=",
"True",
")",
"self",
".",
"error_thread",
".",
"start",
"(",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGUSR1",
",",
"self",
".",
"signal_handler",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/train.py#L76-L85 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/train.py | python | ErrorHandler.add_child | (self, pid) | error handler | error handler | [
"error",
"handler"
] | def add_child(self, pid):
""" error handler """
self.children_pids.append(pid) | [
"def",
"add_child",
"(",
"self",
",",
"pid",
")",
":",
"self",
".",
"children_pids",
".",
"append",
"(",
"pid",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/train.py#L87-L89 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/train.py | python | ErrorHandler.error_listener | (self) | error listener | error listener | [
"error",
"listener"
] | def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1) | [
"def",
"error_listener",
"(",
"self",
")",
":",
"(",
"rank",
",",
"original_trace",
")",
"=",
"self",
".",
"error_queue",
".",
"get",
"(",
")",
"self",
".",
"error_queue",
".",
"put",
"(",
"(",
"rank",
",",
"original_trace",
")",
")",
"os",
".",
"kill",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"signal",
".",
"SIGUSR1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/train.py#L91-L95 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/train.py | python | ErrorHandler.signal_handler | (self, signalnum, stackframe) | signal handler | signal handler | [
"signal",
"handler"
] | def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg) | [
"def",
"signal_handler",
"(",
"self",
",",
"signalnum",
",",
"stackframe",
")",
":",
"for",
"pid",
"in",
"self",
".",
"children_pids",
":",
"os",
".",
"kill",
"(",
"pid",
",",
"signal",
".",
"SIGINT",
")",
"# kill children processes",
"(",
"rank",
",",
"original_trace",
")",
"=",
"self",
".",
"error_queue",
".",
"get",
"(",
")",
"msg",
"=",
"\"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"",
"msg",
"+=",
"original_trace",
"raise",
"Exception",
"(",
"msg",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/train.py#L97-L105 |
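The four ErrorHandler rows above implement one pattern: each worker traps its own exception, ships `(rank, traceback)` through a shared queue, and the parent re-raises it so multi-GPU training never hangs silently. Below is a condensed, single-file version of the queue part only; the real handler additionally runs a listener thread and signals the parent with SIGUSR1 so it reacts before `join` returns:

```python
import multiprocessing as mp
import traceback

def worker(rank, error_queue):
    try:
        if rank == 1:
            raise RuntimeError("simulated failure on rank 1")
        # ... the per-GPU training loop would run here ...
    except Exception:
        # ship the traceback home instead of dying silently
        error_queue.put((rank, traceback.format_exc()))

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    error_queue = ctx.Queue()
    procs = [ctx.Process(target=worker, args=(r, error_queue)) for r in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    if not error_queue.empty():
        rank, tb = error_queue.get()
        raise RuntimeError(f"worker {rank} failed:\n{tb}")
```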
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/model_builder.py | python | build_embeddings | (opt, word_dict, feature_dicts, for_encoder=True) | return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam") | Build an Embeddings instance.
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder? | Build an Embeddings instance.
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder? | [
"Build",
"an",
"Embeddings",
"instance",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"word_dict",
"(",
"Vocab",
")",
":",
"words",
"dictionary",
".",
"feature_dicts",
"(",
"[",
"Vocab",
"]",
"optional",
")",
":",
"a",
"list",
"of",
"feature",
"dictionary",
".",
"for_encoder",
"(",
"bool",
")",
":",
"build",
"Embeddings",
"for",
"encoder",
"or",
"decoder?"
] | def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
"""
Build an Embeddings instance.
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[inputters.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam") | [
"def",
"build_embeddings",
"(",
"opt",
",",
"word_dict",
",",
"feature_dicts",
",",
"for_encoder",
"=",
"True",
")",
":",
"if",
"for_encoder",
":",
"embedding_dim",
"=",
"opt",
".",
"src_word_vec_size",
"else",
":",
"embedding_dim",
"=",
"opt",
".",
"tgt_word_vec_size",
"word_padding_idx",
"=",
"word_dict",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"num_word_embeddings",
"=",
"len",
"(",
"word_dict",
")",
"feats_padding_idx",
"=",
"[",
"feat_dict",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"for",
"feat_dict",
"in",
"feature_dicts",
"]",
"num_feat_embeddings",
"=",
"[",
"len",
"(",
"feat_dict",
")",
"for",
"feat_dict",
"in",
"feature_dicts",
"]",
"return",
"Embeddings",
"(",
"word_vec_size",
"=",
"embedding_dim",
",",
"position_encoding",
"=",
"opt",
".",
"position_encoding",
",",
"feat_merge",
"=",
"opt",
".",
"feat_merge",
",",
"feat_vec_exponent",
"=",
"opt",
".",
"feat_vec_exponent",
",",
"feat_vec_size",
"=",
"opt",
".",
"feat_vec_size",
",",
"dropout",
"=",
"opt",
".",
"dropout",
",",
"word_padding_idx",
"=",
"word_padding_idx",
",",
"feat_padding_idx",
"=",
"feats_padding_idx",
",",
"word_vocab_size",
"=",
"num_word_embeddings",
",",
"feat_vocab_sizes",
"=",
"num_feat_embeddings",
",",
"sparse",
"=",
"opt",
".",
"optim",
"==",
"\"sparseadam\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/model_builder.py#L29-L61 |
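A rough picture of what the Embeddings object assembled above does with word features when `feat_merge` is "concat": each feature stream gets its own lookup table and the resulting vectors are concatenated onto the word vector. The vocabulary sizes, dimensions, and the POS-tag feature below are arbitrary examples:

```python
import torch
import torch.nn as nn

word_emb = nn.Embedding(1000, 96, padding_idx=1)   # word lookup table
pos_emb = nn.Embedding(50, 16, padding_idx=1)      # e.g. a POS-tag feature table

words = torch.tensor([[4, 17, 1]])                 # (batch, seq) word ids, last is <pad>
feats = torch.tensor([[3, 9, 1]])                  # aligned feature ids
vectors = torch.cat([word_emb(words), pos_emb(feats)], dim=-1)
print(vectors.shape)                               # torch.Size([1, 3, 112])
```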
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/model_builder.py | python | build_encoder | (opt, embeddings) | Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder. | Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder. | [
"Various",
"encoder",
"dispatcher",
"function",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"embeddings",
"(",
"Embeddings",
")",
":",
"vocab",
"embeddings",
"for",
"this",
"encoder",
"."
] | def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.dropout, embeddings,
opt.bridge) | [
"def",
"build_encoder",
"(",
"opt",
",",
"embeddings",
")",
":",
"if",
"opt",
".",
"encoder_type",
"==",
"\"transformer\"",
":",
"return",
"TransformerEncoder",
"(",
"opt",
".",
"enc_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"heads",
",",
"opt",
".",
"transformer_ff",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"encoder_type",
"==",
"\"cnn\"",
":",
"return",
"CNNEncoder",
"(",
"opt",
".",
"enc_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"cnn_kernel_width",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"encoder_type",
"==",
"\"mean\"",
":",
"return",
"MeanEncoder",
"(",
"opt",
".",
"enc_layers",
",",
"embeddings",
")",
"else",
":",
"# \"rnn\" or \"brnn\"",
"return",
"RNNEncoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"enc_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"bridge",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/model_builder.py#L64-L85 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/model_builder.py | python | build_decoder | (opt, embeddings) | Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder. | Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder. | [
"Various",
"decoder",
"dispatcher",
"function",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"embeddings",
"(",
"Embeddings",
")",
":",
"vocab",
"embeddings",
"for",
"this",
"decoder",
"."
] | def build_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.rnn_size,
opt.heads, opt.transformer_ff,
opt.global_attention, opt.copy_attn,
opt.self_attn_type,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn) | [
"def",
"build_decoder",
"(",
"opt",
",",
"embeddings",
")",
":",
"if",
"opt",
".",
"decoder_type",
"==",
"\"transformer\"",
":",
"return",
"TransformerDecoder",
"(",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"heads",
",",
"opt",
".",
"transformer_ff",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"self_attn_type",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"decoder_type",
"==",
"\"cnn\"",
":",
"return",
"CNNDecoder",
"(",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"cnn_kernel_width",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"input_feed",
":",
"return",
"InputFeedRNNDecoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"global_attention_function",
",",
"opt",
".",
"coverage_attn",
",",
"opt",
".",
"context_gate",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"reuse_copy_attn",
")",
"else",
":",
"return",
"StdRNNDecoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"global_attention_function",
",",
"opt",
".",
"coverage_attn",
",",
"opt",
".",
"context_gate",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"reuse_copy_attn",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/model_builder.py#L88-L127 |
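Both builder rows above follow the same dispatcher shape: a single option string picks the module class, so nothing downstream has to branch on architecture. A toy version of that pattern, with placeholder module choices rather than OpenNMT's actual encoders:

```python
import torch.nn as nn

ENCODERS = {
    "rnn":  lambda d: nn.LSTM(d, d, batch_first=True),
    "mean": lambda d: nn.Identity(),
}

def build_toy_encoder(encoder_type, hidden_size):
    try:
        return ENCODERS[encoder_type](hidden_size)
    except KeyError:
        raise ValueError(f"unknown encoder_type: {encoder_type!r}")

enc = build_toy_encoder("rnn", 8)   # -> an LSTM; "mean" would return an Identity module
```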
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/model_builder.py | python | build_base_model | (model_opt, fields, gpu, checkpoint=None) | return model | Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel. | Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel. | [
"Args",
":",
"model_opt",
":",
"the",
"option",
"loaded",
"from",
"checkpoint",
".",
"fields",
":",
"Field",
"objects",
"for",
"the",
"model",
".",
"gpu",
"(",
"bool",
")",
":",
"whether",
"to",
"use",
"gpu",
".",
"checkpoint",
":",
"the",
"model",
"gnerated",
"by",
"train",
"phase",
"or",
"a",
"resumed",
"snapshot",
"model",
"from",
"a",
"stopped",
"training",
".",
"Returns",
":",
"the",
"NMTModel",
"."
] | def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
# Build encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
model_parameters = filter(lambda p: p.requires_grad, encoder.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
# Build decoder.
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
model.model_type = model_opt.model_type
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)), gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.rnn_size,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
model.to(device)
return model | [
"def",
"build_base_model",
"(",
"model_opt",
",",
"fields",
",",
"gpu",
",",
"checkpoint",
"=",
"None",
")",
":",
"assert",
"model_opt",
".",
"model_type",
"in",
"[",
"\"text\"",
",",
"\"img\"",
",",
"\"audio\"",
"]",
",",
"(",
"\"Unsupported model type %s\"",
"%",
"(",
"model_opt",
".",
"model_type",
")",
")",
"# Build encoder.",
"if",
"model_opt",
".",
"model_type",
"==",
"\"text\"",
":",
"src_dict",
"=",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
"feature_dicts",
"=",
"inputters",
".",
"collect_feature_vocabs",
"(",
"fields",
",",
"'src'",
")",
"src_embeddings",
"=",
"build_embeddings",
"(",
"model_opt",
",",
"src_dict",
",",
"feature_dicts",
")",
"encoder",
"=",
"build_encoder",
"(",
"model_opt",
",",
"src_embeddings",
")",
"elif",
"model_opt",
".",
"model_type",
"==",
"\"img\"",
":",
"if",
"(",
"\"image_channel_size\"",
"not",
"in",
"model_opt",
".",
"__dict__",
")",
":",
"image_channel_size",
"=",
"3",
"else",
":",
"image_channel_size",
"=",
"model_opt",
".",
"image_channel_size",
"encoder",
"=",
"ImageEncoder",
"(",
"model_opt",
".",
"enc_layers",
",",
"model_opt",
".",
"brnn",
",",
"model_opt",
".",
"rnn_size",
",",
"model_opt",
".",
"dropout",
",",
"image_channel_size",
")",
"elif",
"model_opt",
".",
"model_type",
"==",
"\"audio\"",
":",
"encoder",
"=",
"AudioEncoder",
"(",
"model_opt",
".",
"enc_layers",
",",
"model_opt",
".",
"brnn",
",",
"model_opt",
".",
"rnn_size",
",",
"model_opt",
".",
"dropout",
",",
"model_opt",
".",
"sample_rate",
",",
"model_opt",
".",
"window_size",
")",
"model_parameters",
"=",
"filter",
"(",
"lambda",
"p",
":",
"p",
".",
"requires_grad",
",",
"encoder",
".",
"parameters",
"(",
")",
")",
"params",
"=",
"sum",
"(",
"[",
"np",
".",
"prod",
"(",
"p",
".",
"size",
"(",
")",
")",
"for",
"p",
"in",
"model_parameters",
"]",
")",
"# Build decoder.",
"tgt_dict",
"=",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"feature_dicts",
"=",
"inputters",
".",
"collect_feature_vocabs",
"(",
"fields",
",",
"'tgt'",
")",
"tgt_embeddings",
"=",
"build_embeddings",
"(",
"model_opt",
",",
"tgt_dict",
",",
"feature_dicts",
",",
"for_encoder",
"=",
"False",
")",
"# Share the embedding matrix - preprocess with share_vocab required.",
"if",
"model_opt",
".",
"share_embeddings",
":",
"# src/tgt vocab should be the same if `-share_vocab` is specified.",
"if",
"src_dict",
"!=",
"tgt_dict",
":",
"raise",
"AssertionError",
"(",
"'The `-share_vocab` should be set during '",
"'preprocess if you use share_embeddings!'",
")",
"tgt_embeddings",
".",
"word_lut",
".",
"weight",
"=",
"src_embeddings",
".",
"word_lut",
".",
"weight",
"decoder",
"=",
"build_decoder",
"(",
"model_opt",
",",
"tgt_embeddings",
")",
"# Build NMTModel(= encoder + decoder).",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
"if",
"gpu",
"else",
"\"cpu\"",
")",
"model",
"=",
"onmt",
".",
"models",
".",
"NMTModel",
"(",
"encoder",
",",
"decoder",
")",
"model",
".",
"model_type",
"=",
"model_opt",
".",
"model_type",
"# Build Generator.",
"if",
"not",
"model_opt",
".",
"copy_attn",
":",
"if",
"model_opt",
".",
"generator_function",
"==",
"\"sparsemax\"",
":",
"gen_func",
"=",
"onmt",
".",
"modules",
".",
"sparse_activations",
".",
"LogSparsemax",
"(",
"dim",
"=",
"-",
"1",
")",
"else",
":",
"gen_func",
"=",
"nn",
".",
"LogSoftmax",
"(",
"dim",
"=",
"-",
"1",
")",
"generator",
"=",
"nn",
".",
"Sequential",
"(",
"nn",
".",
"Linear",
"(",
"model_opt",
".",
"rnn_size",
",",
"len",
"(",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
")",
",",
"gen_func",
")",
"if",
"model_opt",
".",
"share_decoder_embeddings",
":",
"generator",
"[",
"0",
"]",
".",
"weight",
"=",
"decoder",
".",
"embeddings",
".",
"word_lut",
".",
"weight",
"else",
":",
"generator",
"=",
"CopyGenerator",
"(",
"model_opt",
".",
"rnn_size",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
"# Load the model states from checkpoint or initialize them.",
"if",
"checkpoint",
"is",
"not",
"None",
":",
"model",
".",
"load_state_dict",
"(",
"checkpoint",
"[",
"'model'",
"]",
")",
"generator",
".",
"load_state_dict",
"(",
"checkpoint",
"[",
"'generator'",
"]",
")",
"else",
":",
"if",
"model_opt",
".",
"param_init",
"!=",
"0.0",
":",
"for",
"p",
"in",
"model",
".",
"parameters",
"(",
")",
":",
"p",
".",
"data",
".",
"uniform_",
"(",
"-",
"model_opt",
".",
"param_init",
",",
"model_opt",
".",
"param_init",
")",
"for",
"p",
"in",
"generator",
".",
"parameters",
"(",
")",
":",
"p",
".",
"data",
".",
"uniform_",
"(",
"-",
"model_opt",
".",
"param_init",
",",
"model_opt",
".",
"param_init",
")",
"if",
"model_opt",
".",
"param_init_glorot",
":",
"for",
"p",
"in",
"model",
".",
"parameters",
"(",
")",
":",
"if",
"p",
".",
"dim",
"(",
")",
">",
"1",
":",
"xavier_uniform_",
"(",
"p",
")",
"for",
"p",
"in",
"generator",
".",
"parameters",
"(",
")",
":",
"if",
"p",
".",
"dim",
"(",
")",
">",
"1",
":",
"xavier_uniform_",
"(",
"p",
")",
"if",
"hasattr",
"(",
"model",
".",
"encoder",
",",
"'embeddings'",
")",
":",
"model",
".",
"encoder",
".",
"embeddings",
".",
"load_pretrained_vectors",
"(",
"model_opt",
".",
"pre_word_vecs_enc",
",",
"model_opt",
".",
"fix_word_vecs_enc",
")",
"if",
"hasattr",
"(",
"model",
".",
"decoder",
",",
"'embeddings'",
")",
":",
"model",
".",
"decoder",
".",
"embeddings",
".",
"load_pretrained_vectors",
"(",
"model_opt",
".",
"pre_word_vecs_dec",
",",
"model_opt",
".",
"fix_word_vecs_dec",
")",
"# Add generator to model (this registers it as parameter of model).",
"model",
".",
"generator",
"=",
"generator",
"model",
".",
"to",
"(",
"device",
")",
"return",
"model"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/model_builder.py#L148-L255 |
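A minimal illustration of the embedding sharing performed above: with `-share_embeddings` the source and target lookup tables point at one matrix, and with `-share_decoder_embeddings` the generator's output projection is tied to it as well, so the vocabulary is parameterised only once. The sizes below are arbitrary:

```python
import torch.nn as nn

vocab_size, dim = 100, 16
src_emb = nn.Embedding(vocab_size, dim)
tgt_emb = nn.Embedding(vocab_size, dim)
generator = nn.Linear(dim, vocab_size)

tgt_emb.weight = src_emb.weight        # share src/tgt lookup tables
generator.weight = tgt_emb.weight      # tie the output projection to the embeddings

assert generator.weight.data_ptr() == src_emb.weight.data_ptr()
```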
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/model_builder.py | python | build_model | (model_opt, opt, fields, checkpoint) | return model | Build the Model | Build the Model | [
"Build",
"the",
"Model"
] | def build_model(model_opt, opt, fields, checkpoint):
""" Build the Model """
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model | [
"def",
"build_model",
"(",
"model_opt",
",",
"opt",
",",
"fields",
",",
"checkpoint",
")",
":",
"logger",
".",
"info",
"(",
"'Building model...'",
")",
"model",
"=",
"build_base_model",
"(",
"model_opt",
",",
"fields",
",",
"use_gpu",
"(",
"opt",
")",
",",
"checkpoint",
")",
"logger",
".",
"info",
"(",
"model",
")",
"return",
"model"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/model_builder.py#L258-L264 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | build_trainer | (opt, device_id, model, fields,
optim, data_type, model_saver=None) | return trainer | Simplify `Trainer` creation based on user `opt`s*
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model | Simplify `Trainer` creation based on user `opt`s* | [
"Simplify",
"Trainer",
"creation",
"based",
"on",
"user",
"opt",
"s",
"*"
] | def build_trainer(opt, device_id, model, fields,
optim, data_type, model_saver=None):
"""
Simplify `Trainer` creation based on user `opt`s*
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
train_loss = onmt.utils.loss.build_loss_compute(
model, fields["tgt"].vocab, opt)
valid_loss = onmt.utils.loss.build_loss_compute(
model, fields["tgt"].vocab, opt, train=False)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
norm_method = opt.normalization
grad_accum_count = opt.accum_count
n_gpu = opt.world_size
if device_id >= 0:
gpu_rank = opt.gpu_ranks[device_id]
else:
gpu_rank = 0
n_gpu = 0
gpu_verbose_level = opt.gpu_verbose_level
report_manager = onmt.utils.build_report_manager(opt)
trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,
shard_size, data_type, norm_method,
grad_accum_count, n_gpu, gpu_rank,
gpu_verbose_level, report_manager,
model_saver=model_saver)
return trainer | [
"def",
"build_trainer",
"(",
"opt",
",",
"device_id",
",",
"model",
",",
"fields",
",",
"optim",
",",
"data_type",
",",
"model_saver",
"=",
"None",
")",
":",
"train_loss",
"=",
"onmt",
".",
"utils",
".",
"loss",
".",
"build_loss_compute",
"(",
"model",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
",",
"opt",
")",
"valid_loss",
"=",
"onmt",
".",
"utils",
".",
"loss",
".",
"build_loss_compute",
"(",
"model",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
",",
"opt",
",",
"train",
"=",
"False",
")",
"trunc_size",
"=",
"opt",
".",
"truncated_decoder",
"# Badly named...",
"shard_size",
"=",
"opt",
".",
"max_generator_batches",
"norm_method",
"=",
"opt",
".",
"normalization",
"grad_accum_count",
"=",
"opt",
".",
"accum_count",
"n_gpu",
"=",
"opt",
".",
"world_size",
"if",
"device_id",
">=",
"0",
":",
"gpu_rank",
"=",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
"else",
":",
"gpu_rank",
"=",
"0",
"n_gpu",
"=",
"0",
"gpu_verbose_level",
"=",
"opt",
".",
"gpu_verbose_level",
"report_manager",
"=",
"onmt",
".",
"utils",
".",
"build_report_manager",
"(",
"opt",
")",
"trainer",
"=",
"onmt",
".",
"Trainer",
"(",
"model",
",",
"train_loss",
",",
"valid_loss",
",",
"optim",
",",
"trunc_size",
",",
"shard_size",
",",
"data_type",
",",
"norm_method",
",",
"grad_accum_count",
",",
"n_gpu",
",",
"gpu_rank",
",",
"gpu_verbose_level",
",",
"report_manager",
",",
"model_saver",
"=",
"model_saver",
")",
"return",
"trainer"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L23-L61 |
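A self-contained toy showing the two training knobs wired up here: gradients from `accum_count` mini-batches are accumulated before a single optimizer step, and the sum-reduced loss is divided by the number of non-padding target tokens (the "tokens" normalization). The linear model and random data are placeholders for the real NMT model and batches:

```python
import torch
import torch.nn as nn

pad_idx = 0
model = nn.Linear(8, 10)                           # stand-in for the NMT model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.CrossEntropyLoss(ignore_index=pad_idx, reduction="sum")

accum_count = 2
optimizer.zero_grad()
for i in range(1, 5):                              # four "mini-batches"
    x = torch.randn(6, 8)
    y = torch.randint(0, 10, (6,))
    num_tokens = y.ne(pad_idx).sum().clamp(min=1)  # "tokens" normalization
    loss = loss_fn(model(x), y) / num_tokens
    loss.backward()                                # gradients accumulate across batches
    if i % accum_count == 0:                       # one step per accum_count batches
        optimizer.step()
        optimizer.zero_grad()
```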
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer.train | (self, train_iter_fct, valid_iter_fct, train_steps, valid_steps) | return total_stats | The main training loops.
by iterating over training data (i.e. `train_iter_fct`)
and running validation (i.e. iterating over `valid_iter_fct`
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Return:
None | The main training loops.
by iterating over training data (i.e. `train_iter_fct`)
and running validation (i.e. iterating over `valid_iter_fct` | [
"The",
"main",
"training",
"loops",
".",
"by",
"iterating",
"over",
"training",
"data",
"(",
"i",
".",
"e",
".",
"train_iter_fct",
")",
"and",
"running",
"validation",
"(",
"i",
".",
"e",
".",
"iterating",
"over",
"valid_iter_fct"
] | def train(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps):
"""
The main training loops.
by iterating over training data (i.e. `train_iter_fct`)
and running validation (i.e. iterating over `valid_iter_fct`
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Return:
None
"""
logger.info('Start training...')
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = onmt.utils.Statistics()
report_stats = onmt.utils.Statistics()
self._start_report_manager(start_time=total_stats.start_time)
#pdb.set_trace()
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
if self.gpu_verbose_level > 1:
logger.info("GpuRank %d: index: %d accum: %d"
% (self.gpu_rank, i, accum))
true_batchs.append(batch)
if self.norm_method == "tokens":
num_tokens = batch.tgt[1:].ne(
self.train_loss.padding_idx).sum()
normalization += num_tokens.item()
else:
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.gpu_verbose_level > 0:
logger.info("GpuRank %d: reduce_counter: %d \
n_minibatch %d"
% (self.gpu_rank, reduce_counter,
len(true_batchs)))
if self.n_gpu > 1:
normalization = sum(onmt.utils.distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % valid_steps == 0):
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: validate step %d'
% (self.gpu_rank, step))
valid_iter = valid_iter_fct()
with torch.no_grad():
valid_stats = self.validate(valid_iter)
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: gather valid stat \
step %d' % (self.gpu_rank, step))
valid_stats = self._maybe_gather_stats(valid_stats)
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: report stat step %d'
% (self.gpu_rank, step))
self._report_step(self.optim.learning_rate,
step, valid_stats=valid_stats)
if self.gpu_rank == 0:
self._maybe_save(step)
step += 1
if step > train_steps:
break
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: we completed an epoch \
at step %d' % (self.gpu_rank, step))
train_iter = train_iter_fct()
return total_stats | [
"def",
"train",
"(",
"self",
",",
"train_iter_fct",
",",
"valid_iter_fct",
",",
"train_steps",
",",
"valid_steps",
")",
":",
"logger",
".",
"info",
"(",
"'Start training...'",
")",
"step",
"=",
"self",
".",
"optim",
".",
"_step",
"+",
"1",
"true_batchs",
"=",
"[",
"]",
"accum",
"=",
"0",
"normalization",
"=",
"0",
"train_iter",
"=",
"train_iter_fct",
"(",
")",
"total_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"report_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"self",
".",
"_start_report_manager",
"(",
"start_time",
"=",
"total_stats",
".",
"start_time",
")",
"#pdb.set_trace()",
"while",
"step",
"<=",
"train_steps",
":",
"reduce_counter",
"=",
"0",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"train_iter",
")",
":",
"if",
"self",
".",
"n_gpu",
"==",
"0",
"or",
"(",
"i",
"%",
"self",
".",
"n_gpu",
"==",
"self",
".",
"gpu_rank",
")",
":",
"if",
"self",
".",
"gpu_verbose_level",
">",
"1",
":",
"logger",
".",
"info",
"(",
"\"GpuRank %d: index: %d accum: %d\"",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"i",
",",
"accum",
")",
")",
"true_batchs",
".",
"append",
"(",
"batch",
")",
"if",
"self",
".",
"norm_method",
"==",
"\"tokens\"",
":",
"num_tokens",
"=",
"batch",
".",
"tgt",
"[",
"1",
":",
"]",
".",
"ne",
"(",
"self",
".",
"train_loss",
".",
"padding_idx",
")",
".",
"sum",
"(",
")",
"normalization",
"+=",
"num_tokens",
".",
"item",
"(",
")",
"else",
":",
"normalization",
"+=",
"batch",
".",
"batch_size",
"accum",
"+=",
"1",
"if",
"accum",
"==",
"self",
".",
"grad_accum_count",
":",
"reduce_counter",
"+=",
"1",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"reduce_counter",
",",
"len",
"(",
"true_batchs",
")",
")",
")",
"if",
"self",
".",
"n_gpu",
">",
"1",
":",
"normalization",
"=",
"sum",
"(",
"onmt",
".",
"utils",
".",
"distributed",
".",
"all_gather_list",
"(",
"normalization",
")",
")",
"self",
".",
"_gradient_accumulation",
"(",
"true_batchs",
",",
"normalization",
",",
"total_stats",
",",
"report_stats",
")",
"report_stats",
"=",
"self",
".",
"_maybe_report_training",
"(",
"step",
",",
"train_steps",
",",
"self",
".",
"optim",
".",
"learning_rate",
",",
"report_stats",
")",
"true_batchs",
"=",
"[",
"]",
"accum",
"=",
"0",
"normalization",
"=",
"0",
"if",
"(",
"step",
"%",
"valid_steps",
"==",
"0",
")",
":",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: validate step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"valid_iter",
"=",
"valid_iter_fct",
"(",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"valid_stats",
"=",
"self",
".",
"validate",
"(",
"valid_iter",
")",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: gather valid stat \\\n step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"valid_stats",
"=",
"self",
".",
"_maybe_gather_stats",
"(",
"valid_stats",
")",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: report stat step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"self",
".",
"_report_step",
"(",
"self",
".",
"optim",
".",
"learning_rate",
",",
"step",
",",
"valid_stats",
"=",
"valid_stats",
")",
"if",
"self",
".",
"gpu_rank",
"==",
"0",
":",
"self",
".",
"_maybe_save",
"(",
"step",
")",
"step",
"+=",
"1",
"if",
"step",
">",
"train_steps",
":",
"break",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: we completed an epoch \\\n at step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"train_iter",
"=",
"train_iter_fct",
"(",
")",
"return",
"total_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L118-L217 |
|
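A hedged usage sketch for the Trainer.train API documented in the row above (not part of the dataset). It relies only on the signature shown there; the iterator factories are supplied by the caller.

```python
def run_training(trainer, train_iter_fct, valid_iter_fct, opt):
    # train_iter_fct / valid_iter_fct must be zero-argument callables that
    # return *fresh* batch iterators, e.g. lambda: generator(*args, **kwargs),
    # because the loop rebuilds the training iterator after every full pass.
    stats = trainer.train(train_iter_fct, valid_iter_fct,
                          train_steps=opt.train_steps,
                          valid_steps=opt.valid_steps)
    return stats  # accumulated onmt.utils.Statistics for the whole run
```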
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer.validate | (self, valid_iter) | return stats | Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics | Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics | [
"Validate",
"model",
".",
"valid_iter",
":",
"validate",
"data",
"iterator",
"Returns",
":",
":",
"obj",
":",
"nmt",
".",
"Statistics",
":",
"validation",
"loss",
"statistics"
] | def validate(self, valid_iter):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = onmt.utils.Statistics()
for batch in valid_iter:
src = inputters.make_features(batch, 'src', self.data_type)
if self.data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
tgt = inputters.make_features(batch, 'tgt')
# F-prop through the model.
outputs, attns, _ = self.model(src, tgt, src_lengths)
# Compute loss.
batch_stats = self.valid_loss.monolithic_compute_loss(
batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats | [
"def",
"validate",
"(",
"self",
",",
"valid_iter",
")",
":",
"# Set model in validating mode.",
"self",
".",
"model",
".",
"eval",
"(",
")",
"stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"for",
"batch",
"in",
"valid_iter",
":",
"src",
"=",
"inputters",
".",
"make_features",
"(",
"batch",
",",
"'src'",
",",
"self",
".",
"data_type",
")",
"if",
"self",
".",
"data_type",
"==",
"'text'",
":",
"_",
",",
"src_lengths",
"=",
"batch",
".",
"src",
"else",
":",
"src_lengths",
"=",
"None",
"tgt",
"=",
"inputters",
".",
"make_features",
"(",
"batch",
",",
"'tgt'",
")",
"# F-prop through the model.",
"outputs",
",",
"attns",
",",
"_",
"=",
"self",
".",
"model",
"(",
"src",
",",
"tgt",
",",
"src_lengths",
")",
"# Compute loss.",
"batch_stats",
"=",
"self",
".",
"valid_loss",
".",
"monolithic_compute_loss",
"(",
"batch",
",",
"outputs",
",",
"attns",
")",
"# Update statistics.",
"stats",
".",
"update",
"(",
"batch_stats",
")",
"# Set model back to training mode.",
"self",
".",
"model",
".",
"train",
"(",
")",
"return",
"stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L219-L252 |
|
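A hedged sketch of calling the validate() method from the row above (not part of the dataset); the ppl()/accuracy() accessors on the returned onmt.utils.Statistics are an assumption about that class.

```python
import torch

def run_validation(trainer, valid_iter_fct):
    # validate() switches the model to eval mode, scores every batch with the
    # validation loss, restores train mode, and returns onmt.utils.Statistics.
    with torch.no_grad():  # mirrors how the train loop above wraps the call
        valid_stats = trainer.validate(valid_iter_fct())
    # ppl()/accuracy() are assumed Statistics accessors (standard in OpenNMT-py)
    return valid_stats.ppl(), valid_stats.accuracy()
```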
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer._start_report_manager | (self, start_time=None) | Simple function to start report manager (if any) | Simple function to start report manager (if any) | [
"Simple",
"function",
"to",
"start",
"report",
"manager",
"(",
"if",
"any",
")"
] | def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time | [
"def",
"_start_report_manager",
"(",
"self",
",",
"start_time",
"=",
"None",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"if",
"start_time",
"is",
"None",
":",
"self",
".",
"report_manager",
".",
"start",
"(",
")",
"else",
":",
"self",
".",
"report_manager",
".",
"start_time",
"=",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L320-L328 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer._maybe_gather_stats | (self, stat) | return stat | Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object | Gather statistics in multi-processes cases | [
"Gather",
"statistics",
"in",
"multi",
"-",
"processes",
"cases"
] | def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return onmt.utils.Statistics.all_gather_stats(stat)
return stat | [
"def",
"_maybe_gather_stats",
"(",
"self",
",",
"stat",
")",
":",
"if",
"stat",
"is",
"not",
"None",
"and",
"self",
".",
"n_gpu",
">",
"1",
":",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
".",
"all_gather_stats",
"(",
"stat",
")",
"return",
"stat"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L330-L343 |
|
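A hedged sketch (not part of the dataset) of how the _maybe_gather_stats helper above is meant to be used right after validation.

```python
def gathered_validation_stats(trainer, valid_iter_fct):
    # With a single process this is a pass-through; with n_gpu > 1 each rank's
    # Statistics are merged via onmt.utils.Statistics.all_gather_stats.
    local_stats = trainer.validate(valid_iter_fct())
    return trainer._maybe_gather_stats(local_stats)
```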
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer._maybe_report_training | (self, step, num_steps, learning_rate,
report_stats) | Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc | Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc | [
"Simple",
"function",
"to",
"report",
"training",
"stats",
"(",
"if",
"report_manager",
"is",
"set",
")",
"see",
"onmt",
".",
"utils",
".",
"ReportManagerBase",
".",
"report_training",
"for",
"doc"
] | def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1) | [
"def",
"_maybe_report_training",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"return",
"self",
".",
"report_manager",
".",
"report_training",
"(",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
",",
"multigpu",
"=",
"self",
".",
"n_gpu",
">",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L345-L354 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer._report_step | (self, learning_rate, step, train_stats=None,
valid_stats=None) | Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc | Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc | [
"Simple",
"function",
"to",
"report",
"stats",
"(",
"if",
"report_manager",
"is",
"set",
")",
"see",
"onmt",
".",
"utils",
".",
"ReportManagerBase",
".",
"report_step",
"for",
"doc"
] | def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats) | [
"def",
"_report_step",
"(",
"self",
",",
"learning_rate",
",",
"step",
",",
"train_stats",
"=",
"None",
",",
"valid_stats",
"=",
"None",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"return",
"self",
".",
"report_manager",
".",
"report_step",
"(",
"learning_rate",
",",
"step",
",",
"train_stats",
"=",
"train_stats",
",",
"valid_stats",
"=",
"valid_stats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L356-L365 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/trainer.py | python | Trainer._maybe_save | (self, step) | Save the model if a model saver is set | Save the model if a model saver is set | [
"Save",
"the",
"model",
"if",
"a",
"model",
"saver",
"is",
"set"
] | def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step) | [
"def",
"_maybe_save",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"model_saver",
"is",
"not",
"None",
":",
"self",
".",
"model_saver",
".",
"maybe_save",
"(",
"step",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/trainer.py#L367-L372 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/opts.py | python | model_opts | (parser) | These options are passed to the construction of the model.
Be careful with these as they will be used during translation. | These options are passed to the construction of the model.
Be careful with these as they will be used during translation. | [
"These",
"options",
"are",
"passed",
"to",
"the",
"construction",
"of",
"the",
"model",
".",
"Be",
"careful",
"with",
"these",
"as",
"they",
"will",
"be",
"used",
"during",
"translation",
"."
] | def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Embedding Options
group = parser.add_argument_group('Model-Embeddings')
group.add_argument('-src_word_vec_size', type=int, default=500,
help='Word embedding size for src.')
group.add_argument('-tgt_word_vec_size', type=int, default=500,
help='Word embedding size for tgt.')
group.add_argument('-word_vec_size', type=int, default=-1,
help='Word embedding size for src and tgt.')
group.add_argument('-share_decoder_embeddings', action='store_true',
help="""Use a shared weight matrix for the input and
output word embeddings in the decoder.""")
group.add_argument('-share_embeddings', action='store_true',
help="""Share the word embeddings between encoder
and decoder. Need to use shared dictionary for this
option.""")
group.add_argument('-position_encoding', action='store_true',
help="""Use a sin to mark relative words positions.
Necessary for non-RNN style models.
""")
group = parser.add_argument_group('Model-Embedding Features')
group.add_argument('-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help="""Merge action for incorporating features embeddings.
Options [concat|sum|mlp].""")
group.add_argument('-feat_vec_size', type=int, default=-1,
help="""If specified, feature embedding sizes
will be set to this. Otherwise, feat_vec_exponent
will be used.""")
group.add_argument('-feat_vec_exponent', type=float, default=0.7,
help="""If -feat_merge_size is not set, feature
embedding sizes will be set to N^feat_vec_exponent
where N is the number of values the feature takes.""")
# Encoder-Decoder Options
group = parser.add_argument_group('Model- Encoder-Decoder')
group.add_argument('-model_type', default='text',
help="""Type of source model to use. Allows
the system to incorporate non-text inputs.
Options are [text|img|audio].""")
group.add_argument('-encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="""Type of encoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|brnn|mean|transformer|cnn].""")
group.add_argument('-decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help="""Type of decoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|transformer|cnn].""")
group.add_argument('-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
group.add_argument('-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
group.add_argument('-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add_argument('-rnn_size', type=int, default=500,
help='Size of rnn hidden states')
group.add_argument('-cnn_kernel_width', type=int, default=3,
help="""Size of windows in the cnn, the kernel_size is
(cnn_kernel_width, 1) in conv layer""")
group.add_argument('-input_feed', type=int, default=1,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
group.add_argument('-bridge', action="store_true",
help="""Have an additional layer between the last encoder
state and the first decoder state""")
group.add_argument('-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="""The gate type to use in the RNNs""")
# group.add_argument('-residual', action="store_true",
# help="Add residual connections between RNN layers.")
group.add_argument('-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
group.add_argument('-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="""Type of context gate to use.
Do not select for no context gate.""")
# Attention options
group = parser.add_argument_group('Model- Attention')
group.add_argument('-global_attention', type=str, default='general',
choices=['dot', 'general', 'mlp'],
help="""The attention type to use:
dotprod or general (Luong) or MLP (Bahdanau)""")
group.add_argument('-global_attention_function', type=str,
default="softmax", choices=["softmax", "sparsemax"])
group.add_argument('-self_attn_type', type=str, default="scaled-dot",
help="""Self attention type in Transformer decoder
layer -- currently "scaled-dot" or "average" """)
group.add_argument('-heads', type=int, default=8,
help='Number of heads for transformer self-attention')
group.add_argument('-transformer_ff', type=int, default=2048,
help='Size of hidden transformer feed-forward')
# Generator and loss options.
group.add_argument('-copy_attn', action="store_true",
help='Train copy attention layer.')
group.add_argument('-generator_function', default="log_softmax",
choices=["log_softmax", "sparsemax"],
help="""Which function to use for generating
probabilities over the target vocabulary (choices:
log_softmax, sparsemax)""")
group.add_argument('-copy_attn_force', action="store_true",
help='When available, train to copy.')
group.add_argument('-reuse_copy_attn', action="store_true",
help="Reuse standard attention for copy")
group.add_argument('-copy_loss_by_seqlength', action="store_true",
help="Divide copy loss by length of sequence")
group.add_argument('-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
group.add_argument('-lambda_coverage', type=float, default=1,
help='Lambda value for coverage.') | [
"def",
"model_opts",
"(",
"parser",
")",
":",
"# Embedding Options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model-Embeddings'",
")",
"group",
".",
"add_argument",
"(",
"'-src_word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Word embedding size for src.'",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Word embedding size for tgt.'",
")",
"group",
".",
"add_argument",
"(",
"'-word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'Word embedding size for src and tgt.'",
")",
"group",
".",
"add_argument",
"(",
"'-share_decoder_embeddings'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_embeddings'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-position_encoding'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model-Embedding Features'",
")",
"group",
".",
"add_argument",
"(",
"'-feat_merge'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'concat'",
",",
"choices",
"=",
"[",
"'concat'",
",",
"'sum'",
",",
"'mlp'",
"]",
",",
"help",
"=",
"\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-feat_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-feat_vec_exponent'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.7",
",",
"help",
"=",
"\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\"",
")",
"# Encoder-Decoder Options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model- Encoder-Decoder'",
")",
"group",
".",
"add_argument",
"(",
"'-model_type'",
",",
"default",
"=",
"'text'",
",",
"help",
"=",
"\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-encoder_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'rnn'",
",",
"choices",
"=",
"[",
"'rnn'",
",",
"'brnn'",
",",
"'mean'",
",",
"'transformer'",
",",
"'cnn'",
"]",
",",
"help",
"=",
"\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decoder_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'rnn'",
",",
"choices",
"=",
"[",
"'rnn'",
",",
"'transformer'",
",",
"'cnn'",
"]",
",",
"help",
"=",
"\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|transformer|cnn].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'Number of layers in enc/dec.'",
")",
"group",
".",
"add_argument",
"(",
"'-enc_layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2",
",",
"help",
"=",
"'Number of layers in the encoder'",
")",
"group",
".",
"add_argument",
"(",
"'-dec_layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2",
",",
"help",
"=",
"'Number of layers in the decoder'",
")",
"group",
".",
"add_argument",
"(",
"'-rnn_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Size of rnn hidden states'",
")",
"group",
".",
"add_argument",
"(",
"'-cnn_kernel_width'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"help",
"=",
"\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-input_feed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-bridge'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-rnn_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'LSTM'",
",",
"choices",
"=",
"[",
"'LSTM'",
",",
"'GRU'",
",",
"'SRU'",
"]",
",",
"action",
"=",
"CheckSRU",
",",
"help",
"=",
"\"\"\"The gate type to use in the RNNs\"\"\"",
")",
"# group.add_argument('-residual', action=\"store_true\",",
"# help=\"Add residual connections between RNN layers.\")",
"group",
".",
"add_argument",
"(",
"'-brnn'",
",",
"action",
"=",
"DeprecateAction",
",",
"help",
"=",
"\"Deprecated, use `encoder_type`.\"",
")",
"group",
".",
"add_argument",
"(",
"'-context_gate'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"choices",
"=",
"[",
"'source'",
",",
"'target'",
",",
"'both'",
"]",
",",
"help",
"=",
"\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\"",
")",
"# Attention options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model- Attention'",
")",
"group",
".",
"add_argument",
"(",
"'-global_attention'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'general'",
",",
"choices",
"=",
"[",
"'dot'",
",",
"'general'",
",",
"'mlp'",
"]",
",",
"help",
"=",
"\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-global_attention_function'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"softmax\"",
",",
"choices",
"=",
"[",
"\"softmax\"",
",",
"\"sparsemax\"",
"]",
")",
"group",
".",
"add_argument",
"(",
"'-self_attn_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"scaled-dot\"",
",",
"help",
"=",
"\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-heads'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"8",
",",
"help",
"=",
"'Number of heads for transformer self-attention'",
")",
"group",
".",
"add_argument",
"(",
"'-transformer_ff'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2048",
",",
"help",
"=",
"'Size of hidden transformer feed-forward'",
")",
"# Generator and loss options.",
"group",
".",
"add_argument",
"(",
"'-copy_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Train copy attention layer.'",
")",
"group",
".",
"add_argument",
"(",
"'-generator_function'",
",",
"default",
"=",
"\"log_softmax\"",
",",
"choices",
"=",
"[",
"\"log_softmax\"",
",",
"\"sparsemax\"",
"]",
",",
"help",
"=",
"\"\"\"Which function to use for generating\n probabilities over the target vocabulary (choices:\n log_softmax, sparsemax)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-copy_attn_force'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'When available, train to copy.'",
")",
"group",
".",
"add_argument",
"(",
"'-reuse_copy_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Reuse standard attention for copy\"",
")",
"group",
".",
"add_argument",
"(",
"'-copy_loss_by_seqlength'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Divide copy loss by length of sequence\"",
")",
"group",
".",
"add_argument",
"(",
"'-coverage_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Train a coverage attention layer.'",
")",
"group",
".",
"add_argument",
"(",
"'-lambda_coverage'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"1",
",",
"help",
"=",
"'Lambda value for coverage.'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/opts.py#L8-L134 |
||
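A hedged sketch (not part of the dataset) showing model_opts in use with the standard-library argparse. The printed defaults are the ones listed in the row above; running it assumes the OpenNMT-py-baselines onmt package is importable.

```python
import argparse
import onmt.opts as opts  # the module the row above comes from

parser = argparse.ArgumentParser(description="model_opts demo")
opts.model_opts(parser)
opt = parser.parse_args([])  # every model option has a default
print(opt.rnn_size, opt.enc_layers, opt.encoder_type)  # -> 500 2 rnn
```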
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/opts.py | python | preprocess_opts | (parser) | Pre-procesing options | Pre-procesing options | [
"Pre",
"-",
"procesing",
"options"
] | def preprocess_opts(parser):
""" Pre-procesing options """
# Data options
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="""Type of the source input.
Options are [text|img].""")
group.add_argument('-train_src', required=True,
help="Path to the training source data")
group.add_argument('-train_tgt', required=True,
help="Path to the training target data")
group.add_argument('-valid_src', required=True,
help="Path to the validation source data")
group.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
group.add_argument('-src_dir', default="",
help="Source directory for image or audio files.")
group.add_argument('-save_data', required=True,
help="Output file for the prepared data")
group.add_argument('-max_shard_size', type=int, default=0,
help="""For text corpus of large volume, it will
be divided into shards of this size to preprocess.
If 0, the data will be handled as a whole. The unit
is in bytes. Optimal value should be multiples of
64 bytes. A commonly used sharding value is 131072000.
It is recommended to ensure the corpus is shuffled
before sharding.""")
group.add_argument('-shard_size', type=int, default=0,
help="""Divide src_corpus and tgt_corpus into
smaller multiple src_copus and tgt corpus files, then
build shards, each shard will have
opt.shard_size samples except last shard.
shard_size=0 means no segmentation
shard_size>0 means segment dataset into multiple shards,
each shard has shard_size samples""")
# Dictionary options, for text corpus
group = parser.add_argument_group('Vocab')
group.add_argument('-src_vocab', default="",
help="""Path to an existing source vocabulary. Format:
one word per line.""")
group.add_argument('-tgt_vocab', default="",
help="""Path to an existing target vocabulary. Format:
one word per line.""")
group.add_argument('-features_vocabs_prefix', type=str, default='',
help="Path prefix to existing features vocabularies")
group.add_argument('-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
group.add_argument('-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
group.add_argument('-src_words_min_frequency', type=int, default=0)
group.add_argument('-tgt_words_min_frequency', type=int, default=0)
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
# Truncation options, for text corpus
group = parser.add_argument_group('Pruning')
group.add_argument('-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
group.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
group.add_argument('-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
group.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
group.add_argument('-lower', action='store_true', help='lowercase data')
# Data processing options
group = parser.add_argument_group('Random')
group.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
group.add_argument('-seed', type=int, default=3435,
help="Random seed")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
# Options most relevant to speech
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add_argument('-window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add_argument('-window', default='hamming',
help="Window type for spectrogram generation.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"preprocess_opts",
"(",
"parser",
")",
":",
"# Data options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Data'",
")",
"group",
".",
"add_argument",
"(",
"'-data_type'",
",",
"default",
"=",
"\"text\"",
",",
"help",
"=",
"\"\"\"Type of the source input.\n Options are [text|img].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the training source data\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_tgt'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the training target data\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the validation source data\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_tgt'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the validation target data\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_dir'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Source directory for image or audio files.\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_data'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Output file for the prepared data\"",
")",
"group",
".",
"add_argument",
"(",
"'-max_shard_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-shard_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\"",
")",
"# Dictionary options, for text corpus",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Vocab'",
")",
"group",
".",
"add_argument",
"(",
"'-src_vocab'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_vocab'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"\"\"Path to an existing target vocabulary. Format:\n one word per line.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-features_vocabs_prefix'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"\"Path prefix to existing features vocabularies\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_vocab_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"Size of the source vocabulary\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_vocab_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"Size of the target vocabulary\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_words_min_frequency'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_words_min_frequency'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
")",
"group",
".",
"add_argument",
"(",
"'-dynamic_dict'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Create dynamic dictionaries\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_vocab'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Share source and target vocabulary\"",
")",
"# Truncation options, for text corpus",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Pruning'",
")",
"group",
".",
"add_argument",
"(",
"'-src_seq_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Maximum source sequence length\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_seq_length_trunc'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"Truncate source sequence length.\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_seq_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Maximum target sequence length to keep.\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_seq_length_trunc'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"Truncate target sequence length.\"",
")",
"group",
".",
"add_argument",
"(",
"'-lower'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'lowercase data'",
")",
"# Data processing options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Random'",
")",
"group",
".",
"add_argument",
"(",
"'-shuffle'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"Shuffle data\"",
")",
"group",
".",
"add_argument",
"(",
"'-seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3435",
",",
"help",
"=",
"\"Random seed\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-report_every'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100000",
",",
"help",
"=",
"\"Report status every this many sentences\"",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"# Options most relevant to speech",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"\"Window size for spectrogram in seconds.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_stride'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".01",
",",
"help",
"=",
"\"Window stride for spectrogram in seconds.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window'",
",",
"default",
"=",
"'hamming'",
",",
"help",
"=",
"\"Window type for spectrogram generation.\"",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/opts.py#L137-L242 |
||
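A hedged sketch (not part of the dataset) for preprocess_opts, highlighting that the five data-path flags are required=True in the row above; the file paths are placeholders.

```python
import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description="preprocess_opts demo")
opts.preprocess_opts(parser)
opt = parser.parse_args([            # the five path flags are required=True
    "-train_src", "data/train.src",  # placeholder paths
    "-train_tgt", "data/train.tgt",
    "-valid_src", "data/valid.src",
    "-valid_tgt", "data/valid.tgt",
    "-save_data", "data/demo",
])
print(opt.src_vocab_size, opt.shard_size)  # -> 50000 0
```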
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/opts.py | python | train_opts | (parser) | Training and saving options | Training and saving options | [
"Training",
"and",
"saving",
"options"
] | def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add_argument('-data', required=True,
help="""Path prefix to the ".train.pt" and
".valid.pt" file path from preprocess.py""")
group.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_N.pt where N is the number
of steps""")
group.add_argument('-save_checkpoint_steps', type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add_argument('-keep_checkpoint', type=int, default=-1,
help="""Keep X checkpoints (negative: keep all)""")
# GPU
group.add_argument('-gpuid', default=[], nargs='+', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,
help="list of ranks of each process.")
group.add_argument('-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add_argument('-gpu_backend', default='nccl', nargs='+', type=str,
help="Type of torch distributed backend")
group.add_argument('-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add_argument('-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add_argument('-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add_argument('-seed', type=int, default=-1,
help="""Random seed used for the experiments
reproducibility.""")
# Init options
group = parser.add_argument_group('Initialization')
group.add_argument('-param_init', type=float, default=0.1,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init).
Use 0 to not use initialization""")
group.add_argument('-param_init_glorot', action='store_true',
help="""Init parameters with xavier_uniform.
Required for transfomer.""")
group.add_argument('-train_from', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model's state_dict.""")
# Pretrained word vectors
group.add_argument('-pre_word_vecs_enc',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the encoder side.
See README for specific formatting instructions.""")
group.add_argument('-pre_word_vecs_dec',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the decoder side.
See README for specific formatting instructions.""")
# Fixed word vectors
group.add_argument('-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add_argument('-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add_argument('-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add_argument('-batch_type', default='sents',
choices=["sents", "tokens"],
help="""Batch grouping for batch_size. Standard
is sents. Tokens will do dynamic batching""")
group.add_argument('-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add_argument('-accum_count', type=int, default=1,
help="""Accumulate gradient this many times.
Approximately equivalent to updating
batch_size * accum_count batches at once.
Recommended for Transformer.""")
group.add_argument('-valid_steps', type=int, default=10000,
help='Perfom validation every X steps')
group.add_argument('-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add_argument('-max_generator_batches', type=int, default=32,
help="""Maximum batches of words in a sequence to run
the generator on in parallel. Higher is faster, but
uses more memory.""")
group.add_argument('-train_steps', type=int, default=100000,
help='Number of training steps')
group.add_argument('-epochs', type=int, default=0,
help='Deprecated epochs see train_steps')
group.add_argument('-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam'],
help="""Optimization method.""")
group.add_argument('-adagrad_accumulator_init', type=float, default=0,
help="""Initializes the accumulator values in adagrad.
Mirrors the initial_accumulator_value option
in the tensorflow adagrad (use 0.1 for their default).
""")
group.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to
max_grad_norm""")
group.add_argument('-dropout', type=float, default=0.3,
help="Dropout probability; applied in LSTM stacks.")
group.add_argument('-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add_argument('-adam_beta1', type=float, default=0.9,
help="""The beta1 parameter used by Adam.
Almost without exception a value of 0.9 is used in
the literature, seemingly giving good results,
so we would discourage changing this value from
the default without due consideration.""")
group.add_argument('-adam_beta2', type=float, default=0.999,
help="""The beta2 parameter used by Adam.
Typically a value of 0.999 is recommended, as this is
the value suggested by the original paper describing
Adam, and is also the value adopted in other frameworks
such as Tensorflow and Kerras, i.e. see:
https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/ .
Whereas recently the paper "Attention is All You Need"
suggested a value of 0.98 for beta2, this parameter may
not work well for normal models / default
baselines.""")
group.add_argument('-label_smoothing', type=float, default=0.0,
help="""Label smoothing value epsilon.
Probabilities of all non-true labels
will be smoothed by epsilon / (vocab_size - 1).
Set to zero to turn off label smoothing.
For more detailed information, see:
https://arxiv.org/abs/1512.00567""")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate.
Recommended settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
group.add_argument('-learning_rate_decay', type=float, default=0.5,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) steps have gone past
start_decay_steps""")
group.add_argument('-start_decay_steps', type=int, default=50000,
help="""Start decaying every decay_steps after
start_decay_steps""")
group.add_argument('-decay_steps', type=int, default=10000,
help="""Decay every decay_steps""")
group.add_argument('-decay_method', type=str, default="",
choices=['noam'], help="Use a custom decay rate.")
group.add_argument('-warmup_steps', type=int, default=4000,
help="""Number of warmup steps for custom decay.""")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=50,
help="Print stats at this interval.")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-exp_host', type=str, default="",
help="Send logs to this crayon server.")
group.add_argument('-exp', type=str, default="",
help="Name of the experiment for logging.")
# Use TensorboardX for visualization during training
group.add_argument('-tensorboard', action="store_true",
help="""Use tensorboardX for visualization during training.
Must have the library tensorboardX.""")
group.add_argument("-tensorboard_log_dir", type=str,
default="runs/onmt",
help="""Log directory for Tensorboard.
This is also the name of the run.
""")
group = parser.add_argument_group('Speech')
# Options most relevant to speech
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"train_opts",
"(",
"parser",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'General'",
")",
"group",
".",
"add_argument",
"(",
"'-data'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_model'",
",",
"default",
"=",
"'model'",
",",
"help",
"=",
"\"\"\"Model filename (the model will be saved as\n <save_model>_N.pt where N is the number\n of steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_checkpoint_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5000",
",",
"help",
"=",
"\"\"\"Save a checkpoint every X steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-keep_checkpoint'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"Keep X checkpoints (negative: keep all)\"\"\"",
")",
"# GPU",
"group",
".",
"add_argument",
"(",
"'-gpuid'",
",",
"default",
"=",
"[",
"]",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Deprecated see world_size and gpu_ranks.\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_ranks'",
",",
"default",
"=",
"[",
"]",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"list of ranks of each process.\"",
")",
"group",
".",
"add_argument",
"(",
"'-world_size'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"total number of distributed processes.\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_backend'",
",",
"default",
"=",
"'nccl'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Type of torch distributed backend\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_verbose_level'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Gives more info on each process per GPU.\"",
")",
"group",
".",
"add_argument",
"(",
"'-master_ip'",
",",
"default",
"=",
"\"localhost\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"IP of master for torch.distributed training.\"",
")",
"group",
".",
"add_argument",
"(",
"'-master_port'",
",",
"default",
"=",
"10000",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Port of master for torch.distributed training.\"",
")",
"group",
".",
"add_argument",
"(",
"'-seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"Random seed used for the experiments\n reproducibility.\"\"\"",
")",
"# Init options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Initialization'",
")",
"group",
".",
"add_argument",
"(",
"'-param_init'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.1",
",",
"help",
"=",
"\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-param_init_glorot'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_from'",
",",
"default",
"=",
"''",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\"",
")",
"# Pretrained word vectors",
"group",
".",
"add_argument",
"(",
"'-pre_word_vecs_enc'",
",",
"help",
"=",
"\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-pre_word_vecs_dec'",
",",
"help",
"=",
"\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\"",
")",
"# Fixed word vectors",
"group",
".",
"add_argument",
"(",
"'-fix_word_vecs_enc'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Fix word embeddings on the encoder side.\"",
")",
"group",
".",
"add_argument",
"(",
"'-fix_word_vecs_dec'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Fix word embeddings on the decoder side.\"",
")",
"# Optimization options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Optimization- Type'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"64",
",",
"help",
"=",
"'Maximum batch size for training'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_type'",
",",
"default",
"=",
"'sents'",
",",
"choices",
"=",
"[",
"\"sents\"",
",",
"\"tokens\"",
"]",
",",
"help",
"=",
"\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-normalization'",
",",
"default",
"=",
"'sents'",
",",
"choices",
"=",
"[",
"\"sents\"",
",",
"\"tokens\"",
"]",
",",
"help",
"=",
"'Normalization method of the gradient.'",
")",
"group",
".",
"add_argument",
"(",
"'-accum_count'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
",",
"help",
"=",
"'Perfom validation every X steps'",
")",
"group",
".",
"add_argument",
"(",
"'-valid_batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"32",
",",
"help",
"=",
"'Maximum batch size for validation'",
")",
"group",
".",
"add_argument",
"(",
"'-max_generator_batches'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"32",
",",
"help",
"=",
"\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. Higher is faster, but\n uses more memory.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100000",
",",
"help",
"=",
"'Number of training steps'",
")",
"group",
".",
"add_argument",
"(",
"'-epochs'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Deprecated epochs see train_steps'",
")",
"group",
".",
"add_argument",
"(",
"'-optim'",
",",
"default",
"=",
"'sgd'",
",",
"choices",
"=",
"[",
"'sgd'",
",",
"'adagrad'",
",",
"'adadelta'",
",",
"'adam'",
",",
"'sparseadam'",
"]",
",",
"help",
"=",
"\"\"\"Optimization method.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adagrad_accumulator_init'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-max_grad_norm'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"5",
",",
"help",
"=",
"\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-dropout'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.3",
",",
"help",
"=",
"\"Dropout probability; applied in LSTM stacks.\"",
")",
"group",
".",
"add_argument",
"(",
"'-truncated_decoder'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Truncated bptt.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adam_beta1'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.9",
",",
"help",
"=",
"\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adam_beta2'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.999",
",",
"help",
"=",
"\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. see:\n https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer\n https://keras.io/optimizers/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models / default\n baselines.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-label_smoothing'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.0",
",",
"help",
"=",
"\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon / (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https://arxiv.org/abs/1512.00567\"\"\"",
")",
"# learning rate",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Optimization- Rate'",
")",
"group",
".",
"add_argument",
"(",
"'-learning_rate'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"1.0",
",",
"help",
"=",
"\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-learning_rate_decay'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.5",
",",
"help",
"=",
"\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-start_decay_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decay_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
",",
"help",
"=",
"\"\"\"Decay every decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decay_method'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"choices",
"=",
"[",
"'noam'",
"]",
",",
"help",
"=",
"\"Use a custom decay rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-warmup_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"4000",
",",
"help",
"=",
"\"\"\"Number of warmup steps for custom decay.\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-report_every'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Print stats at this interval.\"",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"group",
".",
"add_argument",
"(",
"'-exp_host'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Send logs to this crayon server.\"",
")",
"group",
".",
"add_argument",
"(",
"'-exp'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Name of the experiment for logging.\"",
")",
"# Use TensorboardX for visualization during training",
"group",
".",
"add_argument",
"(",
"'-tensorboard'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"\"-tensorboard_log_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"runs/onmt\"",
",",
"help",
"=",
"\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"# Options most relevant to speech",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"\"Window size for spectrogram in seconds.\"",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/opts.py#L245-L436 |
||
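The `train_opts` record above documents `-label_smoothing` as spreading `epsilon / (vocab_size - 1)` probability mass over every non-true label. As a minimal, hedged sketch of that target distribution (the helper name and tensor layout are illustrative assumptions, not taken from the record):

```python
import torch

def smoothed_target(true_idx, vocab_size, epsilon=0.1):
    """Target distribution implied by -label_smoothing: the true label keeps
    1 - epsilon, every other label receives epsilon / (vocab_size - 1)."""
    dist = torch.full((vocab_size,), epsilon / (vocab_size - 1))
    dist[true_idx] = 1.0 - epsilon
    return dist

t = smoothed_target(true_idx=2, vocab_size=5, epsilon=0.1)
print(t)          # tensor([0.0250, 0.0250, 0.9000, 0.0250, 0.0250])
print(t.sum())    # sums to 1.0
```

Setting `epsilon` to zero recovers the usual one-hot target, matching the "Set to zero to turn off label smoothing" note in the option's help text.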
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/opts.py | python | translate_opts | (parser) | Translation / inference options | Translation / inference options | [
"Translation",
"/",
"inference",
"options"
] | def translate_opts(parser):
""" Translation / inference options """
group = parser.add_argument_group('Model')
group.add_argument('-model', dest='models', metavar='MODEL',
nargs='+', type=str, default=[], required=True,
help='Path to model .pt file(s). '
'Multiple models can be specified, '
'for ensemble decoding.')
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="Type of the source input. Options: [text|img].")
group.add_argument('-src', required=True,
help="""Source sequence to decode (one line per
sequence)""")
group.add_argument('-src_dir', default="",
help='Source directory for image or audio files')
group.add_argument('-tgt',
help='True target sequence (optional)')
group.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                       be the decoded sequence)""")
group.add_argument('-report_bleu', action='store_true',
help="""Report bleu score after translation,
call tools/multi-bleu.perl on command line""")
group.add_argument('-report_rouge', action='store_true',
help="""Report rouge 1/2/3/L/SU4 score after translation
call tools/test_rouge.py on command line""")
# Options most relevant to summarization.
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
group = parser.add_argument_group('Beam')
group.add_argument('-fast', action="store_true",
help="""Use fast beam search (some features may not be
supported!)""")
group.add_argument('-beam_size', type=int, default=5,
help='Beam size')
group.add_argument('-min_length', type=int, default=0,
help='Minimum prediction length')
group.add_argument('-max_length', type=int, default=100,
help='Maximum prediction length.')
group.add_argument('-max_sent_length', action=DeprecateAction,
help="Deprecated, use `-max_length` instead")
# Alpha and Beta values for Google Length + Coverage penalty
# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
group.add_argument('-stepwise_penalty', action='store_true',
help="""Apply penalty at every decoding step.
Helpful for summary penalty.""")
group.add_argument('-length_penalty', default='none',
choices=['none', 'wu', 'avg'],
help="""Length Penalty to use.""")
group.add_argument('-coverage_penalty', default='none',
choices=['none', 'wu', 'summary'],
help="""Coverage Penalty to use.""")
group.add_argument('-alpha', type=float, default=0.,
help="""Google NMT length penalty parameter
(higher = longer generation)""")
group.add_argument('-beta', type=float, default=-0.,
help="""Coverage penalty parameter""")
group.add_argument('-block_ngram_repeat', type=int, default=0,
help='Block repetition of ngrams during decoding.')
group.add_argument('-ignore_when_blocking', nargs='+', type=str,
default=[],
help="""Ignore these strings when blocking repeats.
You want to block sentence delimiters.""")
group.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the
source token that had highest attention weight. If
phrase_table is provided, it will lookup the
identified source token and give the corresponding
target token. If it is not provided(or the identified
source token does not exist in the table) then it
will copy the source token""")
group = parser.add_argument_group('Logging')
group.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-attn_debug', action="store_true",
help='Print best attn for each word')
group.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
group.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
group = parser.add_argument_group('Efficiency')
group.add_argument('-batch_size', type=int, default=30,
help='Batch size')
group.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to speech.
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help='Window size for spectrogram in seconds')
group.add_argument('-window_stride', type=float, default=.01,
help='Window stride for spectrogram in seconds')
group.add_argument('-window', default='hamming',
help='Window type for spectrogram generation')
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"translate_opts",
"(",
"parser",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model'",
")",
"group",
".",
"add_argument",
"(",
"'-model'",
",",
"dest",
"=",
"'models'",
",",
"metavar",
"=",
"'MODEL'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"[",
"]",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Path to model .pt file(s). '",
"'Multiple models can be specified, '",
"'for ensemble decoding.'",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Data'",
")",
"group",
".",
"add_argument",
"(",
"'-data_type'",
",",
"default",
"=",
"\"text\"",
",",
"help",
"=",
"\"Type of the source input. Options: [text|img].\"",
")",
"group",
".",
"add_argument",
"(",
"'-src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"\"\"Source sequence to decode (one line per\n sequence)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_dir'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"'Source directory for image or audio files'",
")",
"group",
".",
"add_argument",
"(",
"'-tgt'",
",",
"help",
"=",
"'True target sequence (optional)'",
")",
"group",
".",
"add_argument",
"(",
"'-output'",
",",
"default",
"=",
"'pred.txt'",
",",
"help",
"=",
"\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-report_bleu'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Report bleu score after translation,\n call tools/multi-bleu.perl on command line\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-report_rouge'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Report rouge 1/2/3/L/SU4 score after translation\n call tools/test_rouge.py on command line\"\"\"",
")",
"# Options most relevant to summarization.",
"group",
".",
"add_argument",
"(",
"'-dynamic_dict'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Create dynamic dictionaries\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_vocab'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Share source and target vocabulary\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Beam'",
")",
"group",
".",
"add_argument",
"(",
"'-fast'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-beam_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5",
",",
"help",
"=",
"'Beam size'",
")",
"group",
".",
"add_argument",
"(",
"'-min_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Minimum prediction length'",
")",
"group",
".",
"add_argument",
"(",
"'-max_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100",
",",
"help",
"=",
"'Maximum prediction length.'",
")",
"group",
".",
"add_argument",
"(",
"'-max_sent_length'",
",",
"action",
"=",
"DeprecateAction",
",",
"help",
"=",
"\"Deprecated, use `-max_length` instead\"",
")",
"# Alpha and Beta values for Google Length + Coverage penalty",
"# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7",
"group",
".",
"add_argument",
"(",
"'-stepwise_penalty'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-length_penalty'",
",",
"default",
"=",
"'none'",
",",
"choices",
"=",
"[",
"'none'",
",",
"'wu'",
",",
"'avg'",
"]",
",",
"help",
"=",
"\"\"\"Length Penalty to use.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-coverage_penalty'",
",",
"default",
"=",
"'none'",
",",
"choices",
"=",
"[",
"'none'",
",",
"'wu'",
",",
"'summary'",
"]",
",",
"help",
"=",
"\"\"\"Coverage Penalty to use.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-alpha'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.",
",",
"help",
"=",
"\"\"\"Google NMT length penalty parameter\n (higher = longer generation)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-beta'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"-",
"0.",
",",
"help",
"=",
"\"\"\"Coverage penalty parameter\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-block_ngram_repeat'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Block repetition of ngrams during decoding.'",
")",
"group",
".",
"add_argument",
"(",
"'-ignore_when_blocking'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence delimiters.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-replace_unk'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-verbose'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Print scores and predictions for each sentence'",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"group",
".",
"add_argument",
"(",
"'-attn_debug'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Print best attn for each word'",
")",
"group",
".",
"add_argument",
"(",
"'-dump_beam'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"'File to dump beam information to.'",
")",
"group",
".",
"add_argument",
"(",
"'-n_best'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Efficiency'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"30",
",",
"help",
"=",
"'Batch size'",
")",
"group",
".",
"add_argument",
"(",
"'-gpu'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"Device to run on\"",
")",
"# Options most relevant to speech.",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"'Window size for spectrogram in seconds'",
")",
"group",
".",
"add_argument",
"(",
"'-window_stride'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".01",
",",
"help",
"=",
"'Window stride for spectrogram in seconds'",
")",
"group",
".",
"add_argument",
"(",
"'-window'",
",",
"default",
"=",
"'hamming'",
",",
"help",
"=",
"'Window type for spectrogram generation'",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/opts.py#L439-L553 |
||
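The `translate_opts` record above ties `-alpha` and `-beta` to the Google NMT length and coverage penalties (https://arxiv.org/pdf/1609.08144.pdf, Section 7). A hedged sketch of those penalties, following the formulation in that paper; the function names and the example numbers below are assumptions:

```python
import math

def length_penalty_wu(length, alpha=0.6):
    # GNMT length penalty: lp(Y) = ((5 + |Y|) / 6) ** alpha
    return ((5.0 + length) / 6.0) ** alpha

def coverage_penalty_wu(attention_sums, beta=0.2):
    # GNMT coverage penalty: beta * sum_i log(min(sum_j p_ij, 1.0))
    return beta * sum(math.log(min(a, 1.0)) for a in attention_sums)

log_prob = -4.2          # hypothetical sum of token log-probabilities
attn = [0.9, 1.3, 0.4]   # hypothetical attention mass per source token
score = log_prob / length_penalty_wu(length=7) + coverage_penalty_wu(attn)
print(round(score, 4))   # higher (less negative) scores rank better
```

With `alpha=0.` and `beta=-0.` (the defaults in the record) both terms vanish, so candidates are ranked by raw log-probability.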
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/opts.py | python | add_md_help_argument | (parser) | md help parser | md help parser | [
"md",
"help",
"parser"
] | def add_md_help_argument(parser):
""" md help parser """
parser.add_argument('-md', action=MarkdownHelpAction,
help='print Markdown-formatted help text and exit.') | [
"def",
"add_md_help_argument",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'-md'",
",",
"action",
"=",
"MarkdownHelpAction",
",",
"help",
"=",
"'print Markdown-formatted help text and exit.'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/opts.py#L556-L559 |
||
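`add_md_help_argument` above registers a custom `MarkdownHelpAction`, whose implementation is not part of this record. The sketch below only shows the general shape such an argparse action could take; it is an assumption, not OpenNMT-py's actual class:

```python
import argparse

class MarkdownHelpActionSketch(argparse.Action):
    """Hypothetical stand-in for the MarkdownHelpAction referenced above."""

    def __init__(self, option_strings, dest=argparse.SUPPRESS, **kwargs):
        super().__init__(option_strings, dest, nargs=0, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # A real implementation would swap in a Markdown-producing formatter;
        # this sketch just prints the standard help text and exits.
        parser.print_help()
        parser.exit()

parser = argparse.ArgumentParser()
parser.add_argument('-md', action=MarkdownHelpActionSketch,
                    help='print Markdown-formatted help text and exit.')
```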
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/audio_encoder.py | python | AudioEncoder.load_pretrained_vectors | (self, opt) | Pass in needed options only when modify function definition. | Pass in needed options only when modify function definition. | [
"Pass",
"in",
"needed",
"options",
"only",
"when",
"modify",
"function",
"definition",
"."
] | def load_pretrained_vectors(self, opt):
""" Pass in needed options only when modify function definition."""
pass | [
"def",
"load_pretrained_vectors",
"(",
"self",
",",
"opt",
")",
":",
"pass"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/audio_encoder.py#L45-L47 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/audio_encoder.py | python | AudioEncoder.forward | (self, src, lengths=None) | return hidden, output | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"encoders",
".",
"encoder",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
# (batch_size, 1, nfft, t)
# layer 1
src = self.batch_norm1(self.layer1(src[:, :, :, :]))
# (batch_size, 32, nfft/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
# (batch_size, 32, nfft/2/2, t/2)
# layer 2
src = self.batch_norm2(self.layer2(src))
# (batch_size, 32, nfft/2/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
batch_size = src.size(0)
length = src.size(3)
src = src.view(batch_size, -1, length)
src = src.transpose(0, 2).transpose(1, 2)
output, hidden = self.rnn(src)
return hidden, output | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"# (batch_size, 1, nfft, t)",
"# layer 1",
"src",
"=",
"self",
".",
"batch_norm1",
"(",
"self",
".",
"layer1",
"(",
"src",
"[",
":",
",",
":",
",",
":",
",",
":",
"]",
")",
")",
"# (batch_size, 32, nfft/2, t/2)",
"src",
"=",
"F",
".",
"hardtanh",
"(",
"src",
",",
"0",
",",
"20",
",",
"inplace",
"=",
"True",
")",
"# (batch_size, 32, nfft/2/2, t/2)",
"# layer 2",
"src",
"=",
"self",
".",
"batch_norm2",
"(",
"self",
".",
"layer2",
"(",
"src",
")",
")",
"# (batch_size, 32, nfft/2/2, t/2)",
"src",
"=",
"F",
".",
"hardtanh",
"(",
"src",
",",
"0",
",",
"20",
",",
"inplace",
"=",
"True",
")",
"batch_size",
"=",
"src",
".",
"size",
"(",
"0",
")",
"length",
"=",
"src",
".",
"size",
"(",
"3",
")",
"src",
"=",
"src",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"length",
")",
"src",
"=",
"src",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"output",
",",
"hidden",
"=",
"self",
".",
"rnn",
"(",
"src",
")",
"return",
"hidden",
",",
"output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/audio_encoder.py#L49-L72 |
|
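`AudioEncoder.forward` above flattens the convolutional feature map and moves time to the leading axis before feeding the RNN. A standalone sketch of just that reshaping with made-up sizes, omitting the convolution and RNN layers from the record:

```python
import torch

batch_size, channels, freq, time_steps = 4, 32, 40, 50
src = torch.randn(batch_size, channels, freq, time_steps)

# Collapse channel and frequency axes into one feature vector per frame...
length = src.size(3)
src = src.view(batch_size, -1, length)    # (batch, channels * freq, time)

# ...then move time to the front so the layout is (time, batch, features),
# which torch.nn recurrent layers expect when batch_first=False.
src = src.transpose(0, 2).transpose(1, 2)
print(src.shape)                          # torch.Size([50, 4, 1280])
```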
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/mean_encoder.py | python | MeanEncoder.forward | (self, src, lengths=None) | return encoder_final, memory_bank | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`EncoderBase.forward()`"
self._check_args(src, lengths)
emb = self.embeddings(src)
_, batch, emb_dim = emb.size()
mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)
memory_bank = emb
encoder_final = (mean, mean)
return encoder_final, memory_bank | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"_",
",",
"batch",
",",
"emb_dim",
"=",
"emb",
".",
"size",
"(",
")",
"mean",
"=",
"emb",
".",
"mean",
"(",
"0",
")",
".",
"expand",
"(",
"self",
".",
"num_layers",
",",
"batch",
",",
"emb_dim",
")",
"memory_bank",
"=",
"emb",
"encoder_final",
"=",
"(",
"mean",
",",
"mean",
")",
"return",
"encoder_final",
",",
"memory_bank"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/mean_encoder.py#L20-L29 |
|
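`MeanEncoder.forward` above replaces a learned encoder state with the time-averaged embeddings, expanded across layers. A tiny sketch of that averaging step on dummy tensors; the sizes are assumptions:

```python
import torch

src_len, batch, emb_dim, num_layers = 7, 2, 8, 3
emb = torch.randn(src_len, batch, emb_dim)        # stand-in for embeddings(src)

mean = emb.mean(0)                                # average over time: (batch, emb_dim)
state = mean.expand(num_layers, batch, emb_dim)   # broadcast to every layer

encoder_final = (state, state)                    # same tensor for h and c
memory_bank = emb                                 # attention reads the raw embeddings
print(state.shape)                                # torch.Size([3, 2, 8])
```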
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/transformer.py | python | TransformerEncoderLayer.forward | (self, inputs, mask) | return self.feed_forward(out) | Transformer Encoder Layer definition.
Args:
inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`
mask (`LongTensor`): `[batch_size x src_len x src_len]`
Returns:
(`FloatTensor`):
* outputs `[batch_size x src_len x model_dim]` | Transformer Encoder Layer definition. | [
"Transformer",
"Encoder",
"Layer",
"definition",
"."
] | def forward(self, inputs, mask):
"""
Transformer Encoder Layer definition.
Args:
inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`
mask (`LongTensor`): `[batch_size x src_len x src_len]`
Returns:
(`FloatTensor`):
* outputs `[batch_size x src_len x model_dim]`
"""
input_norm = self.layer_norm(inputs)
context, _ = self.self_attn(input_norm, input_norm, input_norm,
mask=mask)
out = self.dropout(context) + inputs
return self.feed_forward(out) | [
"def",
"forward",
"(",
"self",
",",
"inputs",
",",
"mask",
")",
":",
"input_norm",
"=",
"self",
".",
"layer_norm",
"(",
"inputs",
")",
"context",
",",
"_",
"=",
"self",
".",
"self_attn",
"(",
"input_norm",
",",
"input_norm",
",",
"input_norm",
",",
"mask",
"=",
"mask",
")",
"out",
"=",
"self",
".",
"dropout",
"(",
"context",
")",
"+",
"inputs",
"return",
"self",
".",
"feed_forward",
"(",
"out",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/transformer.py#L35-L52 |
|
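`TransformerEncoderLayer.forward` above normalizes first, applies self-attention with a residual connection, then a feed-forward block. The sketch below mirrors that pre-norm wiring with stock `torch.nn` modules rather than OpenNMT-py's own attention and feed-forward classes, so the module choices, the external feed-forward residual, and the sizes are all assumptions:

```python
import torch
import torch.nn as nn

class PreNormEncoderLayerSketch(nn.Module):
    """Pre-norm self-attention followed by a feed-forward block."""

    def __init__(self, d_model=512, heads=8, d_ff=2048, dropout=0.1):
        super().__init__()
        self.layer_norm = nn.LayerNorm(d_model)
        self.self_attn = nn.MultiheadAttention(d_model, heads,
                                               dropout=dropout, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.feed_forward = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_ff), nn.ReLU(),
            nn.Linear(d_ff, d_model), nn.Dropout(dropout),
        )

    def forward(self, inputs, pad_mask=None):
        x = self.layer_norm(inputs)                        # normalize before attention
        context, _ = self.self_attn(x, x, x, key_padding_mask=pad_mask)
        out = self.dropout(context) + inputs               # residual around attention
        return self.feed_forward(out) + out                # residual around feed-forward

layer = PreNormEncoderLayerSketch(d_model=32, heads=4, d_ff=64)
print(layer(torch.randn(2, 5, 32)).shape)                  # (batch, src_len, d_model)
```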
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/transformer.py | python | TransformerEncoder.forward | (self, src, lengths=None) | return emb, out.transpose(0, 1).contiguous() | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
""" See :obj:`EncoderBase.forward()`"""
self._check_args(src, lengths)
emb = self.embeddings(src)
out = emb.transpose(0, 1).contiguous()
words = src[:, :, 0].transpose(0, 1)
w_batch, w_len = words.size()
padding_idx = self.embeddings.word_padding_idx
mask = words.data.eq(padding_idx).unsqueeze(1) \
.expand(w_batch, w_len, w_len)
# Run the forward pass of every layer of the tranformer.
for i in range(self.num_layers):
out = self.transformer[i](out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous() | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"out",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"words",
"=",
"src",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"w_batch",
",",
"w_len",
"=",
"words",
".",
"size",
"(",
")",
"padding_idx",
"=",
"self",
".",
"embeddings",
".",
"word_padding_idx",
"mask",
"=",
"words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"w_batch",
",",
"w_len",
",",
"w_len",
")",
"# Run the forward pass of every layer of the tranformer.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_layers",
")",
":",
"out",
"=",
"self",
".",
"transformer",
"[",
"i",
"]",
"(",
"out",
",",
"mask",
")",
"out",
"=",
"self",
".",
"layer_norm",
"(",
"out",
")",
"return",
"emb",
",",
"out",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/transformer.py#L98-L115 |
|
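`TransformerEncoder.forward` above derives its attention mask purely from which source positions equal the word padding index. A standalone sketch of that mask construction on a toy batch; the padding index value here is an assumption:

```python
import torch

padding_idx = 1
# Two sequences of length 5; the second one is padded after three real tokens.
words = torch.tensor([[4, 9, 7, 3, 8],
                      [5, 2, 6, 1, 1]])                    # (batch, src_len)

w_batch, w_len = words.size()
mask = words.eq(padding_idx).unsqueeze(1).expand(w_batch, w_len, w_len)

# mask[b, i, j] is True where key position j of example b is padding, so every
# query position i is blocked from attending to padded keys.
print(mask.shape)     # torch.Size([2, 5, 5])
print(mask[1, 0])     # tensor([False, False, False,  True,  True])
```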
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/encoder.py | python | EncoderBase.forward | (self, src, lengths=None) | Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]`
Returns:
(tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
* final encoder state, used to initialize decoder
* memory bank for attention, `[src_len x batch x hidden]` | Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]` | [
"Args",
":",
"src",
"(",
":",
"obj",
":",
"LongTensor",
")",
":",
"padded",
"sequences",
"of",
"sparse",
"indices",
"[",
"src_len",
"x",
"batch",
"x",
"nfeat",
"]",
"lengths",
"(",
":",
"obj",
":",
"LongTensor",
")",
":",
"length",
"of",
"each",
"sequence",
"[",
"batch",
"]"
] | def forward(self, src, lengths=None):
"""
Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]`
Returns:
(tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
* final encoder state, used to initialize decoder
* memory bank for attention, `[src_len x batch x hidden]`
"""
raise NotImplementedError | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/encoder.py#L41-L54 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/rnn_encoder.py | python | RNNEncoder.forward | (self, src, lengths=None) | return encoder_final, memory_bank | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`EncoderBase.forward()`"
self._check_args(src, lengths)
emb = self.embeddings(src)
# s_len, batch, emb_dim = emb.size()
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Tensor.
lengths = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths)
memory_bank, encoder_final = self.rnn(packed_emb)
if lengths is not None and not self.no_pack_padded_seq:
memory_bank = unpack(memory_bank)[0]
if self.use_bridge:
encoder_final = self._bridge(encoder_final)
return encoder_final, memory_bank | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"# s_len, batch, emb_dim = emb.size()",
"packed_emb",
"=",
"emb",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"# Lengths data is wrapped inside a Tensor.",
"lengths",
"=",
"lengths",
".",
"view",
"(",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"packed_emb",
"=",
"pack",
"(",
"emb",
",",
"lengths",
")",
"memory_bank",
",",
"encoder_final",
"=",
"self",
".",
"rnn",
"(",
"packed_emb",
")",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"memory_bank",
"=",
"unpack",
"(",
"memory_bank",
")",
"[",
"0",
"]",
"if",
"self",
".",
"use_bridge",
":",
"encoder_final",
"=",
"self",
".",
"_bridge",
"(",
"encoder_final",
")",
"return",
"encoder_final",
",",
"memory_bank"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/rnn_encoder.py#L53-L73 |
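`RNNEncoder.forward` above packs the embedded batch before the RNN and unpacks the output afterwards so padded steps are skipped. A minimal sketch of that pack/run/unpack pattern with a plain `nn.LSTM`; the sizes and the (already sorted) lengths are assumptions:

```python
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

src_len, batch, emb_dim, hidden = 6, 3, 8, 16
emb = torch.randn(src_len, batch, emb_dim)     # time-major, like the encoder input
lengths = [6, 4, 2]                            # true lengths, longest first

rnn = nn.LSTM(emb_dim, hidden, num_layers=1)

packed_emb = pack(emb, lengths)                # drop computation on padded steps
packed_out, encoder_final = rnn(packed_emb)
memory_bank, out_lengths = unpack(packed_out)  # back to (src_len, batch, hidden)

print(memory_bank.shape)                       # torch.Size([6, 3, 16])
print(out_lengths.tolist())                    # [6, 4, 2]
```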