nwo (string, 6-76 chars) | sha (string, 40 chars) | path (string, 5-118 chars) | language (string, 1 class: python) | identifier (string, 1-89 chars) | parameters (string, 2-5.4k chars) | argument_list (string, 1 class) | return_statement (string, 0-51.1k chars) | docstring (string, 1-17.6k chars) | docstring_summary (string, 0-7.02k chars) | docstring_tokens (sequence) | function (string, 30-51.1k chars) | function_tokens (sequence) | url (string, 85-218 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py | python | AudioDataset.get_num_features | (corpus_file, side) | return num_feats | For an audio corpus, the source side is audio and thus has
no features, while the target side is text, so its
text features can be extracted.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | For an audio corpus, the source side is audio and thus has
no features, while the target side is text, so its
text features can be extracted. | [
"For",
"audio",
"corpus",
"source",
"side",
"is",
"in",
"form",
"of",
"audio",
"thus",
"no",
"feature",
";",
"while",
"target",
"side",
"is",
"in",
"form",
"of",
"text",
"thus",
"we",
"can",
"extract",
"its",
"text",
"features",
"."
] | def get_num_features(corpus_file, side):
"""
For an audio corpus, the source side is audio and thus has
no features, while the target side is text, so its
text features can be extracted.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
if side == 'src':
num_feats = 0
else:
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = AudioDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"if",
"side",
"==",
"'src'",
":",
"num_feats",
"=",
"0",
"else",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"AudioDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py#L271-L291 |
|
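A minimal usage sketch for `AudioDataset.get_num_features` above; the corpus path is an illustrative assumption, not taken from the repository:

```python
# Hedged sketch: count per-token features on each side of an audio corpus.
# "data/train.tgt.txt" is a hypothetical path.
from onmt.inputters.audio_dataset import AudioDataset

n_src_feats = AudioDataset.get_num_features("data/train.tgt.txt", "src")  # always 0 for audio sources
n_tgt_feats = AudioDataset.get_num_features("data/train.tgt.txt", "tgt")  # parsed from the first text line
print(n_src_feats, n_tgt_feats)
```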
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | get_fields | (data_type, n_src_features, n_tgt_features) | Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values are the
corresponding Field objects. | Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"data_type",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(data_type, n_src_features, n_tgt_features):
"""
Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values are the
corresponding Field objects.
"""
if data_type == 'text':
return TextDataset.get_fields(n_src_features, n_tgt_features)
elif data_type == 'img':
return ImageDataset.get_fields(n_src_features, n_tgt_features)
elif data_type == 'audio':
return AudioDataset.get_fields(n_src_features, n_tgt_features)
else:
raise ValueError("Data type not implemented") | [
"def",
"get_fields",
"(",
"data_type",
",",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"TextDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"return",
"ImageDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"return",
"AudioDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Data type not implemented\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L36-L56 |
||
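A short sketch of calling `get_fields` for a plain text task; the feature counts are illustrative assumptions:

```python
# Hedged sketch: build the field dictionary for a text-to-text task
# with no extra word features on either side.
from onmt.inputters.inputter import get_fields

fields = get_fields("text", n_src_features=0, n_tgt_features=0)
print(sorted(fields.keys()))  # expect at least 'src' and 'tgt' Field entries
```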
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | load_fields_from_vocab | (vocab, data_type="text") | return fields | Load Field objects from `vocab.pt` file. | Load Field objects from `vocab.pt` file. | [
"Load",
"Field",
"objects",
"from",
"vocab",
".",
"pt",
"file",
"."
] | def load_fields_from_vocab(vocab, data_type="text"):
"""
Load Field objects from `vocab.pt` file.
"""
vocab = dict(vocab)
n_src_features = len(collect_features(vocab, 'src'))
n_tgt_features = len(collect_features(vocab, 'tgt'))
fields = get_fields(data_type, n_src_features, n_tgt_features)
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
fields[k].vocab = v
return fields | [
"def",
"load_fields_from_vocab",
"(",
"vocab",
",",
"data_type",
"=",
"\"text\"",
")",
":",
"vocab",
"=",
"dict",
"(",
"vocab",
")",
"n_src_features",
"=",
"len",
"(",
"collect_features",
"(",
"vocab",
",",
"'src'",
")",
")",
"n_tgt_features",
"=",
"len",
"(",
"collect_features",
"(",
"vocab",
",",
"'tgt'",
")",
")",
"fields",
"=",
"get_fields",
"(",
"data_type",
",",
"n_src_features",
",",
"n_tgt_features",
")",
"for",
"k",
",",
"v",
"in",
"vocab",
".",
"items",
"(",
")",
":",
"# Hack. Can't pickle defaultdict :(",
"v",
".",
"stoi",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
",",
"v",
".",
"stoi",
")",
"fields",
"[",
"k",
"]",
".",
"vocab",
"=",
"v",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L59-L71 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | save_fields_to_vocab | (fields) | return vocab | Save Vocab objects in Field objects to `vocab.pt` file. | Save Vocab objects in Field objects to `vocab.pt` file. | [
"Save",
"Vocab",
"objects",
"in",
"Field",
"objects",
"to",
"vocab",
".",
"pt",
"file",
"."
] | def save_fields_to_vocab(fields):
"""
Save Vocab objects in Field objects to `vocab.pt` file.
"""
vocab = []
for k, f in fields.items():
if f is not None and 'vocab' in f.__dict__:
f.vocab.stoi = f.vocab.stoi
vocab.append((k, f.vocab))
return vocab | [
"def",
"save_fields_to_vocab",
"(",
"fields",
")",
":",
"vocab",
"=",
"[",
"]",
"for",
"k",
",",
"f",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"f",
"is",
"not",
"None",
"and",
"'vocab'",
"in",
"f",
".",
"__dict__",
":",
"f",
".",
"vocab",
".",
"stoi",
"=",
"f",
".",
"vocab",
".",
"stoi",
"vocab",
".",
"append",
"(",
"(",
"k",
",",
"f",
".",
"vocab",
")",
")",
"return",
"vocab"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L74-L83 |
|
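The two functions above form a save/load round trip. A sketch, assuming `fields` was built with `get_fields` and already has vocabularies attached; the file name is hypothetical:

```python
# Hedged sketch: serialize vocabularies as (name, Vocab) pairs, then
# restore the Field objects from them.
import torch
from onmt.inputters.inputter import save_fields_to_vocab, load_fields_from_vocab

torch.save(save_fields_to_vocab(fields), "demo.vocab.pt")  # list of (name, Vocab) pairs
fields = load_fields_from_vocab(torch.load("demo.vocab.pt"), data_type="text")
```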
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | merge_vocabs | (vocabs, vocab_size=None) | return torchtext.vocab.Vocab(merged,
specials=[UNK_WORD, PAD_WORD,
BOS_WORD, EOS_WORD],
max_size=vocab_size) | Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab` | Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary. | [
"Merge",
"individual",
"vocabularies",
"(",
"assumed",
"to",
"be",
"generated",
"from",
"disjoint",
"documents",
")",
"into",
"a",
"larger",
"vocabulary",
"."
] | def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return torchtext.vocab.Vocab(merged,
specials=[UNK_WORD, PAD_WORD,
BOS_WORD, EOS_WORD],
max_size=vocab_size) | [
"def",
"merge_vocabs",
"(",
"vocabs",
",",
"vocab_size",
"=",
"None",
")",
":",
"merged",
"=",
"sum",
"(",
"[",
"vocab",
".",
"freqs",
"for",
"vocab",
"in",
"vocabs",
"]",
",",
"Counter",
"(",
")",
")",
"return",
"torchtext",
".",
"vocab",
".",
"Vocab",
"(",
"merged",
",",
"specials",
"=",
"[",
"UNK_WORD",
",",
"PAD_WORD",
",",
"BOS_WORD",
",",
"EOS_WORD",
"]",
",",
"max_size",
"=",
"vocab_size",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L86-L101 |
|
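A sketch of merging the source and target vocabularies into one shared vocabulary; the size cap is an illustrative assumption:

```python
# Hedged sketch: counts from both vocabularies are summed, then a single
# Vocab with the usual special tokens is rebuilt, capped at 50k entries.
from onmt.inputters.inputter import merge_vocabs

shared = merge_vocabs([fields["src"].vocab, fields["tgt"].vocab], vocab_size=50000)
fields["src"].vocab = shared
fields["tgt"].vocab = shared
```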
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | get_num_features | (data_type, corpus_file, side) | Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target.
Returns:
number of features on `side`. | Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target. | [
"Args",
":",
"data_type",
"(",
"str",
")",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"corpus_file",
"(",
"str",
")",
":",
"file",
"path",
"to",
"get",
"the",
"features",
".",
"side",
"(",
"str",
")",
":",
"for",
"source",
"or",
"for",
"target",
"."
] | def get_num_features(data_type, corpus_file, side):
"""
Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target.
Returns:
number of features on `side`.
"""
assert side in ["src", "tgt"]
if data_type == 'text':
return TextDataset.get_num_features(corpus_file, side)
elif data_type == 'img':
return ImageDataset.get_num_features(corpus_file, side)
elif data_type == 'audio':
return AudioDataset.get_num_features(corpus_file, side)
else:
raise ValueError("Data type not implemented") | [
"def",
"get_num_features",
"(",
"data_type",
",",
"corpus_file",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"\"src\"",
",",
"\"tgt\"",
"]",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"TextDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"return",
"ImageDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"return",
"AudioDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Data type not implemented\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L104-L124 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | make_features | (batch, side, data_type='text') | Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch). | Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch). | [
"Args",
":",
"batch",
"(",
"Tensor",
")",
":",
"a",
"batch",
"of",
"source",
"or",
"target",
"data",
".",
"side",
"(",
"str",
")",
":",
"for",
"source",
"or",
"for",
"target",
".",
"data_type",
"(",
"str",
")",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"Returns",
":",
"A",
"sequence",
"of",
"src",
"/",
"tgt",
"tensors",
"with",
"optional",
"feature",
"tensors",
"of",
"size",
"(",
"len",
"x",
"batch",
")",
"."
] | def make_features(batch, side, data_type='text'):
"""
Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
keys = sorted([k for k in batch.__dict__ if feat_start in k])
features = [batch.__dict__[k] for k in keys]
levels = [data] + features
if data_type == 'text':
return torch.cat([level.unsqueeze(2) for level in levels], 2)
else:
return levels[0] | [
"def",
"make_features",
"(",
"batch",
",",
"side",
",",
"data_type",
"=",
"'text'",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"if",
"isinstance",
"(",
"batch",
".",
"__dict__",
"[",
"side",
"]",
",",
"tuple",
")",
":",
"data",
"=",
"batch",
".",
"__dict__",
"[",
"side",
"]",
"[",
"0",
"]",
"else",
":",
"data",
"=",
"batch",
".",
"__dict__",
"[",
"side",
"]",
"feat_start",
"=",
"side",
"+",
"\"_feat_\"",
"keys",
"=",
"sorted",
"(",
"[",
"k",
"for",
"k",
"in",
"batch",
".",
"__dict__",
"if",
"feat_start",
"in",
"k",
"]",
")",
"features",
"=",
"[",
"batch",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"levels",
"=",
"[",
"data",
"]",
"+",
"features",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"torch",
".",
"cat",
"(",
"[",
"level",
".",
"unsqueeze",
"(",
"2",
")",
"for",
"level",
"in",
"levels",
"]",
",",
"2",
")",
"else",
":",
"return",
"levels",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L127-L152 |
||
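A sketch of `make_features` on a training batch; `batch` is assumed to come from the `OrderedIterator` defined later in this file:

```python
# Hedged sketch: stack word indices with any feature columns.
from onmt.inputters.inputter import make_features

src = make_features(batch, "src", data_type="text")  # (len x batch x n_feats+1) for text
tgt = make_features(batch, "tgt")                    # data_type defaults to 'text'
```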
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | collect_features | (fields, side="src") | return feats | Collect features from Field object. | Collect features from Field object. | [
"Collect",
"features",
"from",
"Field",
"object",
"."
] | def collect_features(fields, side="src"):
"""
Collect features from Field object.
"""
assert side in ["src", "tgt"]
feats = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feats.append(key)
return feats | [
"def",
"collect_features",
"(",
"fields",
",",
"side",
"=",
"\"src\"",
")",
":",
"assert",
"side",
"in",
"[",
"\"src\"",
",",
"\"tgt\"",
"]",
"feats",
"=",
"[",
"]",
"for",
"j",
"in",
"count",
"(",
")",
":",
"key",
"=",
"side",
"+",
"\"_feat_\"",
"+",
"str",
"(",
"j",
")",
"if",
"key",
"not",
"in",
"fields",
":",
"break",
"feats",
".",
"append",
"(",
"key",
")",
"return",
"feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L155-L166 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | collect_feature_vocabs | (fields, side) | return feature_vocabs | Collect feature Vocab objects from Field object. | Collect feature Vocab objects from Field object. | [
"Collect",
"feature",
"Vocab",
"objects",
"from",
"Field",
"object",
"."
] | def collect_feature_vocabs(fields, side):
"""
Collect feature Vocab objects from Field object.
"""
assert side in ['src', 'tgt']
feature_vocabs = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feature_vocabs.append(fields[key].vocab)
return feature_vocabs | [
"def",
"collect_feature_vocabs",
"(",
"fields",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"feature_vocabs",
"=",
"[",
"]",
"for",
"j",
"in",
"count",
"(",
")",
":",
"key",
"=",
"side",
"+",
"\"_feat_\"",
"+",
"str",
"(",
"j",
")",
"if",
"key",
"not",
"in",
"fields",
":",
"break",
"feature_vocabs",
".",
"append",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
"return",
"feature_vocabs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L169-L180 |
|
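A sketch using the two collectors above on a field dictionary; both simply walk `src_feat_0`, `src_feat_1`, ... until a key is missing:

```python
# Hedged sketch: enumerate source-side feature fields and their vocabularies.
from onmt.inputters.inputter import collect_features, collect_feature_vocabs

feat_names = collect_features(fields, side="src")         # e.g. ['src_feat_0', ...]
feat_vocabs = collect_feature_vocabs(fields, side="src")  # matching Vocab objects
```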
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | build_dataset | (fields, data_type, src_data_iter=None, src_path=None,
src_dir=None, tgt_data_iter=None, tgt_path=None,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
dynamic_dict=True, sample_rate=0,
window_size=0, window_stride=0, window=None,
normalize_audio=True, use_filter_pred=True,
image_channel_size=3) | return dataset | Build a src/tgt examples iterator from corpus files and extract
the number of features. | Build a src/tgt examples iterator from corpus files and extract
the number of features. | [
"Build",
"src",
"/",
"tgt",
"examples",
"iterator",
"from",
"corpus",
"files",
"also",
"extract",
"number",
"of",
"features",
"."
] | def build_dataset(fields, data_type, src_data_iter=None, src_path=None,
src_dir=None, tgt_data_iter=None, tgt_path=None,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
dynamic_dict=True, sample_rate=0,
window_size=0, window_stride=0, window=None,
normalize_audio=True, use_filter_pred=True,
image_channel_size=3):
"""
Build a src/tgt examples iterator from corpus files and extract
the number of features.
"""
def _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,
src_seq_length_trunc, sample_rate,
window_size, window_stride,
window, normalize_audio,
image_channel_size=3):
"""
Process the corpus into (example_dict iterator, num_feats) tuple
on source side for different 'data_type'.
"""
if data_type == 'text':
src_examples_iter, num_src_feats = \
TextDataset.make_text_examples_nfeats_tpl(
src_data_iter, src_path, src_seq_length_trunc, "src")
elif data_type == 'img':
src_examples_iter, num_src_feats = \
ImageDataset.make_image_examples_nfeats_tpl(
src_data_iter, src_path, src_dir, image_channel_size)
elif data_type == 'audio':
if src_data_iter:
raise ValueError("""Data iterator for AudioDataset isn't
implemented""")
if src_path is None:
raise ValueError("AudioDataset requires a non None path")
src_examples_iter, num_src_feats = \
AudioDataset.make_audio_examples_nfeats_tpl(
src_path, src_dir, sample_rate,
window_size, window_stride, window,
normalize_audio)
return src_examples_iter, num_src_feats
src_examples_iter, num_src_feats = \
_make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,
src_seq_length_trunc, sample_rate,
window_size, window_stride,
window, normalize_audio,
image_channel_size=image_channel_size)
# For all data types, the tgt side corpus is in form of text.
tgt_examples_iter, num_tgt_feats = \
TextDataset.make_text_examples_nfeats_tpl(
tgt_data_iter, tgt_path, tgt_seq_length_trunc, "tgt")
if data_type == 'text':
dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
src_seq_length=src_seq_length,
tgt_seq_length=tgt_seq_length,
dynamic_dict=dynamic_dict,
use_filter_pred=use_filter_pred)
elif data_type == 'img':
dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
tgt_seq_length=tgt_seq_length,
use_filter_pred=use_filter_pred,
image_channel_size=image_channel_size)
elif data_type == 'audio':
dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
tgt_seq_length=tgt_seq_length,
sample_rate=sample_rate,
window_size=window_size,
window_stride=window_stride,
window=window,
normalize_audio=normalize_audio,
use_filter_pred=use_filter_pred)
return dataset | [
"def",
"build_dataset",
"(",
"fields",
",",
"data_type",
",",
"src_data_iter",
"=",
"None",
",",
"src_path",
"=",
"None",
",",
"src_dir",
"=",
"None",
",",
"tgt_data_iter",
"=",
"None",
",",
"tgt_path",
"=",
"None",
",",
"src_seq_length",
"=",
"0",
",",
"tgt_seq_length",
"=",
"0",
",",
"src_seq_length_trunc",
"=",
"0",
",",
"tgt_seq_length_trunc",
"=",
"0",
",",
"dynamic_dict",
"=",
"True",
",",
"sample_rate",
"=",
"0",
",",
"window_size",
"=",
"0",
",",
"window_stride",
"=",
"0",
",",
"window",
"=",
"None",
",",
"normalize_audio",
"=",
"True",
",",
"use_filter_pred",
"=",
"True",
",",
"image_channel_size",
"=",
"3",
")",
":",
"def",
"_make_examples_nfeats_tpl",
"(",
"data_type",
",",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"src_seq_length_trunc",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"image_channel_size",
"=",
"3",
")",
":",
"\"\"\"\n Process the corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n \"\"\"",
"if",
"data_type",
"==",
"'text'",
":",
"src_examples_iter",
",",
"num_src_feats",
"=",
"TextDataset",
".",
"make_text_examples_nfeats_tpl",
"(",
"src_data_iter",
",",
"src_path",
",",
"src_seq_length_trunc",
",",
"\"src\"",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"src_examples_iter",
",",
"num_src_feats",
"=",
"ImageDataset",
".",
"make_image_examples_nfeats_tpl",
"(",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"image_channel_size",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"if",
"src_data_iter",
":",
"raise",
"ValueError",
"(",
"\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\"",
")",
"if",
"src_path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"AudioDataset requires a non None path\"",
")",
"src_examples_iter",
",",
"num_src_feats",
"=",
"AudioDataset",
".",
"make_audio_examples_nfeats_tpl",
"(",
"src_path",
",",
"src_dir",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
")",
"return",
"src_examples_iter",
",",
"num_src_feats",
"src_examples_iter",
",",
"num_src_feats",
"=",
"_make_examples_nfeats_tpl",
"(",
"data_type",
",",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"src_seq_length_trunc",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"image_channel_size",
"=",
"image_channel_size",
")",
"# For all data types, the tgt side corpus is in form of text.",
"tgt_examples_iter",
",",
"num_tgt_feats",
"=",
"TextDataset",
".",
"make_text_examples_nfeats_tpl",
"(",
"tgt_data_iter",
",",
"tgt_path",
",",
"tgt_seq_length_trunc",
",",
"\"tgt\"",
")",
"if",
"data_type",
"==",
"'text'",
":",
"dataset",
"=",
"TextDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"src_seq_length",
"=",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"dynamic_dict",
"=",
"dynamic_dict",
",",
"use_filter_pred",
"=",
"use_filter_pred",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"dataset",
"=",
"ImageDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"use_filter_pred",
"=",
"use_filter_pred",
",",
"image_channel_size",
"=",
"image_channel_size",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"dataset",
"=",
"AudioDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"sample_rate",
"=",
"sample_rate",
",",
"window_size",
"=",
"window_size",
",",
"window_stride",
"=",
"window_stride",
",",
"window",
"=",
"window",
",",
"normalize_audio",
"=",
"normalize_audio",
",",
"use_filter_pred",
"=",
"use_filter_pred",
")",
"return",
"dataset"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L183-L269 |
|
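A sketch of `build_dataset` for the text case; the corpus paths and length limits are illustrative assumptions:

```python
# Hedged sketch: build a TextDataset from parallel plain-text files.
from onmt.inputters.inputter import build_dataset

dataset = build_dataset(fields, "text",
                        src_path="data/train.src.txt",  # hypothetical path
                        tgt_path="data/train.tgt.txt",  # hypothetical path
                        src_seq_length=400,
                        tgt_seq_length=100)
```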
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | build_vocab | (train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency) | return fields | Args:
train_dataset_files: a list of train dataset .pt files.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary.
Returns:
Dict of Fields | Args:
train_dataset_files: a list of train dataset .pt files.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary. | [
"Args",
":",
"train_dataset_files",
":",
"a",
"list",
"of",
"train",
"dataset",
"pt",
"file",
".",
"fields",
"(",
"dict",
")",
":",
"fields",
"to",
"build",
"vocab",
"for",
".",
"data_type",
":",
"text",
"img",
"or",
"audio",
"?",
"share_vocab",
"(",
"bool",
")",
":",
"share",
"source",
"and",
"target",
"vocabulary?",
"src_vocab_path",
"(",
"string",
")",
":",
"Path",
"to",
"src",
"vocabulary",
"file",
".",
"src_vocab_size",
"(",
"int",
")",
":",
"size",
"of",
"the",
"source",
"vocabulary",
".",
"src_words_min_frequency",
"(",
"int",
")",
":",
"the",
"minimum",
"frequency",
"needed",
"to",
"include",
"a",
"source",
"word",
"in",
"the",
"vocabulary",
".",
"tgt_vocab_path",
"(",
"string",
")",
":",
"Path",
"to",
"tgt",
"vocabulary",
"file",
".",
"tgt_vocab_size",
"(",
"int",
")",
":",
"size",
"of",
"the",
"target",
"vocabulary",
".",
"tgt_words_min_frequency",
"(",
"int",
")",
":",
"the",
"minimum",
"frequency",
"needed",
"to",
"include",
"a",
"target",
"word",
"in",
"the",
"vocabulary",
"."
] | def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency):
"""
Args:
train_dataset_files: a list of train dataset .pt files.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary.
Returns:
Dict of Fields
"""
counter = {}
# Pop src from fields to reduce memory usage when training with images
if data_type == 'img':
fields.pop("src")
for k in fields:
counter[k] = Counter()
# Load vocabulary
src_vocab = load_vocabulary(src_vocab_path, tag="source")
tgt_vocab = load_vocabulary(tgt_vocab_path, tag="target")
for index, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for k in fields:
val = getattr(ex, k, None)
if val is not None and not fields[k].sequential:
val = [val]
elif k == 'src' and src_vocab:
val = [item for item in val if item in src_vocab]
elif k == 'tgt' and tgt_vocab:
val = [item for item in val if item in tgt_vocab]
counter[k].update(val)
# Drop datasets we no longer need from memory, but keep the last one
if (index < len(train_dataset_files) - 1):
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
_build_field_vocab(fields["tgt"], counter["tgt"],
max_size=tgt_vocab_size,
min_freq=tgt_words_min_frequency)
logger.info(" * tgt vocab size: %d." % len(fields["tgt"].vocab))
# All datasets have same num of n_tgt_features,
# getting the last one is OK.
for j in range(dataset.n_tgt_feats):
key = "tgt_feat_" + str(j)
_build_field_vocab(fields[key], counter[key])
logger.info(" * %s vocab size: %d." % (key,
len(fields[key].vocab)))
if data_type == 'text':
_build_field_vocab(fields["src"], counter["src"],
max_size=src_vocab_size,
min_freq=src_words_min_frequency)
logger.info(" * src vocab size: %d." % len(fields["src"].vocab))
# All datasets have same num of n_src_features,
# getting the last one is OK.
for j in range(dataset.n_src_feats):
key = "src_feat_" + str(j)
_build_field_vocab(fields[key], counter[key])
logger.info(" * %s vocab size: %d." %
(key, len(fields[key].vocab)))
# Merge the input and output vocabularies.
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
merged_vocab = merge_vocabs(
[fields["src"].vocab, fields["tgt"].vocab],
vocab_size=src_vocab_size)
fields["src"].vocab = merged_vocab
fields["tgt"].vocab = merged_vocab
return fields | [
"def",
"build_vocab",
"(",
"train_dataset_files",
",",
"fields",
",",
"data_type",
",",
"share_vocab",
",",
"src_vocab_path",
",",
"src_vocab_size",
",",
"src_words_min_frequency",
",",
"tgt_vocab_path",
",",
"tgt_vocab_size",
",",
"tgt_words_min_frequency",
")",
":",
"counter",
"=",
"{",
"}",
"# Prop src from field to get lower memory using when training with image",
"if",
"data_type",
"==",
"'img'",
":",
"fields",
".",
"pop",
"(",
"\"src\"",
")",
"for",
"k",
"in",
"fields",
":",
"counter",
"[",
"k",
"]",
"=",
"Counter",
"(",
")",
"# Load vocabulary",
"src_vocab",
"=",
"load_vocabulary",
"(",
"src_vocab_path",
",",
"tag",
"=",
"\"source\"",
")",
"tgt_vocab",
"=",
"load_vocabulary",
"(",
"tgt_vocab_path",
",",
"tag",
"=",
"\"target\"",
")",
"for",
"index",
",",
"path",
"in",
"enumerate",
"(",
"train_dataset_files",
")",
":",
"dataset",
"=",
"torch",
".",
"load",
"(",
"path",
")",
"logger",
".",
"info",
"(",
"\" * reloading %s.\"",
"%",
"path",
")",
"for",
"ex",
"in",
"dataset",
".",
"examples",
":",
"for",
"k",
"in",
"fields",
":",
"val",
"=",
"getattr",
"(",
"ex",
",",
"k",
",",
"None",
")",
"if",
"val",
"is",
"not",
"None",
"and",
"not",
"fields",
"[",
"k",
"]",
".",
"sequential",
":",
"val",
"=",
"[",
"val",
"]",
"elif",
"k",
"==",
"'src'",
"and",
"src_vocab",
":",
"val",
"=",
"[",
"item",
"for",
"item",
"in",
"val",
"if",
"item",
"in",
"src_vocab",
"]",
"elif",
"k",
"==",
"'tgt'",
"and",
"tgt_vocab",
":",
"val",
"=",
"[",
"item",
"for",
"item",
"in",
"val",
"if",
"item",
"in",
"tgt_vocab",
"]",
"counter",
"[",
"k",
"]",
".",
"update",
"(",
"val",
")",
"# Drop the none-using from memory but keep the last",
"if",
"(",
"index",
"<",
"len",
"(",
"train_dataset_files",
")",
"-",
"1",
")",
":",
"dataset",
".",
"examples",
"=",
"None",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
".",
"examples",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
"gc",
".",
"collect",
"(",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"\"tgt\"",
"]",
",",
"counter",
"[",
"\"tgt\"",
"]",
",",
"max_size",
"=",
"tgt_vocab_size",
",",
"min_freq",
"=",
"tgt_words_min_frequency",
")",
"logger",
".",
"info",
"(",
"\" * tgt vocab size: %d.\"",
"%",
"len",
"(",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
")",
"# All datasets have same num of n_tgt_features,",
"# getting the last one is OK.",
"for",
"j",
"in",
"range",
"(",
"dataset",
".",
"n_tgt_feats",
")",
":",
"key",
"=",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"key",
"]",
",",
"counter",
"[",
"key",
"]",
")",
"logger",
".",
"info",
"(",
"\" * %s vocab size: %d.\"",
"%",
"(",
"key",
",",
"len",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
")",
")",
"if",
"data_type",
"==",
"'text'",
":",
"_build_field_vocab",
"(",
"fields",
"[",
"\"src\"",
"]",
",",
"counter",
"[",
"\"src\"",
"]",
",",
"max_size",
"=",
"src_vocab_size",
",",
"min_freq",
"=",
"src_words_min_frequency",
")",
"logger",
".",
"info",
"(",
"\" * src vocab size: %d.\"",
"%",
"len",
"(",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
")",
")",
"# All datasets have same num of n_src_features,",
"# getting the last one is OK.",
"for",
"j",
"in",
"range",
"(",
"dataset",
".",
"n_src_feats",
")",
":",
"key",
"=",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"key",
"]",
",",
"counter",
"[",
"key",
"]",
")",
"logger",
".",
"info",
"(",
"\" * %s vocab size: %d.\"",
"%",
"(",
"key",
",",
"len",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
")",
")",
"# Merge the input and output vocabularies.",
"if",
"share_vocab",
":",
"# `tgt_vocab_size` is ignored when sharing vocabularies",
"logger",
".",
"info",
"(",
"\" * merging src and tgt vocab...\"",
")",
"merged_vocab",
"=",
"merge_vocabs",
"(",
"[",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"]",
",",
"vocab_size",
"=",
"src_vocab_size",
")",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
"=",
"merged_vocab",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"=",
"merged_vocab",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L280-L374 |
|
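A sketch of `build_vocab` over one serialized training shard; the shard name and the size limits are illustrative assumptions:

```python
# Hedged sketch: count tokens over the saved shard(s), then build the
# src/tgt vocabularies (plus one vocabulary per feature field).
from onmt.inputters.inputter import build_vocab

fields = build_vocab(["demo.train.0.pt"], fields, "text",
                     share_vocab=False,
                     src_vocab_path=None, src_vocab_size=50000,
                     src_words_min_frequency=0,
                     tgt_vocab_path=None, tgt_vocab_size=50000,
                     tgt_words_min_frequency=0)
```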
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | load_vocabulary | (vocabulary_path, tag="") | return vocabulary | Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null | Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null | [
"Loads",
"a",
"vocabulary",
"from",
"the",
"given",
"path",
".",
":",
"param",
"vocabulary_path",
":",
"path",
"to",
"load",
"vocabulary",
"from",
":",
"param",
"tag",
":",
"tag",
"for",
"vocabulary",
"(",
"only",
"used",
"for",
"logging",
")",
":",
"return",
":",
"vocabulary",
"or",
"None",
"if",
"path",
"is",
"null"
] | def load_vocabulary(vocabulary_path, tag=""):
"""
Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null
"""
vocabulary = None
if vocabulary_path:
vocabulary = set([])
logger.info("Loading {} vocabulary from {}".format(tag,
vocabulary_path))
if not os.path.exists(vocabulary_path):
raise RuntimeError(
"{} vocabulary not found at {}!".format(tag, vocabulary_path))
else:
with open(vocabulary_path) as f:
for line in f:
if len(line.strip()) == 0:
continue
word = line.strip().split()[0]
vocabulary.add(word)
return vocabulary | [
"def",
"load_vocabulary",
"(",
"vocabulary_path",
",",
"tag",
"=",
"\"\"",
")",
":",
"vocabulary",
"=",
"None",
"if",
"vocabulary_path",
":",
"vocabulary",
"=",
"set",
"(",
"[",
"]",
")",
"logger",
".",
"info",
"(",
"\"Loading {} vocabulary from {}\"",
".",
"format",
"(",
"tag",
",",
"vocabulary_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"vocabulary_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"{} vocabulary not found at {}!\"",
".",
"format",
"(",
"tag",
",",
"vocabulary_path",
")",
")",
"else",
":",
"with",
"open",
"(",
"vocabulary_path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"len",
"(",
"line",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"continue",
"word",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"vocabulary",
".",
"add",
"(",
"word",
")",
"return",
"vocabulary"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L377-L400 |
|
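A sketch of `load_vocabulary`; it expects one token per line (extra columns are ignored), and the path below is hypothetical:

```python
# Hedged sketch: load a fixed source vocabulary to restrict counting.
from onmt.inputters.inputter import load_vocabulary

src_vocab = load_vocabulary("data/src.vocab.txt", tag="source")  # set of words, or None
```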
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | build_dataset_iter | (datasets, fields, opt, is_train=True) | return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,
device, is_train) | This returns a user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but more sophisticated strategies like curriculum learning are fine too. | This returns a user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but more sophisticated strategies like curriculum learning are fine too. | [
"This",
"returns",
"user",
"-",
"defined",
"train",
"/",
"validate",
"data",
"iterator",
"for",
"the",
"trainer",
"to",
"iterate",
"over",
".",
"We",
"implement",
"simple",
"ordered",
"iterator",
"strategy",
"here",
"but",
"more",
"sophisticated",
"strategy",
"like",
"curriculum",
"learning",
"is",
"ok",
"too",
"."
] | def build_dataset_iter(datasets, fields, opt, is_train=True):
"""
This returns a user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but more sophisticated strategies like curriculum learning are fine too.
"""
batch_size = opt.batch_size if is_train else opt.valid_batch_size
if is_train and opt.batch_type == "tokens":
def batch_size_fn(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: <bos> w1 ... wN <eos>
max_src_in_batch = max(max_src_in_batch, len(new.src) + 2)
# Tgt: w1 ... wN <eos>
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
else:
batch_size_fn = None
if opt.gpu_ranks:
device = "cuda"
else:
device = "cpu"
return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,
device, is_train) | [
"def",
"build_dataset_iter",
"(",
"datasets",
",",
"fields",
",",
"opt",
",",
"is_train",
"=",
"True",
")",
":",
"batch_size",
"=",
"opt",
".",
"batch_size",
"if",
"is_train",
"else",
"opt",
".",
"valid_batch_size",
"if",
"is_train",
"and",
"opt",
".",
"batch_type",
"==",
"\"tokens\"",
":",
"def",
"batch_size_fn",
"(",
"new",
",",
"count",
",",
"sofar",
")",
":",
"\"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"",
"# Maintains the longest src and tgt length in the current batch",
"global",
"max_src_in_batch",
",",
"max_tgt_in_batch",
"# Reset current longest length at a new batch (count=1)",
"if",
"count",
"==",
"1",
":",
"max_src_in_batch",
"=",
"0",
"max_tgt_in_batch",
"=",
"0",
"# Src: <bos> w1 ... wN <eos>",
"max_src_in_batch",
"=",
"max",
"(",
"max_src_in_batch",
",",
"len",
"(",
"new",
".",
"src",
")",
"+",
"2",
")",
"# Tgt: w1 ... wN <eos>",
"max_tgt_in_batch",
"=",
"max",
"(",
"max_tgt_in_batch",
",",
"len",
"(",
"new",
".",
"tgt",
")",
"+",
"1",
")",
"src_elements",
"=",
"count",
"*",
"max_src_in_batch",
"tgt_elements",
"=",
"count",
"*",
"max_tgt_in_batch",
"return",
"max",
"(",
"src_elements",
",",
"tgt_elements",
")",
"else",
":",
"batch_size_fn",
"=",
"None",
"if",
"opt",
".",
"gpu_ranks",
":",
"device",
"=",
"\"cuda\"",
"else",
":",
"device",
"=",
"\"cpu\"",
"return",
"DatasetLazyIter",
"(",
"datasets",
",",
"fields",
",",
"batch_size",
",",
"batch_size_fn",
",",
"device",
",",
"is_train",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L491-L527 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | lazily_load_dataset | (corpus_type, opt) | Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed until the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of datasets; the dataset(s) are lazily loaded. | Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed until the first loading time. | [
"Dataset",
"generator",
".",
"Don",
"t",
"do",
"extra",
"stuff",
"here",
"like",
"printing",
"because",
"they",
"will",
"be",
"postponed",
"to",
"the",
"first",
"loading",
"time",
"."
] | def lazily_load_dataset(corpus_type, opt):
"""
Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed until the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of datasets; the dataset(s) are lazily loaded.
"""
assert corpus_type in ["train", "valid"]
def _lazy_dataset_loader(pt_file, corpus_type):
dataset = torch.load(pt_file)
logger.info('Loading %s dataset from %s, number of examples: %d' %
(corpus_type, pt_file, len(dataset)))
return dataset
# Sort the glob output by file name (by increasing indexes).
pts = sorted(glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type)
else:
# Only one inputters.*Dataset, simple!
pt = opt.data + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type) | [
"def",
"lazily_load_dataset",
"(",
"corpus_type",
",",
"opt",
")",
":",
"assert",
"corpus_type",
"in",
"[",
"\"train\"",
",",
"\"valid\"",
"]",
"def",
"_lazy_dataset_loader",
"(",
"pt_file",
",",
"corpus_type",
")",
":",
"dataset",
"=",
"torch",
".",
"load",
"(",
"pt_file",
")",
"logger",
".",
"info",
"(",
"'Loading %s dataset from %s, number of examples: %d'",
"%",
"(",
"corpus_type",
",",
"pt_file",
",",
"len",
"(",
"dataset",
")",
")",
")",
"return",
"dataset",
"# Sort the glob output by file name (by increasing indexes).",
"pts",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"opt",
".",
"data",
"+",
"'.'",
"+",
"corpus_type",
"+",
"'.[0-9]*.pt'",
")",
")",
"if",
"pts",
":",
"for",
"pt",
"in",
"pts",
":",
"yield",
"_lazy_dataset_loader",
"(",
"pt",
",",
"corpus_type",
")",
"else",
":",
"# Only one inputters.*Dataset, simple!",
"pt",
"=",
"opt",
".",
"data",
"+",
"'.'",
"+",
"corpus_type",
"+",
"'.pt'",
"yield",
"_lazy_dataset_loader",
"(",
"pt",
",",
"corpus_type",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L530-L556 |
||
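The two functions above combine into the training loop's data feed. A sketch, assuming `opt` carries the usual fields (`data`, `batch_size`, `batch_type`, `gpu_ranks`). As a worked example of the token-batching rule in `batch_size_fn`: with `batch_type == "tokens"` and `batch_size == 4096`, a batch of 32 sequences whose longest source is 120 tokens counts as 32 * (120 + 2) = 3904 source elements, so another sequence of similar length still fits.

```python
# Hedged sketch: lazily stream dataset shards into an ordered iterator.
from onmt.inputters.inputter import (lazily_load_dataset, build_dataset_iter,
                                     make_features)

train_iter = build_dataset_iter(lazily_load_dataset("train", opt), fields, opt)
for batch in train_iter:
    src = make_features(batch, "src", data_type="text")
    tgt = make_features(batch, "tgt")
```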
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/inputter.py | python | OrderedIterator.create_batches | (self) | Create batches | Create batches | [
"Create",
"batches"
] | def create_batches(self):
""" Create batches """
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = torchtext.data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key)) | [
"def",
"create_batches",
"(",
"self",
")",
":",
"if",
"self",
".",
"train",
":",
"def",
"_pool",
"(",
"data",
",",
"random_shuffler",
")",
":",
"for",
"p",
"in",
"torchtext",
".",
"data",
".",
"batch",
"(",
"data",
",",
"self",
".",
"batch_size",
"*",
"100",
")",
":",
"p_batch",
"=",
"torchtext",
".",
"data",
".",
"batch",
"(",
"sorted",
"(",
"p",
",",
"key",
"=",
"self",
".",
"sort_key",
")",
",",
"self",
".",
"batch_size",
",",
"self",
".",
"batch_size_fn",
")",
"for",
"b",
"in",
"random_shuffler",
"(",
"list",
"(",
"p_batch",
")",
")",
":",
"yield",
"b",
"self",
".",
"batches",
"=",
"_pool",
"(",
"self",
".",
"data",
"(",
")",
",",
"self",
".",
"random_shuffler",
")",
"else",
":",
"self",
".",
"batches",
"=",
"[",
"]",
"for",
"b",
"in",
"torchtext",
".",
"data",
".",
"batch",
"(",
"self",
".",
"data",
"(",
")",
",",
"self",
".",
"batch_size",
",",
"self",
".",
"batch_size_fn",
")",
":",
"self",
".",
"batches",
".",
"append",
"(",
"sorted",
"(",
"b",
",",
"key",
"=",
"self",
".",
"sort_key",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/inputter.py#L406-L422 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.start | (self, config_file) | Read the config file and pre-/load the models | Read the config file and pre-/load the models | [
"Read",
"the",
"config",
"file",
"and",
"pre",
"-",
"/",
"load",
"the",
"models"
] | def start(self, config_file):
"""Read the config file and pre-/load the models
"""
self.config_file = config_file
with open(self.config_file) as f:
self.confs = json.load(f)
self.models_root = self.confs.get('models_root', './available_models')
for i, conf in enumerate(self.confs["models"]):
if "models" not in conf:
if "model" in conf:
# backwards compatibility for confs
conf["models"] = [conf["model"]]
else:
raise ValueError("""Incorrect config file: missing 'models'
parameter for model #%d""" % i)
kwargs = {'timeout': conf.get('timeout', None),
'load': conf.get('load', None),
'tokenizer_opt': conf.get('tokenizer', None),
'on_timeout': conf.get('on_timeout', None),
'model_root': conf.get('model_root', self.models_root)
}
kwargs = {k: v for (k, v) in kwargs.items() if v is not None}
model_id = conf.get("id", None)
opt = conf["opt"]
opt["models"] = conf["models"]
self.preload_model(opt, model_id=model_id, **kwargs) | [
"def",
"start",
"(",
"self",
",",
"config_file",
")",
":",
"self",
".",
"config_file",
"=",
"config_file",
"with",
"open",
"(",
"self",
".",
"config_file",
")",
"as",
"f",
":",
"self",
".",
"confs",
"=",
"json",
".",
"load",
"(",
"f",
")",
"self",
".",
"models_root",
"=",
"self",
".",
"confs",
".",
"get",
"(",
"'models_root'",
",",
"'./available_models'",
")",
"for",
"i",
",",
"conf",
"in",
"enumerate",
"(",
"self",
".",
"confs",
"[",
"\"models\"",
"]",
")",
":",
"if",
"\"models\"",
"not",
"in",
"conf",
":",
"if",
"\"model\"",
"in",
"conf",
":",
"# backwards compatibility for confs",
"conf",
"[",
"\"models\"",
"]",
"=",
"[",
"conf",
"[",
"\"model\"",
"]",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\"",
"%",
"i",
")",
"kwargs",
"=",
"{",
"'timeout'",
":",
"conf",
".",
"get",
"(",
"'timeout'",
",",
"None",
")",
",",
"'load'",
":",
"conf",
".",
"get",
"(",
"'load'",
",",
"None",
")",
",",
"'tokenizer_opt'",
":",
"conf",
".",
"get",
"(",
"'tokenizer'",
",",
"None",
")",
",",
"'on_timeout'",
":",
"conf",
".",
"get",
"(",
"'on_timeout'",
",",
"None",
")",
",",
"'model_root'",
":",
"conf",
".",
"get",
"(",
"'model_root'",
",",
"self",
".",
"models_root",
")",
"}",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"model_id",
"=",
"conf",
".",
"get",
"(",
"\"id\"",
",",
"None",
")",
"opt",
"=",
"conf",
"[",
"\"opt\"",
"]",
"opt",
"[",
"\"models\"",
"]",
"=",
"conf",
"[",
"\"models\"",
"]",
"self",
".",
"preload_model",
"(",
"opt",
",",
"model_id",
"=",
"model_id",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L54-L80 |
||
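A configuration sketch matching the keys that `start` reads above; every value here (id, file names, opt) is an illustrative assumption, and the no-argument `TranslationServer()` constructor is assumed as well:

```python
# Hedged sketch: write a minimal server config and start the server.
import json
from onmt.translate.translation_server import TranslationServer

conf = {
    "models_root": "./available_models",
    "models": [{
        "id": 100,
        "models": ["demo-model.pt"],  # legacy configs may use "model" instead
        "timeout": 600,
        "on_timeout": "to_cpu",
        "load": True,
        "opt": {"beam_size": 5},
    }],
}
with open("conf.json", "w") as f:
    json.dump(conf, f)

server = TranslationServer()  # assumed default constructor
server.start("conf.json")
```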
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.clone_model | (self, model_id, opt, timeout=-1) | Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options | Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options | [
"Clone",
"a",
"model",
"model_id",
".",
"Different",
"options",
"may",
"be",
"passed",
".",
"If",
"opt",
"is",
"None",
"it",
"will",
"use",
"the",
"same",
"set",
"of",
"options"
] | def clone_model(self, model_id, opt, timeout=-1):
"""Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options
"""
if model_id in self.models:
if opt is None:
opt = self.models[model_id].user_opt
opt["models"] = self.models[model_id].opt.models
return self.load_model(opt, timeout)
else:
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"clone_model",
"(",
"self",
",",
"model_id",
",",
"opt",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
":",
"if",
"opt",
"is",
"None",
":",
"opt",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"user_opt",
"opt",
"[",
"\"models\"",
"]",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"opt",
".",
"models",
"return",
"self",
".",
"load_model",
"(",
"opt",
",",
"timeout",
")",
"else",
":",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L82-L93 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.load_model | (self, opt, model_id=None, **model_kwargs) | return model_id, load_time | Loading a model given a set of options | Loading a model given a set of options | [
"Loading",
"a",
"model",
"given",
"a",
"set",
"of",
"options"
] | def load_model(self, opt, model_id=None, **model_kwargs):
"""Loading a model given a set of options
"""
model_id = self.preload_model(opt, model_id=model_id, **model_kwargs)
load_time = self.models[model_id].load_time
return model_id, load_time | [
"def",
"load_model",
"(",
"self",
",",
"opt",
",",
"model_id",
"=",
"None",
",",
"*",
"*",
"model_kwargs",
")",
":",
"model_id",
"=",
"self",
".",
"preload_model",
"(",
"opt",
",",
"model_id",
"=",
"model_id",
",",
"*",
"*",
"model_kwargs",
")",
"load_time",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"load_time",
"return",
"model_id",
",",
"load_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L95-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.preload_model | (self, opt, model_id=None, **model_kwargs) | return model_id | Preloading the model: updating internal data structures.
It will effectively load the model if `load` is set | Preloading the model: updating internal data structures.
It will effectively load the model if `load` is set | [
"Preloading",
"the",
"model",
":",
"updating",
"internal",
"datastructure",
"It",
"will",
"effectively",
"load",
"the",
"model",
"if",
"load",
"is",
"set"
] | def preload_model(self, opt, model_id=None, **model_kwargs):
"""Preloading the model: updating internal datastructure
It will effectively load the model if `load` is set
"""
if model_id is not None:
if model_id in self.models.keys():
raise ValueError("Model ID %d already exists" % model_id)
else:
model_id = self.next_id
while model_id in self.models.keys():
model_id += 1
self.next_id = model_id + 1
print("Pre-loading model %d" % model_id)
model = ServerModel(opt, model_id, **model_kwargs)
self.models[model_id] = model
return model_id | [
"def",
"preload_model",
"(",
"self",
",",
"opt",
",",
"model_id",
"=",
"None",
",",
"*",
"*",
"model_kwargs",
")",
":",
"if",
"model_id",
"is",
"not",
"None",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Model ID %d already exists\"",
"%",
"model_id",
")",
"else",
":",
"model_id",
"=",
"self",
".",
"next_id",
"while",
"model_id",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"model_id",
"+=",
"1",
"self",
".",
"next_id",
"=",
"model_id",
"+",
"1",
"print",
"(",
"\"Pre-loading model %d\"",
"%",
"model_id",
")",
"model",
"=",
"ServerModel",
"(",
"opt",
",",
"model_id",
",",
"*",
"*",
"model_kwargs",
")",
"self",
".",
"models",
"[",
"model_id",
"]",
"=",
"model",
"return",
"model_id"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L103-L119 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.run | (self, inputs) | Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}]
We use inputs[0]["id"] as the model id | Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}] | [
"Translate",
"inputs",
"We",
"keep",
"the",
"same",
"format",
"as",
"the",
"Lua",
"version",
"i",
".",
"e",
".",
"[",
"{",
"id",
":",
"model_id",
"src",
":",
"sequence",
"to",
"translate",
"}",
"{",
"...",
"}",
"]"
] | def run(self, inputs):
"""Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}]
We use inputs[0]["id"] as the model id
"""
model_id = inputs[0].get("id", 0)
if model_id in self.models and self.models[model_id] is not None:
return self.models[model_id].run(inputs)
else:
print("Error No such model '%s'" % str(model_id))
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"run",
"(",
"self",
",",
"inputs",
")",
":",
"model_id",
"=",
"inputs",
"[",
"0",
"]",
".",
"get",
"(",
"\"id\"",
",",
"0",
")",
"if",
"model_id",
"in",
"self",
".",
"models",
"and",
"self",
".",
"models",
"[",
"model_id",
"]",
"is",
"not",
"None",
":",
"return",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"run",
"(",
"inputs",
")",
"else",
":",
"print",
"(",
"\"Error No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L121-L133 |
||
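A sketch of the request format accepted by `run`, following the Lua-style payload described in the docstring; the model id and sentence are illustrative:

```python
# Hedged sketch: inputs[0]["id"] selects the model; each item carries a "src".
inputs = [{"id": 100, "src": "this is a test ."}]
results = server.run(inputs)
```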
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.unload_model | (self, model_id) | Manually unload a model.
It will free the memory and cancel the timer | Manually unload a model.
It will free the memory and cancel the timer | [
"Manually",
"unload",
"a",
"model",
".",
"It",
"will",
"free",
"the",
"memory",
"and",
"cancel",
"the",
"timer"
] | def unload_model(self, model_id):
"""Manually unload a model.
It will free the memory and cancel the timer
"""
if model_id in self.models and self.models[model_id] is not None:
self.models[model_id].unload()
else:
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"unload_model",
"(",
"self",
",",
"model_id",
")",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
"and",
"self",
".",
"models",
"[",
"model_id",
"]",
"is",
"not",
"None",
":",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"unload",
"(",
")",
"else",
":",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L135-L142 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | TranslationServer.list_models | (self) | return models | Return the list of available models | Return the list of available models | [
"Return",
"the",
"list",
"of",
"available",
"models"
] | def list_models(self):
"""Return the list of available models
"""
models = []
for _, model in self.models.items():
models += [model.to_dict()]
return models | [
"def",
"list_models",
"(",
"self",
")",
":",
"models",
"=",
"[",
"]",
"for",
"_",
",",
"model",
"in",
"self",
".",
"models",
".",
"items",
"(",
")",
":",
"models",
"+=",
"[",
"model",
".",
"to_dict",
"(",
")",
"]",
"return",
"models"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L144-L150 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.__init__ | (self, opt, model_id, tokenizer_opt=None, load=False,
timeout=-1, on_timeout="to_cpu", model_root="./") | Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files | Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files | [
"Args",
":",
"opt",
":",
"(",
"dict",
")",
"options",
"for",
"the",
"Translator",
"model_id",
":",
"(",
"int",
")",
"model",
"id",
"tokenizer_opt",
":",
"(",
"dict",
")",
"options",
"for",
"the",
"tokenizer",
"or",
"None",
"load",
":",
"(",
"bool",
")",
"whether",
"to",
"load",
"the",
"model",
"during",
"__init__",
"timeout",
":",
"(",
"int",
")",
"seconds",
"before",
"running",
"do_timeout",
"Negative",
"values",
"means",
"no",
"timeout",
"on_timeout",
":",
"(",
"str",
")",
"in",
"[",
"to_cpu",
"unload",
"]",
"set",
"what",
"to",
"do",
"on",
"timeout",
"(",
"see",
"function",
"do_timeout",
")",
"model_root",
":",
"(",
"str",
")",
"path",
"to",
"the",
"model",
"directory",
"it",
"must",
"contain",
"de",
"model",
"and",
"tokenizer",
"file"
] | def __init__(self, opt, model_id, tokenizer_opt=None, load=False,
timeout=-1, on_timeout="to_cpu", model_root="./"):
"""
Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files
"""
self.model_root = model_root
self.opt = self.parse_opt(opt)
if self.opt.n_best > 1:
raise ValueError("Values of n_best > 1 are not supported")
self.model_id = model_id
self.tokenizer_opt = tokenizer_opt
self.timeout = timeout
self.on_timeout = on_timeout
self.unload_timer = None
self.user_opt = opt
self.tokenizer = None
self.logger = init_logger(self.opt.log_file)
self.loading_lock = threading.Event()
self.loading_lock.set()
if load:
self.load() | [
"def",
"__init__",
"(",
"self",
",",
"opt",
",",
"model_id",
",",
"tokenizer_opt",
"=",
"None",
",",
"load",
"=",
"False",
",",
"timeout",
"=",
"-",
"1",
",",
"on_timeout",
"=",
"\"to_cpu\"",
",",
"model_root",
"=",
"\"./\"",
")",
":",
"self",
".",
"model_root",
"=",
"model_root",
"self",
".",
"opt",
"=",
"self",
".",
"parse_opt",
"(",
"opt",
")",
"if",
"self",
".",
"opt",
".",
"n_best",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values of n_best > 1 are not supported\"",
")",
"self",
".",
"model_id",
"=",
"model_id",
"self",
".",
"tokenizer_opt",
"=",
"tokenizer_opt",
"self",
".",
"timeout",
"=",
"timeout",
"self",
".",
"on_timeout",
"=",
"on_timeout",
"self",
".",
"unload_timer",
"=",
"None",
"self",
".",
"user_opt",
"=",
"opt",
"self",
".",
"tokenizer",
"=",
"None",
"self",
".",
"logger",
"=",
"init_logger",
"(",
"self",
".",
"opt",
".",
"log_file",
")",
"self",
".",
"loading_lock",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"loading_lock",
".",
"set",
"(",
")",
"if",
"load",
":",
"self",
".",
"load",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L154-L188 |
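To make the timeout options above concrete, a hedged construction sketch (the checkpoint path and numbers are placeholders):

    model = ServerModel({"models": "demo-model.pt"},  # parsed by parse_opt below
                        model_id=0,
                        load=True,            # load weights now instead of lazily
                        timeout=600,          # run do_timeout after 600 s idle
                        on_timeout="to_cpu",  # free GPU memory, keep the weights
                        model_root="./available_models")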
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.parse_opt | (self, opt) | return opt | Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user
Returns:
opt: (Namespace) full set of options for the Translator | Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user | [
"Parse",
"the",
"option",
"set",
"passed",
"by",
"the",
"user",
"using",
"onmt",
".",
"opts",
"Args",
":",
"opt",
":",
"(",
"dict",
")",
"options",
"passed",
"by",
"the",
"user"
] | def parse_opt(self, opt):
"""Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user
Returns:
opt: (Namespace) full set of options for the Translator
"""
prec_argv = sys.argv
sys.argv = sys.argv[:1]
parser = argparse.ArgumentParser()
onmt.opts.translate_opts(parser)
models = opt['models']
if not isinstance(models, (list, tuple)):
models = [models]
opt['models'] = [os.path.join(self.model_root, model)
for model in models]
opt['src'] = "dummy_src"
for (k, v) in opt.items():
if k == 'models':
sys.argv += ['-model']
sys.argv += [str(model) for model in v]
elif type(v) == bool:
sys.argv += ['-%s' % k]
else:
sys.argv += ['-%s' % k, str(v)]
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
sys.argv = prec_argv
return opt | [
"def",
"parse_opt",
"(",
"self",
",",
"opt",
")",
":",
"prec_argv",
"=",
"sys",
".",
"argv",
"sys",
".",
"argv",
"=",
"sys",
".",
"argv",
"[",
":",
"1",
"]",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"onmt",
".",
"opts",
".",
"translate_opts",
"(",
"parser",
")",
"models",
"=",
"opt",
"[",
"'models'",
"]",
"if",
"not",
"isinstance",
"(",
"models",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"models",
"=",
"[",
"models",
"]",
"opt",
"[",
"'models'",
"]",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"model_root",
",",
"model",
")",
"for",
"model",
"in",
"models",
"]",
"opt",
"[",
"'src'",
"]",
"=",
"\"dummy_src\"",
"for",
"(",
"k",
",",
"v",
")",
"in",
"opt",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'models'",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-model'",
"]",
"sys",
".",
"argv",
"+=",
"[",
"str",
"(",
"model",
")",
"for",
"model",
"in",
"v",
"]",
"elif",
"type",
"(",
"v",
")",
"==",
"bool",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-%s'",
"%",
"k",
"]",
"else",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-%s'",
"%",
"k",
",",
"str",
"(",
"v",
")",
"]",
"opt",
"=",
"parser",
".",
"parse_args",
"(",
")",
"opt",
".",
"cuda",
"=",
"opt",
".",
"gpu",
">",
"-",
"1",
"sys",
".",
"argv",
"=",
"prec_argv",
"return",
"opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L190-L223 |
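The technique in `parse_opt` above — pushing a dict of options through the command-line parser by temporarily rewriting `sys.argv` — is self-contained enough to demonstrate in isolation. A runnable sketch of the same pattern, with illustrative argument names:

    import argparse
    import sys

    def parse_dict_with_argv(parser, opt):
        """Feed a plain dict through an argparse parser by faking sys.argv."""
        saved_argv = sys.argv
        sys.argv = sys.argv[:1]            # keep only the program name
        for k, v in opt.items():
            if isinstance(v, bool):
                sys.argv += ['-%s' % k]    # booleans become bare flags
            else:
                sys.argv += ['-%s' % k, str(v)]
        args = parser.parse_args()
        sys.argv = saved_argv              # always restore the real argv
        return args

    p = argparse.ArgumentParser()
    p.add_argument('-beam_size', type=int, default=5)
    p.add_argument('-verbose', action='store_true')
    print(parse_dict_with_argv(p, {'beam_size': 10, 'verbose': True}))

As in the original, a boolean that is False would still be emitted as a bare flag, so callers should simply omit options they do not want set.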
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.run | (self, inputs) | return results, scores, self.opt.n_best, timer.times | Translate `inputs` using this model
Args:
inputs: [{"src": "..."},{"src": ...}]
Returns:
result: (list) translations
times: (dict) containing times | Translate `inputs` using this model | [
"Translate",
"inputs",
"using",
"this",
"model"
] | def run(self, inputs):
"""Translate `inputs` using this model
Args:
inputs: [{"src": "..."},{"src": ...}]
Returns:
result: (list) translations
times: (dict) containing times
"""
self.stop_unload_timer()
timer = Timer()
timer.start()
self.logger.info("Running translation using %d" % self.model_id)
if not self.loading_lock.is_set():
self.logger.info(
"Model #%d is being loaded by another thread, waiting"
% self.model_id)
if not self.loading_lock.wait(timeout=30):
raise ServerModelError("Model %d loading timeout"
% self.model_id)
else:
if not self.loaded:
self.load()
timer.tick(name="load")
elif self.opt.cuda:
self.to_gpu()
timer.tick(name="to_gpu")
texts = []
head_spaces = []
tail_spaces = []
sslength = []
for i, inp in enumerate(inputs):
src = inp['src']
if src.strip() == "":
head_spaces.append(src)
texts.append("")
tail_spaces.append("")
else:
whitespaces_before, whitespaces_after = "", ""
match_before = re.search(r'^\s+', src)
match_after = re.search(r'\s+$', src)
if match_before is not None:
whitespaces_before = match_before.group(0)
if match_after is not None:
whitespaces_after = match_after.group(0)
head_spaces.append(whitespaces_before)
tok = self.maybe_tokenize(src.strip())
texts.append(tok)
sslength.append(len(tok.split()))
tail_spaces.append(whitespaces_after)
empty_indices = [i for i, x in enumerate(texts) if x == ""]
texts_to_translate = [x for x in texts if x != ""]
scores = []
predictions = []
if len(texts_to_translate) > 0:
try:
scores, predictions = self.translator.translate(
src_data_iter=texts_to_translate,
batch_size=self.opt.batch_size)
except RuntimeError as e:
raise ServerModelError("Runtime Error: %s" % str(e))
timer.tick(name="translation")
self.logger.info("""Using model #%d\t%d inputs
\ttranslation time: %f""" % (self.model_id, len(texts),
timer.times['translation']))
self.reset_unload_timer()
# NOTE: translator returns lists of `n_best` list
# we can ignore that (i.e. flatten lists) only because
# we restrict `n_best=1`
def flatten_list(_list): return sum(_list, [])
results = flatten_list(predictions)
scores = [score_tensor.item()
for score_tensor in flatten_list(scores)]
results = [self.maybe_detokenize(item)
for item in results]
# build back results with empty texts
for i in empty_indices:
results.insert(i, "")
scores.insert(i, 0)
results = ["".join(items)
for items in zip(head_spaces, results, tail_spaces)]
self.logger.info("Translation Results: %d", len(results))
return results, scores, self.opt.n_best, timer.times | [
"def",
"run",
"(",
"self",
",",
"inputs",
")",
":",
"self",
".",
"stop_unload_timer",
"(",
")",
"timer",
"=",
"Timer",
"(",
")",
"timer",
".",
"start",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Running translation using %d\"",
"%",
"self",
".",
"model_id",
")",
"if",
"not",
"self",
".",
"loading_lock",
".",
"is_set",
"(",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Model #%d is being loaded by another thread, waiting\"",
"%",
"self",
".",
"model_id",
")",
"if",
"not",
"self",
".",
"loading_lock",
".",
"wait",
"(",
"timeout",
"=",
"30",
")",
":",
"raise",
"ServerModelError",
"(",
"\"Model %d loading timeout\"",
"%",
"self",
".",
"model_id",
")",
"else",
":",
"if",
"not",
"self",
".",
"loaded",
":",
"self",
".",
"load",
"(",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"load\"",
")",
"elif",
"self",
".",
"opt",
".",
"cuda",
":",
"self",
".",
"to_gpu",
"(",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"to_gpu\"",
")",
"texts",
"=",
"[",
"]",
"head_spaces",
"=",
"[",
"]",
"tail_spaces",
"=",
"[",
"]",
"sslength",
"=",
"[",
"]",
"for",
"i",
",",
"inp",
"in",
"enumerate",
"(",
"inputs",
")",
":",
"src",
"=",
"inp",
"[",
"'src'",
"]",
"if",
"src",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"head_spaces",
".",
"append",
"(",
"src",
")",
"texts",
".",
"append",
"(",
"\"\"",
")",
"tail_spaces",
".",
"append",
"(",
"\"\"",
")",
"else",
":",
"whitespaces_before",
",",
"whitespaces_after",
"=",
"\"\"",
",",
"\"\"",
"match_before",
"=",
"re",
".",
"search",
"(",
"r'^\\s+'",
",",
"src",
")",
"match_after",
"=",
"re",
".",
"search",
"(",
"r'\\s+$'",
",",
"src",
")",
"if",
"match_before",
"is",
"not",
"None",
":",
"whitespaces_before",
"=",
"match_before",
".",
"group",
"(",
"0",
")",
"if",
"match_after",
"is",
"not",
"None",
":",
"whitespaces_after",
"=",
"match_after",
".",
"group",
"(",
"0",
")",
"head_spaces",
".",
"append",
"(",
"whitespaces_before",
")",
"tok",
"=",
"self",
".",
"maybe_tokenize",
"(",
"src",
".",
"strip",
"(",
")",
")",
"texts",
".",
"append",
"(",
"tok",
")",
"sslength",
".",
"append",
"(",
"len",
"(",
"tok",
".",
"split",
"(",
")",
")",
")",
"tail_spaces",
".",
"append",
"(",
"whitespaces_after",
")",
"empty_indices",
"=",
"[",
"i",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"texts",
")",
"if",
"x",
"==",
"\"\"",
"]",
"texts_to_translate",
"=",
"[",
"x",
"for",
"x",
"in",
"texts",
"if",
"x",
"!=",
"\"\"",
"]",
"scores",
"=",
"[",
"]",
"predictions",
"=",
"[",
"]",
"if",
"len",
"(",
"texts_to_translate",
")",
">",
"0",
":",
"try",
":",
"scores",
",",
"predictions",
"=",
"self",
".",
"translator",
".",
"translate",
"(",
"src_data_iter",
"=",
"texts_to_translate",
",",
"batch_size",
"=",
"self",
".",
"opt",
".",
"batch_size",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"raise",
"ServerModelError",
"(",
"\"Runtime Error: %s\"",
"%",
"str",
"(",
"e",
")",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"translation\"",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\"",
"%",
"(",
"self",
".",
"model_id",
",",
"len",
"(",
"texts",
")",
",",
"timer",
".",
"times",
"[",
"'translation'",
"]",
")",
")",
"self",
".",
"reset_unload_timer",
"(",
")",
"# NOTE: translator returns lists of `n_best` list",
"# we can ignore that (i.e. flatten lists) only because",
"# we restrict `n_best=1`",
"def",
"flatten_list",
"(",
"_list",
")",
":",
"return",
"sum",
"(",
"_list",
",",
"[",
"]",
")",
"results",
"=",
"flatten_list",
"(",
"predictions",
")",
"scores",
"=",
"[",
"score_tensor",
".",
"item",
"(",
")",
"for",
"score_tensor",
"in",
"flatten_list",
"(",
"scores",
")",
"]",
"results",
"=",
"[",
"self",
".",
"maybe_detokenize",
"(",
"item",
")",
"for",
"item",
"in",
"results",
"]",
"# build back results with empty texts",
"for",
"i",
"in",
"empty_indices",
":",
"results",
".",
"insert",
"(",
"i",
",",
"\"\"",
")",
"scores",
".",
"insert",
"(",
"i",
",",
"0",
")",
"results",
"=",
"[",
"\"\"",
".",
"join",
"(",
"items",
")",
"for",
"items",
"in",
"zip",
"(",
"head_spaces",
",",
"results",
",",
"tail_spaces",
")",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"\"Translation Results: %d\"",
",",
"len",
"(",
"results",
")",
")",
"return",
"results",
",",
"scores",
",",
"self",
".",
"opt",
".",
"n_best",
",",
"timer",
".",
"times"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L286-L382 |
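The head/tail whitespace bookkeeping inside `run` above (capture leading and trailing whitespace, translate the stripped core, then reassemble) is easy to isolate. A minimal sketch of that sub-step:

    import re

    def split_whitespace(src):
        """Return (head_ws, core, tail_ws) with head_ws + core + tail_ws == src
        whenever src contains at least one non-whitespace character."""
        head = re.search(r'^\s+', src)
        tail = re.search(r'\s+$', src)
        return (head.group(0) if head else "",
                src.strip(),
                tail.group(0) if tail else "")

    assert split_whitespace("  hello \n") == ("  ", "hello", " \n")

`run` then joins each translation back as head + prediction + tail, which is why purely-whitespace inputs are short-circuited to empty translations first.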
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.do_timeout | (self) | | Timeout function that frees GPU memory by moving the model to CPU
or unloading it; depending on `self.on_timeout` value | Timeout function that frees GPU memory by moving the model to CPU
or unloading it; depending on `self.on_timeout` value | [
"Timeout",
"function",
"that",
"free",
"GPU",
"memory",
"by",
"moving",
"the",
"model",
"to",
"CPU",
"or",
"unloading",
"it",
";",
"depending",
"on",
"self",
".",
"on_timemout",
"value"
] | def do_timeout(self):
"""Timeout function that free GPU memory by moving the model to CPU
or unloading it; depending on `self.on_timemout` value
"""
if self.on_timeout == "unload":
self.logger.info("Timeout: unloading model %d" % self.model_id)
self.unload()
if self.on_timeout == "to_cpu":
self.logger.info("Timeout: sending model %d to CPU"
% self.model_id)
self.to_cpu() | [
"def",
"do_timeout",
"(",
"self",
")",
":",
"if",
"self",
".",
"on_timeout",
"==",
"\"unload\"",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Timeout: unloading model %d\"",
"%",
"self",
".",
"model_id",
")",
"self",
".",
"unload",
"(",
")",
"if",
"self",
".",
"on_timeout",
"==",
"\"to_cpu\"",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Timeout: sending model %d to CPU\"",
"%",
"self",
".",
"model_id",
")",
"self",
".",
"to_cpu",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L384-L394 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.to_cpu | (self) | Move the model to CPU and clear CUDA cache | Move the model to CPU and clear CUDA cache | [
"Move",
"the",
"model",
"to",
"CPU",
"and",
"clear",
"CUDA",
"cache"
] | def to_cpu(self):
"""Move the model to CPU and clear CUDA cache
"""
self.translator.model.cpu()
if self.opt.cuda:
torch.cuda.empty_cache() | [
"def",
"to_cpu",
"(",
"self",
")",
":",
"self",
".",
"translator",
".",
"model",
".",
"cpu",
"(",
")",
"if",
"self",
".",
"opt",
".",
"cuda",
":",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L428-L433 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.to_gpu | (self) | Move the model to GPU | Move the model to GPU | [
"Move",
"the",
"model",
"to",
"GPU"
] | def to_gpu(self):
"""Move the model to GPU
"""
torch.cuda.set_device(self.opt.gpu)
self.translator.model.cuda() | [
"def",
"to_gpu",
"(",
"self",
")",
":",
"torch",
".",
"cuda",
".",
"set_device",
"(",
"self",
".",
"opt",
".",
"gpu",
")",
"self",
".",
"translator",
".",
"model",
".",
"cuda",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L435-L439 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.maybe_tokenize | (self, sequence) | return sequence | Tokenize the sequence (or not)
Same args/returns as `tokenize` | Tokenize the sequence (or not) | [
"Tokenize",
"the",
"sequence",
"(",
"or",
"not",
")"
] | def maybe_tokenize(self, sequence):
"""Tokenize the sequence (or not)
Same args/returns as `tokenize`
"""
if self.tokenizer_opt is not None:
return self.tokenize(sequence)
return sequence | [
"def",
"maybe_tokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer_opt",
"is",
"not",
"None",
":",
"return",
"self",
".",
"tokenize",
"(",
"sequence",
")",
"return",
"sequence"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L441-L448 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.tokenize | (self, sequence) | return tok | Tokenize a single sequence
Args:
sequence: (str) the sequence to tokenize
Returns:
tok: (str) the tokenized sequence | Tokenize a single sequence | [
"Tokenize",
"a",
"single",
"sequence"
] | def tokenize(self, sequence):
"""Tokenize a single sequence
Args:
sequence: (str) the sequence to tokenize
Returns:
tok: (str) the tokenized sequence
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
tok = self.tokenizer.EncodeAsPieces(sequence)
tok = " ".join(tok)
elif self.tokenizer_opt["type"] == "pyonmttok":
tok, _ = self.tokenizer.tokenize(sequence)
tok = " ".join(tok)
return tok | [
"def",
"tokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No tokenizer loaded\"",
")",
"if",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"sentencepiece\"",
":",
"tok",
"=",
"self",
".",
"tokenizer",
".",
"EncodeAsPieces",
"(",
"sequence",
")",
"tok",
"=",
"\" \"",
".",
"join",
"(",
"tok",
")",
"elif",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"pyonmttok\"",
":",
"tok",
",",
"_",
"=",
"self",
".",
"tokenizer",
".",
"tokenize",
"(",
"sequence",
")",
"tok",
"=",
"\" \"",
".",
"join",
"(",
"tok",
")",
"return",
"tok"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L450-L469 |
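For reference, a hedged sketch of driving the SentencePiece backend named above directly (assumes the `sentencepiece` package and a trained model file; the path is a placeholder, and the `pyonmttok` branch is analogous):

    import sentencepiece as spm

    sp = spm.SentencePieceProcessor()
    sp.Load("subwords.model")                   # placeholder model path
    pieces = sp.EncodeAsPieces("Hello world.")  # list of subword pieces
    print(" ".join(pieces))                     # space-joined, as tokenize() returns
    print(sp.DecodePieces(pieces))              # round-trips the original text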
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.maybe_detokenize | (self, sequence) | return sequence | De-tokenize the sequence (or not)
Same args/returns as `tokenize` | De-tokenize the sequence (or not) | [
"De",
"-",
"tokenize",
"the",
"sequence",
"(",
"or",
"not",
")"
] | def maybe_detokenize(self, sequence):
"""De-tokenize the sequence (or not)
Same args/returns as `tokenize`
"""
if self.tokenizer_opt is not None and ''.join(sequence.split()) != '':
return self.detokenize(sequence)
return sequence | [
"def",
"maybe_detokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer_opt",
"is",
"not",
"None",
"and",
"''",
".",
"join",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"!=",
"''",
":",
"return",
"self",
".",
"detokenize",
"(",
"sequence",
")",
"return",
"sequence"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L471-L478 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation_server.py | python | ServerModel.detokenize | (self, sequence) | return detok | Detokenize a single sequence
Same args/returns as `tokenize` | Detokenize a single sequence | [
"Detokenize",
"a",
"single",
"sequence"
] | def detokenize(self, sequence):
"""Detokenize a single sequence
Same args/returns as `tokenize`
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
detok = self.tokenizer.DecodePieces(sequence.split())
elif self.tokenizer_opt["type"] == "pyonmttok":
detok = self.tokenizer.detokenize(sequence.split())
return detok | [
"def",
"detokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No tokenizer loaded\"",
")",
"if",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"sentencepiece\"",
":",
"detok",
"=",
"self",
".",
"tokenizer",
".",
"DecodePieces",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"elif",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"pyonmttok\"",
":",
"detok",
"=",
"self",
".",
"tokenizer",
".",
"detokenize",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"return",
"detok"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation_server.py#L480-L493 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_wu | (self, beam, cov, beta=0.) | return beta * penalty | NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | [
"NMT",
"coverage",
"re",
"-",
"ranking",
"score",
"from",
"Google",
"s",
"Neural",
"Machine",
"Translation",
"System",
":",
"cite",
":",
"wu2016google",
"."
] | def coverage_wu(self, beam, cov, beta=0.):
"""
NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)
return beta * penalty | [
"def",
"coverage_wu",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"penalty",
"=",
"-",
"torch",
".",
"min",
"(",
"cov",
",",
"cov",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"1.0",
")",
")",
".",
"log",
"(",
")",
".",
"sum",
"(",
"1",
")",
"return",
"beta",
"*",
"penalty"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L38-L44 |
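In the notation of :cite:`wu2016google`, with `cov` holding the accumulated attention that target words have paid to source position i, the line above computes the negation of the GNMT coverage score:

    cp(X; Y) = \beta \sum_{i=1}^{|X|} \log \Big( \min \Big( \sum_{j} p_{i,j},\; 1.0 \Big) \Big)

so that subtracting the returned penalty (as `GNMTGlobalScorer.score` below does) is the same as adding cp(X; Y) to the hypothesis score.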
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_summary | (self, beam, cov, beta=0.) | return beta * penalty | Our summary penalty. | Our summary penalty. | [
"Our",
"summary",
"penalty",
"."
] | def coverage_summary(self, beam, cov, beta=0.):
"""
Our summary penalty.
"""
penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(1)
penalty -= cov.size(1)
return beta * penalty | [
"def",
"coverage_summary",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"penalty",
"=",
"torch",
".",
"max",
"(",
"cov",
",",
"cov",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"1.0",
")",
")",
".",
"sum",
"(",
"1",
")",
"penalty",
"-=",
"cov",
".",
"size",
"(",
"1",
")",
"return",
"beta",
"*",
"penalty"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L46-L52 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_none | (self, beam, cov, beta=0.) | return beam.scores.clone().fill_(0.0) | returns zero as penalty | returns zero as penalty | [
"returns",
"zero",
"as",
"penalty"
] | def coverage_none(self, beam, cov, beta=0.):
"""
returns zero as penalty
"""
return beam.scores.clone().fill_(0.0) | [
"def",
"coverage_none",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"return",
"beam",
".",
"scores",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"0.0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L54-L58 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.length_wu | (self, beam, logprobs, alpha=0.) | return (logprobs / modifier) | NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | [
"NMT",
"length",
"re",
"-",
"ranking",
"score",
"from",
"Google",
"s",
"Neural",
"Machine",
"Translation",
"System",
":",
"cite",
":",
"wu2016google",
"."
] | def length_wu(self, beam, logprobs, alpha=0.):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = (((5 + len(beam.next_ys)) ** alpha) /
((5 + 1) ** alpha))
return (logprobs / modifier) | [
"def",
"length_wu",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
")",
":",
"modifier",
"=",
"(",
"(",
"(",
"5",
"+",
"len",
"(",
"beam",
".",
"next_ys",
")",
")",
"**",
"alpha",
")",
"/",
"(",
"(",
"5",
"+",
"1",
")",
"**",
"alpha",
")",
")",
"return",
"(",
"logprobs",
"/",
"modifier",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L60-L68 |
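The `modifier` above is exactly the GNMT length penalty; writing |Y| for the current hypothesis length (`len(beam.next_ys)`):

    lp(Y) = \frac{(5 + |Y|)^{\alpha}}{(5 + 1)^{\alpha}}, \qquad
    \text{score}(Y) = \frac{\log P(Y \mid X)}{lp(Y)}

With \alpha = 0 the modifier is 1 and the score reduces to the raw log-probability.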
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.length_average | (self, beam, logprobs, alpha=0.) | return logprobs / len(beam.next_ys) | Returns the average probability of tokens in a sequence. | Returns the average probability of tokens in a sequence. | [
"Returns",
"the",
"average",
"probability",
"of",
"tokens",
"in",
"a",
"sequence",
"."
] | def length_average(self, beam, logprobs, alpha=0.):
"""
Returns the average probability of tokens in a sequence.
"""
return logprobs / len(beam.next_ys) | [
"def",
"length_average",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
")",
":",
"return",
"logprobs",
"/",
"len",
"(",
"beam",
".",
"next_ys",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L70-L74 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/penalties.py | python | PenaltyBuilder.length_none | (self, beam, logprobs, alpha=0., beta=0.) | return logprobs | Returns unmodified scores. | Returns unmodified scores. | [
"Returns",
"unmodified",
"scores",
"."
] | def length_none(self, beam, logprobs, alpha=0., beta=0.):
"""
Returns unmodified scores.
"""
return logprobs | [
"def",
"length_none",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
",",
"beta",
"=",
"0.",
")",
":",
"return",
"logprobs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/penalties.py#L76-L80 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translator.py | python | Translator.translate | (self,
src_path=None,
src_data_iter=None,
tgt_path=None,
tgt_data_iter=None,
src_dir=None,
batch_size=None,
attn_debug=False) | return all_scores, all_predictions | Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.
Note: batch_size must not be None
Note: one of ('src_path', 'src_data_iter') must not be None
Args:
src_path (str): filepath of source data
src_data_iter (iterator): an iterator generating source data
e.g. it may be a list or an opened file
tgt_path (str): filepath of target data
tgt_data_iter (iterator): an iterator generating target data
src_dir (str): source directory path
(used for Audio and Image datasets)
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions | Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set. | [
"Translate",
"content",
"of",
"src_data_iter",
"(",
"if",
"not",
"None",
")",
"or",
"src_path",
"and",
"get",
"gold",
"scores",
"if",
"one",
"of",
"tgt_data_iter",
"or",
"tgt_path",
"is",
"set",
"."
] | def translate(self,
src_path=None,
src_data_iter=None,
tgt_path=None,
tgt_data_iter=None,
src_dir=None,
batch_size=None,
attn_debug=False):
"""
Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.
Note: batch_size must not be None
Note: one of ('src_path', 'src_data_iter') must not be None
Args:
src_path (str): filepath of source data
src_data_iter (iterator): an iterator generating source data
e.g. it may be a list or an opened file
tgt_path (str): filepath of target data
tgt_data_iter (iterator): an iterator generating target data
src_dir (str): source directory path
(used for Audio and Image datasets)
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
assert src_data_iter is not None or src_path is not None
if batch_size is None:
raise ValueError("batch_size must be set")
data = inputters. \
build_dataset(self.fields,
self.data_type,
src_path=src_path,
src_data_iter=src_data_iter,
tgt_path=tgt_path,
tgt_data_iter=tgt_data_iter,
src_dir=src_dir,
sample_rate=self.sample_rate,
window_size=self.window_size,
window_stride=self.window_stride,
window=self.window,
use_filter_pred=self.use_filter_pred,
image_channel_size=self.image_channel_size)
if self.cuda:
cur_device = "cuda"
else:
cur_device = "cpu"
data_iter = inputters.OrderedIterator(
dataset=data, device=cur_device,
batch_size=batch_size, train=False, sort=False,
sort_within_batch=True, shuffle=False)
builder = onmt.translate.TranslationBuilder(
data, self.fields,
self.n_best, self.replace_unk, tgt_path)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
for batch in data_iter:
batch_data = self.translate_batch(batch, data, fast=self.fast)
# import pdb;pdb.set_trace()
translations = builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt_path is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
# Debug attention.
if attn_debug:
srcs = trans.src_raw
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
header_format = "{:>10.10} " + "{:>10.7} " * len(srcs)
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
output = header_format.format("", *trans.src_raw) + '\n'
for word, row in zip(preds, attns):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
os.write(1, output.encode('utf-8'))
#TODO change back
#if self.report_score:
# msg = self._report_score('PRED', pred_score_total,
# pred_words_total)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if tgt_path is not None:
# msg = self._report_score('GOLD', gold_score_total,
# gold_words_total)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if self.report_bleu:
# msg = self._report_bleu(tgt_path)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if self.report_rouge:
# msg = self._report_rouge(tgt_path)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions | [
"def",
"translate",
"(",
"self",
",",
"src_path",
"=",
"None",
",",
"src_data_iter",
"=",
"None",
",",
"tgt_path",
"=",
"None",
",",
"tgt_data_iter",
"=",
"None",
",",
"src_dir",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"attn_debug",
"=",
"False",
")",
":",
"assert",
"src_data_iter",
"is",
"not",
"None",
"or",
"src_path",
"is",
"not",
"None",
"if",
"batch_size",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"batch_size must be set\"",
")",
"data",
"=",
"inputters",
".",
"build_dataset",
"(",
"self",
".",
"fields",
",",
"self",
".",
"data_type",
",",
"src_path",
"=",
"src_path",
",",
"src_data_iter",
"=",
"src_data_iter",
",",
"tgt_path",
"=",
"tgt_path",
",",
"tgt_data_iter",
"=",
"tgt_data_iter",
",",
"src_dir",
"=",
"src_dir",
",",
"sample_rate",
"=",
"self",
".",
"sample_rate",
",",
"window_size",
"=",
"self",
".",
"window_size",
",",
"window_stride",
"=",
"self",
".",
"window_stride",
",",
"window",
"=",
"self",
".",
"window",
",",
"use_filter_pred",
"=",
"self",
".",
"use_filter_pred",
",",
"image_channel_size",
"=",
"self",
".",
"image_channel_size",
")",
"if",
"self",
".",
"cuda",
":",
"cur_device",
"=",
"\"cuda\"",
"else",
":",
"cur_device",
"=",
"\"cpu\"",
"data_iter",
"=",
"inputters",
".",
"OrderedIterator",
"(",
"dataset",
"=",
"data",
",",
"device",
"=",
"cur_device",
",",
"batch_size",
"=",
"batch_size",
",",
"train",
"=",
"False",
",",
"sort",
"=",
"False",
",",
"sort_within_batch",
"=",
"True",
",",
"shuffle",
"=",
"False",
")",
"builder",
"=",
"onmt",
".",
"translate",
".",
"TranslationBuilder",
"(",
"data",
",",
"self",
".",
"fields",
",",
"self",
".",
"n_best",
",",
"self",
".",
"replace_unk",
",",
"tgt_path",
")",
"# Statistics",
"counter",
"=",
"count",
"(",
"1",
")",
"pred_score_total",
",",
"pred_words_total",
"=",
"0",
",",
"0",
"gold_score_total",
",",
"gold_words_total",
"=",
"0",
",",
"0",
"all_scores",
"=",
"[",
"]",
"all_predictions",
"=",
"[",
"]",
"for",
"batch",
"in",
"data_iter",
":",
"batch_data",
"=",
"self",
".",
"translate_batch",
"(",
"batch",
",",
"data",
",",
"fast",
"=",
"self",
".",
"fast",
")",
"# import pdb;pdb.set_trace()",
"translations",
"=",
"builder",
".",
"from_batch",
"(",
"batch_data",
")",
"for",
"trans",
"in",
"translations",
":",
"all_scores",
"+=",
"[",
"trans",
".",
"pred_scores",
"[",
":",
"self",
".",
"n_best",
"]",
"]",
"pred_score_total",
"+=",
"trans",
".",
"pred_scores",
"[",
"0",
"]",
"pred_words_total",
"+=",
"len",
"(",
"trans",
".",
"pred_sents",
"[",
"0",
"]",
")",
"if",
"tgt_path",
"is",
"not",
"None",
":",
"gold_score_total",
"+=",
"trans",
".",
"gold_score",
"gold_words_total",
"+=",
"len",
"(",
"trans",
".",
"gold_sent",
")",
"+",
"1",
"n_best_preds",
"=",
"[",
"\" \"",
".",
"join",
"(",
"pred",
")",
"for",
"pred",
"in",
"trans",
".",
"pred_sents",
"[",
":",
"self",
".",
"n_best",
"]",
"]",
"all_predictions",
"+=",
"[",
"n_best_preds",
"]",
"self",
".",
"out_file",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"n_best_preds",
")",
"+",
"'\\n'",
")",
"self",
".",
"out_file",
".",
"flush",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"sent_number",
"=",
"next",
"(",
"counter",
")",
"output",
"=",
"trans",
".",
"log",
"(",
"sent_number",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"info",
"(",
"output",
")",
"else",
":",
"os",
".",
"write",
"(",
"1",
",",
"output",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"# Debug attention.",
"if",
"attn_debug",
":",
"srcs",
"=",
"trans",
".",
"src_raw",
"preds",
"=",
"trans",
".",
"pred_sents",
"[",
"0",
"]",
"preds",
".",
"append",
"(",
"'</s>'",
")",
"attns",
"=",
"trans",
".",
"attns",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"header_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7} \"",
"*",
"len",
"(",
"srcs",
")",
"row_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7f} \"",
"*",
"len",
"(",
"srcs",
")",
"output",
"=",
"header_format",
".",
"format",
"(",
"\"\"",
",",
"*",
"trans",
".",
"src_raw",
")",
"+",
"'\\n'",
"for",
"word",
",",
"row",
"in",
"zip",
"(",
"preds",
",",
"attns",
")",
":",
"max_index",
"=",
"row",
".",
"index",
"(",
"max",
"(",
"row",
")",
")",
"row_format",
"=",
"row_format",
".",
"replace",
"(",
"\"{:>10.7f} \"",
",",
"\"{:*>10.7f} \"",
",",
"max_index",
"+",
"1",
")",
"row_format",
"=",
"row_format",
".",
"replace",
"(",
"\"{:*>10.7f} \"",
",",
"\"{:>10.7f} \"",
",",
"max_index",
")",
"output",
"+=",
"row_format",
".",
"format",
"(",
"word",
",",
"*",
"row",
")",
"+",
"'\\n'",
"row_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7f} \"",
"*",
"len",
"(",
"srcs",
")",
"os",
".",
"write",
"(",
"1",
",",
"output",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"#TODO change back",
"#if self.report_score:",
"# msg = self._report_score('PRED', pred_score_total,",
"# pred_words_total)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if tgt_path is not None:",
"# msg = self._report_score('GOLD', gold_score_total,",
"# gold_words_total)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if self.report_bleu:",
"# msg = self._report_bleu(tgt_path)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if self.report_rouge:",
"# msg = self._report_rouge(tgt_path)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"if",
"self",
".",
"dump_beam",
":",
"import",
"json",
"json",
".",
"dump",
"(",
"self",
".",
"translator",
".",
"beam_accum",
",",
"codecs",
".",
"open",
"(",
"self",
".",
"dump_beam",
",",
"'w'",
",",
"'utf-8'",
")",
")",
"return",
"all_scores",
",",
"all_predictions"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translator.py#L150-L303 |
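A hedged sketch of calling `translate` above with in-memory input rather than a file, assuming a `Translator` instance named `translator` already built with its fields, model, and `out_file`:

    sentences = ["the quick brown fox .", "hello world ."]
    all_scores, all_predictions = translator.translate(
        src_data_iter=sentences,   # satisfies the src_path/src_data_iter assert
        batch_size=2)              # batch_size must be set explicitly
    # all_predictions[i] is a list of n_best strings for sentences[i];
    # all_scores[i] holds the matching scores.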
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translator.py | python | Translator.translate_batch | (self, batch, data, fast=False) | Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset. | Translate a batch of sentences. | [
"Translate",
"a",
"batch",
"of",
"sentences",
"."
] | def translate_batch(self, batch, data, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
if fast:
return self._fast_translate_batch(
batch,
data,
self.max_length,
min_length=self.min_length,
n_best=self.n_best,
return_attention=self.replace_unk)
else:
return self._translate_batch(batch, data) | [
"def",
"translate_batch",
"(",
"self",
",",
"batch",
",",
"data",
",",
"fast",
"=",
"False",
")",
":",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"if",
"fast",
":",
"return",
"self",
".",
"_fast_translate_batch",
"(",
"batch",
",",
"data",
",",
"self",
".",
"max_length",
",",
"min_length",
"=",
"self",
".",
"min_length",
",",
"n_best",
"=",
"self",
".",
"n_best",
",",
"return_attention",
"=",
"self",
".",
"replace_unk",
")",
"else",
":",
"return",
"self",
".",
"_translate_batch",
"(",
"batch",
",",
"data",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translator.py#L305-L329 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | Beam.get_current_state | (self) | return self.next_ys[-1] | Get the outputs for the current timestep. | Get the outputs for the current timestep. | [
"Get",
"the",
"outputs",
"for",
"the",
"current",
"timestep",
"."
] | def get_current_state(self):
"Get the outputs for the current timestep."
return self.next_ys[-1] | [
"def",
"get_current_state",
"(",
"self",
")",
":",
"return",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L66-L68 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | Beam.get_current_origin | (self) | return self.prev_ks[-1] | Get the backpointers for the current timestep. | Get the backpointers for the current timestep. | [
"Get",
"the",
"backpointers",
"for",
"the",
"current",
"timestep",
"."
] | def get_current_origin(self):
"Get the backpointers for the current timestep."
return self.prev_ks[-1] | [
"def",
"get_current_origin",
"(",
"self",
")",
":",
"return",
"self",
".",
"prev_ks",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L70-L72 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | Beam.advance | (self, word_probs, attn_out) | | Given prob over words for every last beam `word_probs` and attention
`attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete. | Given prob over words for every last beam `word_probs` and attention
`attn_out`: Compute and update the beam search. | [
"Given",
"prob",
"over",
"words",
"for",
"every",
"last",
"beam",
"wordLk",
"and",
"attention",
"attn_out",
":",
"Compute",
"and",
"update",
"the",
"beam",
"search",
"."
] | def advance(self, word_probs, attn_out):
"""
Given prob over words for every last beam `word_probs` and attention
`attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete.
"""
num_words = word_probs.size(1)
if self.stepwise_penalty:
self.global_scorer.update_score(self, attn_out)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len < self.min_length:
for k in range(len(word_probs)):
word_probs[k][self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + \
self.scores.unsqueeze(1).expand_as(word_probs)
# Don't let EOS have children.
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
beam_scores[i] = -1e20
# Block ngram repeats
if self.block_ngram_repeat > 0:
ngrams = []
le = len(self.next_ys)
for j in range(self.next_ys[-1].size(0)):
hyp, _ = self.get_hyp(le - 1, j)
ngrams = set()
fail = False
gram = []
for i in range(le - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram +
[hyp[i].item()])[-self.block_ngram_repeat:]
# Skip the blocking if it is in the exclusion list
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
beam_scores[j] = -10e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
prev_k = best_scores_id / num_words
self.prev_ks.append(prev_k)
self.next_ys.append((best_scores_id - prev_k * num_words))
self.attn.append(attn_out.index_select(0, prev_k))
self.global_scorer.update_global_state(self)
for i in range(self.next_ys[-1].size(0)):
if self.next_ys[-1][i] == self._eos:
global_scores = self.global_scorer.score(self, self.scores)
s = global_scores[i]
self.finished.append((s, len(self.next_ys) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
self.eos_top = True | [
"def",
"advance",
"(",
"self",
",",
"word_probs",
",",
"attn_out",
")",
":",
"num_words",
"=",
"word_probs",
".",
"size",
"(",
"1",
")",
"if",
"self",
".",
"stepwise_penalty",
":",
"self",
".",
"global_scorer",
".",
"update_score",
"(",
"self",
",",
"attn_out",
")",
"# force the output to be longer than self.min_length",
"cur_len",
"=",
"len",
"(",
"self",
".",
"next_ys",
")",
"if",
"cur_len",
"<",
"self",
".",
"min_length",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"word_probs",
")",
")",
":",
"word_probs",
"[",
"k",
"]",
"[",
"self",
".",
"_eos",
"]",
"=",
"-",
"1e20",
"# Sum the previous scores.",
"if",
"len",
"(",
"self",
".",
"prev_ks",
")",
">",
"0",
":",
"beam_scores",
"=",
"word_probs",
"+",
"self",
".",
"scores",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand_as",
"(",
"word_probs",
")",
"# Don't let EOS have children.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
".",
"size",
"(",
"0",
")",
")",
":",
"if",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"[",
"i",
"]",
"==",
"self",
".",
"_eos",
":",
"beam_scores",
"[",
"i",
"]",
"=",
"-",
"1e20",
"# Block ngram repeats",
"if",
"self",
".",
"block_ngram_repeat",
">",
"0",
":",
"ngrams",
"=",
"[",
"]",
"le",
"=",
"len",
"(",
"self",
".",
"next_ys",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
".",
"size",
"(",
"0",
")",
")",
":",
"hyp",
",",
"_",
"=",
"self",
".",
"get_hyp",
"(",
"le",
"-",
"1",
",",
"j",
")",
"ngrams",
"=",
"set",
"(",
")",
"fail",
"=",
"False",
"gram",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"le",
"-",
"1",
")",
":",
"# Last n tokens, n = block_ngram_repeat",
"gram",
"=",
"(",
"gram",
"+",
"[",
"hyp",
"[",
"i",
"]",
".",
"item",
"(",
")",
"]",
")",
"[",
"-",
"self",
".",
"block_ngram_repeat",
":",
"]",
"# Skip the blocking if it is in the exclusion list",
"if",
"set",
"(",
"gram",
")",
"&",
"self",
".",
"exclusion_tokens",
":",
"continue",
"if",
"tuple",
"(",
"gram",
")",
"in",
"ngrams",
":",
"fail",
"=",
"True",
"ngrams",
".",
"add",
"(",
"tuple",
"(",
"gram",
")",
")",
"if",
"fail",
":",
"beam_scores",
"[",
"j",
"]",
"=",
"-",
"10e20",
"else",
":",
"beam_scores",
"=",
"word_probs",
"[",
"0",
"]",
"flat_beam_scores",
"=",
"beam_scores",
".",
"view",
"(",
"-",
"1",
")",
"best_scores",
",",
"best_scores_id",
"=",
"flat_beam_scores",
".",
"topk",
"(",
"self",
".",
"size",
",",
"0",
",",
"True",
",",
"True",
")",
"self",
".",
"all_scores",
".",
"append",
"(",
"self",
".",
"scores",
")",
"self",
".",
"scores",
"=",
"best_scores",
"# best_scores_id is flattened beam x word array, so calculate which",
"# word and beam each score came from",
"prev_k",
"=",
"best_scores_id",
"/",
"num_words",
"self",
".",
"prev_ks",
".",
"append",
"(",
"prev_k",
")",
"self",
".",
"next_ys",
".",
"append",
"(",
"(",
"best_scores_id",
"-",
"prev_k",
"*",
"num_words",
")",
")",
"self",
".",
"attn",
".",
"append",
"(",
"attn_out",
".",
"index_select",
"(",
"0",
",",
"prev_k",
")",
")",
"self",
".",
"global_scorer",
".",
"update_global_state",
"(",
"self",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
".",
"size",
"(",
"0",
")",
")",
":",
"if",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"[",
"i",
"]",
"==",
"self",
".",
"_eos",
":",
"global_scores",
"=",
"self",
".",
"global_scorer",
".",
"score",
"(",
"self",
",",
"self",
".",
"scores",
")",
"s",
"=",
"global_scores",
"[",
"i",
"]",
"self",
".",
"finished",
".",
"append",
"(",
"(",
"s",
",",
"len",
"(",
"self",
".",
"next_ys",
")",
"-",
"1",
",",
"i",
")",
")",
"# End condition is when top-of-beam is EOS and no global score.",
"if",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"self",
".",
"_eos",
":",
"self",
".",
"all_scores",
".",
"append",
"(",
"self",
".",
"scores",
")",
"self",
".",
"eos_top",
"=",
"True"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L74-L150 |
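The flatten-then-divide bookkeeping in `advance` above deserves a worked example. With beam size K over a vocabulary of V words, the flattened index f of word w in beam k is f = k*V + w, so k = f // V and w = f - k*V (the original's `/` relies on the old integer-division behaviour of `torch.div` on integer tensors). A small numeric sketch:

    import torch

    K, V = 2, 5                            # beam size, vocabulary size
    beam_scores = torch.tensor([[0.1, 0.9, 0.0, 0.0, 0.0],
                                [0.0, 0.0, 0.8, 0.0, 0.0]])
    flat = beam_scores.view(-1)            # length K * V = 10
    best_scores, best_ids = flat.topk(K)   # best_ids == tensor([1, 7])
    prev_k = best_ids // V                 # originating beams: tensor([0, 1])
    next_y = best_ids - prev_k * V         # chosen word ids:   tensor([1, 2])

`prev_ks` therefore stores, per step, which beam each surviving hypothesis came from — the backpointers that `get_hyp` below walks in reverse.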
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | Beam.get_hyp | (self, timestep, k) | return hyp[::-1], torch.stack(attn[::-1]) | Walk back to construct the full hypothesis. | Walk back to construct the full hypothesis. | [
"Walk",
"back",
"to",
"construct",
"the",
"full",
"hypothesis",
"."
] | def get_hyp(self, timestep, k):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
attn.append(self.attn[j][k])
k = self.prev_ks[j][k]
return hyp[::-1], torch.stack(attn[::-1]) | [
"def",
"get_hyp",
"(",
"self",
",",
"timestep",
",",
"k",
")",
":",
"hyp",
",",
"attn",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"prev_ks",
"[",
":",
"timestep",
"]",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"hyp",
".",
"append",
"(",
"self",
".",
"next_ys",
"[",
"j",
"+",
"1",
"]",
"[",
"k",
"]",
")",
"attn",
".",
"append",
"(",
"self",
".",
"attn",
"[",
"j",
"]",
"[",
"k",
"]",
")",
"k",
"=",
"self",
".",
"prev_ks",
"[",
"j",
"]",
"[",
"k",
"]",
"return",
"hyp",
"[",
":",
":",
"-",
"1",
"]",
",",
"torch",
".",
"stack",
"(",
"attn",
"[",
":",
":",
"-",
"1",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L170-L179 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | GNMTGlobalScorer.score | (self, beam, logprobs) | return normalized_probs | Rescores a prediction based on penalty functions | Rescores a prediction based on penalty functions | [
"Rescores",
"a",
"prediction",
"based",
"on",
"penalty",
"functions"
] | def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
normalized_probs = self.length_penalty(beam,
logprobs,
self.alpha)
if not beam.stepwise_penalty:
penalty = self.cov_penalty(beam,
beam.global_state["coverage"],
self.beta)
normalized_probs -= penalty
return normalized_probs | [
"def",
"score",
"(",
"self",
",",
"beam",
",",
"logprobs",
")",
":",
"normalized_probs",
"=",
"self",
".",
"length_penalty",
"(",
"beam",
",",
"logprobs",
",",
"self",
".",
"alpha",
")",
"if",
"not",
"beam",
".",
"stepwise_penalty",
":",
"penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
",",
"self",
".",
"beta",
")",
"normalized_probs",
"-=",
"penalty",
"return",
"normalized_probs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L202-L215 |
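Combining this record with the penalty records above: for a finished hypothesis, when `stepwise_penalty` is off and the `wu` penalties are selected, `score` returns

    s(Y) = \frac{\log P(Y \mid X)}{lp(Y)} + cp(X; Y)

with lp from the length-penalty record and cp from the coverage-penalty record; the coverage term enters with a plus sign because `cov_penalty` returns its negation.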
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | GNMTGlobalScorer.update_score | (self, beam, attn) | Function to update scores of a Beam that is not finished | Function to update scores of a Beam that is not finished | [
"Function",
"to",
"update",
"scores",
"of",
"a",
"Beam",
"that",
"is",
"not",
"finished"
] | def update_score(self, beam, attn):
"""
Function to update scores of a Beam that is not finished
"""
if "prev_penalty" in beam.global_state.keys():
beam.scores.add_(beam.global_state["prev_penalty"])
penalty = self.cov_penalty(beam,
beam.global_state["coverage"] + attn,
self.beta)
beam.scores.sub_(penalty) | [
"def",
"update_score",
"(",
"self",
",",
"beam",
",",
"attn",
")",
":",
"if",
"\"prev_penalty\"",
"in",
"beam",
".",
"global_state",
".",
"keys",
"(",
")",
":",
"beam",
".",
"scores",
".",
"add_",
"(",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
")",
"penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"+",
"attn",
",",
"self",
".",
"beta",
")",
"beam",
".",
"scores",
".",
"sub_",
"(",
"penalty",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L217-L226 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/beam.py | python | GNMTGlobalScorer.update_global_state | (self, beam) | Keeps the coverage vector as sum of attentions | Keeps the coverage vector as sum of attentions | [
"Keeps",
"the",
"coverage",
"vector",
"as",
"sum",
"of",
"attentions"
] | def update_global_state(self, beam):
"Keeps the coverage vector as sum of attentions"
if len(beam.prev_ks) == 1:
beam.global_state["prev_penalty"] = beam.scores.clone().fill_(0.0)
beam.global_state["coverage"] = beam.attn[-1]
self.cov_total = beam.attn[-1].sum(1)
else:
self.cov_total += torch.min(beam.attn[-1],
beam.global_state['coverage']).sum(1)
beam.global_state["coverage"] = beam.global_state["coverage"] \
.index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])
prev_penalty = self.cov_penalty(beam,
beam.global_state["coverage"],
self.beta)
beam.global_state["prev_penalty"] = prev_penalty | [
"def",
"update_global_state",
"(",
"self",
",",
"beam",
")",
":",
"if",
"len",
"(",
"beam",
".",
"prev_ks",
")",
"==",
"1",
":",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
"=",
"beam",
".",
"scores",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"0.0",
")",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"=",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
"self",
".",
"cov_total",
"=",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
".",
"sum",
"(",
"1",
")",
"else",
":",
"self",
".",
"cov_total",
"+=",
"torch",
".",
"min",
"(",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
",",
"beam",
".",
"global_state",
"[",
"'coverage'",
"]",
")",
".",
"sum",
"(",
"1",
")",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"=",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
".",
"index_select",
"(",
"0",
",",
"beam",
".",
"prev_ks",
"[",
"-",
"1",
"]",
")",
".",
"add",
"(",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
")",
"prev_penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
",",
"self",
".",
"beta",
")",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
"=",
"prev_penalty"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/beam.py#L228-L243 |
||
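Note: update_global_state() above maintains coverage as the running sum of per-step attention distributions. A toy, runnable illustration of that accumulation, independent of the Beam machinery (all sizes are made up):

import torch

# Four decoding steps of attention over 7 source positions; each row sums to 1.
attn_steps = [torch.softmax(torch.randn(1, 7), dim=-1) for _ in range(4)]
coverage = torch.zeros(1, 7)
for attn in attn_steps:
    coverage = coverage + attn  # same accumulation idea as update_global_state()
print(coverage.sum().item())  # ~4.0: one unit of attention mass per step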
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/translate/translation.py | python | Translation.log | (self, sent_number) | return output | Log translation. | Log translation. | [
"Log",
"translation",
"."
] | def log(self, sent_number):
"""
Log translation.
"""
output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
output += "PRED SCORE: {:.4f}\n".format(best_score)
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score))
if len(self.pred_sents) > 1:
output += '\nBEST HYP:\n'
for score, sent in zip(self.pred_scores, self.pred_sents):
output += "[{:.4f}] {}\n".format(score, sent)
return output | [
"def",
"log",
"(",
"self",
",",
"sent_number",
")",
":",
"output",
"=",
"'\\nSENT {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"self",
".",
"src_raw",
")",
"best_pred",
"=",
"self",
".",
"pred_sents",
"[",
"0",
"]",
"best_score",
"=",
"self",
".",
"pred_scores",
"[",
"0",
"]",
"pred_sent",
"=",
"' '",
".",
"join",
"(",
"best_pred",
")",
"output",
"+=",
"'PRED {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"pred_sent",
")",
"output",
"+=",
"\"PRED SCORE: {:.4f}\\n\"",
".",
"format",
"(",
"best_score",
")",
"if",
"self",
".",
"gold_sent",
"is",
"not",
"None",
":",
"tgt_sent",
"=",
"' '",
".",
"join",
"(",
"self",
".",
"gold_sent",
")",
"output",
"+=",
"'GOLD {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"tgt_sent",
")",
"output",
"+=",
"(",
"\"GOLD SCORE: {:.4f}\\n\"",
".",
"format",
"(",
"self",
".",
"gold_score",
")",
")",
"if",
"len",
"(",
"self",
".",
"pred_sents",
")",
">",
"1",
":",
"output",
"+=",
"'\\nBEST HYP:\\n'",
"for",
"score",
",",
"sent",
"in",
"zip",
"(",
"self",
".",
"pred_scores",
",",
"self",
".",
"pred_sents",
")",
":",
"output",
"+=",
"\"[{:.4f}] {}\\n\"",
".",
"format",
"(",
"score",
",",
"sent",
")",
"return",
"output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/translate/translation.py#L134-L156 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | RNNDecoderBase.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None) | return decoder_outputs, state, attns | Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`. | Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`. | [
"Args",
":",
"tgt",
"(",
"LongTensor",
")",
":",
"sequences",
"of",
"padded",
"tokens",
"[",
"tgt_len",
"x",
"batch",
"x",
"nfeats",
"]",
".",
"memory_bank",
"(",
"FloatTensor",
")",
":",
"vectors",
"from",
"the",
"encoder",
"[",
"src_len",
"x",
"batch",
"x",
"hidden",
"]",
".",
"state",
"(",
":",
"obj",
":",
"onmt",
".",
"models",
".",
"DecoderState",
")",
":",
"decoder",
"state",
"object",
"to",
"initialize",
"the",
"decoder",
"memory_lengths",
"(",
"LongTensor",
")",
":",
"the",
"padded",
"source",
"lengths",
"[",
"batch",
"]",
".",
"Returns",
":",
"(",
"FloatTensor",
":",
"obj",
":",
"onmt",
".",
"Models",
".",
"DecoderState",
"FloatTensor",
")",
":",
"*",
"decoder_outputs",
":",
"output",
"from",
"the",
"decoder",
"(",
"after",
"attn",
")",
"[",
"tgt_len",
"x",
"batch",
"x",
"hidden",
"]",
".",
"*",
"decoder_state",
":",
"final",
"hidden",
"state",
"from",
"the",
"decoder",
"*",
"attns",
":",
"distribution",
"over",
"src",
"at",
"each",
"tgt",
"[",
"tgt_len",
"x",
"batch",
"x",
"src_len",
"]",
"."
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None):
"""
Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`.
"""
# Check
assert isinstance(state, RNNDecoderState)
# tgt.size() returns tgt length and batch
_, tgt_batch, _ = tgt.size()
_, memory_batch, _ = memory_bank.size()
aeq(tgt_batch, memory_batch)
# END
# Run the forward pass of the RNN.
decoder_final, decoder_outputs, attns = self._run_forward_pass(
tgt, memory_bank, state, memory_lengths=memory_lengths)
# Update the state with the result.
final_output = decoder_outputs[-1]
coverage = None
if "coverage" in attns:
coverage = attns["coverage"][-1].unsqueeze(0)
state.update_state(decoder_final, final_output.unsqueeze(0), coverage)
# Concatenates sequence of tensors along a new dimension.
# NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list
# (in particular in case of SRU) it was not raising error in 0.3
# since stack(Variable) was allowed.
        # In 0.4, SRU returns a tensor that shouldn't be stacked
if type(decoder_outputs) == list:
decoder_outputs = torch.stack(decoder_outputs)
for k in attns:
if type(attns[k]) == list:
attns[k] = torch.stack(attns[k])
return decoder_outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# Check",
"assert",
"isinstance",
"(",
"state",
",",
"RNNDecoderState",
")",
"# tgt.size() returns tgt length and batch",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"memory_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"memory_batch",
")",
"# END",
"# Run the forward pass of the RNN.",
"decoder_final",
",",
"decoder_outputs",
",",
"attns",
"=",
"self",
".",
"_run_forward_pass",
"(",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"# Update the state with the result.",
"final_output",
"=",
"decoder_outputs",
"[",
"-",
"1",
"]",
"coverage",
"=",
"None",
"if",
"\"coverage\"",
"in",
"attns",
":",
"coverage",
"=",
"attns",
"[",
"\"coverage\"",
"]",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"state",
".",
"update_state",
"(",
"decoder_final",
",",
"final_output",
".",
"unsqueeze",
"(",
"0",
")",
",",
"coverage",
")",
"# Concatenates sequence of tensors along a new dimension.",
"# NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list",
"# (in particular in case of SRU) it was not raising error in 0.3",
"# since stack(Variable) was allowed.",
"# In 0.4, SRU returns a tensor that shouldn't be stacke",
"if",
"type",
"(",
"decoder_outputs",
")",
"==",
"list",
":",
"decoder_outputs",
"=",
"torch",
".",
"stack",
"(",
"decoder_outputs",
")",
"for",
"k",
"in",
"attns",
":",
"if",
"type",
"(",
"attns",
"[",
"k",
"]",
")",
"==",
"list",
":",
"attns",
"[",
"k",
"]",
"=",
"torch",
".",
"stack",
"(",
"attns",
"[",
"k",
"]",
")",
"return",
"decoder_outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L107-L158 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | RNNDecoderBase.init_decoder_state | (self, src, memory_bank, encoder_final,
with_cache=False) | Init decoder state with last state of the encoder | Init decoder state with last state of the encoder | [
"Init",
"decoder",
"state",
"with",
"last",
"state",
"of",
"the",
"encoder"
] | def init_decoder_state(self, src, memory_bank, encoder_final,
with_cache=False):
""" Init decoder state with last state of the encoder """
def _fix_enc_hidden(hidden):
# The encoder hidden is (layers*directions) x batch x dim.
# We need to convert it to layers x batch x (directions*dim).
if self.bidirectional_encoder:
hidden = torch.cat([hidden[0:hidden.size(0):2],
hidden[1:hidden.size(0):2]], 2)
return hidden
if isinstance(encoder_final, tuple): # LSTM
return RNNDecoderState(self.hidden_size,
tuple([_fix_enc_hidden(enc_hid)
for enc_hid in encoder_final]))
else: # GRU
return RNNDecoderState(self.hidden_size,
_fix_enc_hidden(encoder_final)) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"encoder_final",
",",
"with_cache",
"=",
"False",
")",
":",
"def",
"_fix_enc_hidden",
"(",
"hidden",
")",
":",
"# The encoder hidden is (layers*directions) x batch x dim.",
"# We need to convert it to layers x batch x (directions*dim).",
"if",
"self",
".",
"bidirectional_encoder",
":",
"hidden",
"=",
"torch",
".",
"cat",
"(",
"[",
"hidden",
"[",
"0",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
",",
"hidden",
"[",
"1",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
"]",
",",
"2",
")",
"return",
"hidden",
"if",
"isinstance",
"(",
"encoder_final",
",",
"tuple",
")",
":",
"# LSTM",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"tuple",
"(",
"[",
"_fix_enc_hidden",
"(",
"enc_hid",
")",
"for",
"enc_hid",
"in",
"encoder_final",
"]",
")",
")",
"else",
":",
"# GRU",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"_fix_enc_hidden",
"(",
"encoder_final",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L160-L177 |
||
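Note: the _fix_enc_hidden helper in init_decoder_state() above converts a bidirectional encoder state from (layers * directions) x batch x dim to layers x batch x (directions * dim) by concatenating the interleaved direction slices. The same reshape on made-up sizes:

import torch

layers, batch, dim = 2, 3, 5
# Forward/backward states interleaved along dim 0, as PyTorch RNNs return them.
hidden = torch.randn(layers * 2, batch, dim)
fixed = torch.cat([hidden[0:hidden.size(0):2],
                   hidden[1:hidden.size(0):2]], 2)
print(fixed.shape)  # torch.Size([2, 3, 10])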
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | StdRNNDecoder._run_forward_pass | (self, tgt, memory_bank, state, memory_lengths=None) | return decoder_final, decoder_outputs, attns | Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
memory_bank (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder. | Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
memory_bank (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder. | [
"Private",
"helper",
"for",
"running",
"the",
"specific",
"RNN",
"forward",
"pass",
".",
"Must",
"be",
"overriden",
"by",
"all",
"subclasses",
".",
"Args",
":",
"tgt",
"(",
"LongTensor",
")",
":",
"a",
"sequence",
"of",
"input",
"tokens",
"tensors",
"[",
"len",
"x",
"batch",
"x",
"nfeats",
"]",
".",
"memory_bank",
"(",
"FloatTensor",
")",
":",
"output",
"(",
"tensor",
"sequence",
")",
"from",
"the",
"encoder",
"RNN",
"of",
"size",
"(",
"src_len",
"x",
"batch",
"x",
"hidden_size",
")",
".",
"state",
"(",
"FloatTensor",
")",
":",
"hidden",
"state",
"from",
"the",
"encoder",
"RNN",
"for",
"initializing",
"the",
"decoder",
".",
"memory_lengths",
"(",
"LongTensor",
")",
":",
"the",
"source",
"memory_bank",
"lengths",
".",
"Returns",
":",
"decoder_final",
"(",
"Tensor",
")",
":",
"final",
"hidden",
"state",
"from",
"the",
"decoder",
".",
"decoder_outputs",
"(",
"[",
"FloatTensor",
"]",
")",
":",
"an",
"array",
"of",
"output",
"of",
"every",
"time",
"step",
"from",
"the",
"decoder",
".",
"attns",
"(",
"dict",
"of",
"(",
"str",
"[",
"FloatTensor",
"]",
")",
":",
"a",
"dictionary",
"of",
"different",
"type",
"of",
"attention",
"Tensor",
"array",
"of",
"every",
"time",
"step",
"from",
"the",
"decoder",
"."
] | def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
"""
Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
memory_bank (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder.
"""
assert not self._copy # TODO, no support yet.
assert not self._coverage # TODO, no support yet.
# Initialize local and return variables.
attns = {}
emb = self.embeddings(tgt)
# Run the forward pass of the RNN.
if isinstance(self.rnn, nn.GRU):
rnn_output, decoder_final = self.rnn(emb, state.hidden[0])
else:
rnn_output, decoder_final = self.rnn(emb, state.hidden)
# Check
tgt_len, tgt_batch, _ = tgt.size()
output_len, output_batch, _ = rnn_output.size()
aeq(tgt_len, output_len)
aeq(tgt_batch, output_batch)
# END
# Calculate the attention.
decoder_outputs, p_attn = self.attn(
rnn_output.transpose(0, 1).contiguous(),
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths
)
attns["std"] = p_attn
# Calculate the context gate.
if self.context_gate is not None:
decoder_outputs = self.context_gate(
emb.view(-1, emb.size(2)),
rnn_output.view(-1, rnn_output.size(2)),
decoder_outputs.view(-1, decoder_outputs.size(2))
)
decoder_outputs = \
decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)
decoder_outputs = self.dropout(decoder_outputs)
return decoder_final, decoder_outputs, attns | [
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
")",
":",
"assert",
"not",
"self",
".",
"_copy",
"# TODO, no support yet.",
"assert",
"not",
"self",
".",
"_coverage",
"# TODO, no support yet.",
"# Initialize local and return variables.",
"attns",
"=",
"{",
"}",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"# Run the forward pass of the RNN.",
"if",
"isinstance",
"(",
"self",
".",
"rnn",
",",
"nn",
".",
"GRU",
")",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
"[",
"0",
"]",
")",
"else",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
")",
"# Check",
"tgt_len",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"output_len",
",",
"output_batch",
",",
"_",
"=",
"rnn_output",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_len",
",",
"output_len",
")",
"aeq",
"(",
"tgt_batch",
",",
"output_batch",
")",
"# END",
"# Calculate the attention.",
"decoder_outputs",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"p_attn",
"# Calculate the context gate.",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"decoder_outputs",
"=",
"self",
".",
"context_gate",
"(",
"emb",
".",
"view",
"(",
"-",
"1",
",",
"emb",
".",
"size",
"(",
"2",
")",
")",
",",
"rnn_output",
".",
"view",
"(",
"-",
"1",
",",
"rnn_output",
".",
"size",
"(",
"2",
")",
")",
",",
"decoder_outputs",
".",
"view",
"(",
"-",
"1",
",",
"decoder_outputs",
".",
"size",
"(",
"2",
")",
")",
")",
"decoder_outputs",
"=",
"decoder_outputs",
".",
"view",
"(",
"tgt_len",
",",
"tgt_batch",
",",
"self",
".",
"hidden_size",
")",
"decoder_outputs",
"=",
"self",
".",
"dropout",
"(",
"decoder_outputs",
")",
"return",
"decoder_final",
",",
"decoder_outputs",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L196-L255 |
|
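Note: StdRNNDecoder._run_forward_pass() above delegates scoring to a GlobalAttention module whose internals are not shown in this row. As a loose sketch only, here is a plain dot-product attention step over a memory bank; the real module also supports "general" and "mlp" scoring and masks positions beyond memory_lengths:

import torch

batch, tgt_len, src_len, dim = 2, 4, 6, 8
query = torch.randn(batch, tgt_len, dim)        # decoder hidden states
memory_bank = torch.randn(batch, src_len, dim)  # encoder states
scores = torch.bmm(query, memory_bank.transpose(1, 2))  # batch x tgt x src
p_attn = torch.softmax(scores, dim=-1)                   # attention weights
context = torch.bmm(p_attn, memory_bank)                 # batch x tgt x dim
print(context.shape, p_attn.shape)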
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | StdRNNDecoder._input_size | (self) | return self.embeddings.embedding_size | Private helper returning the number of expected features. | Private helper returning the number of expected features. | [
"Private",
"helper",
"returning",
"the",
"number",
"of",
"expected",
"features",
"."
] | def _input_size(self):
"""
Private helper returning the number of expected features.
"""
return self.embeddings.embedding_size | [
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L262-L266 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | InputFeedRNNDecoder._run_forward_pass | (self, tgt, memory_bank, state, memory_lengths=None) | return hidden, decoder_outputs, attns | See StdRNNDecoder._run_forward_pass() for description
of arguments and return values. | See StdRNNDecoder._run_forward_pass() for description
of arguments and return values. | [
"See",
"StdRNNDecoder",
".",
"_run_forward_pass",
"()",
"for",
"description",
"of",
"arguments",
"and",
"return",
"values",
"."
] | def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
"""
See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
"""
# Additional args check.
input_feed = state.input_feed.squeeze(0)
# print("input feed size: {}\n".format(input_feed.size()))
input_feed_batch, _ = input_feed.size()
_, tgt_batch, _ = tgt.size()
aeq(tgt_batch, input_feed_batch)
# END Additional args check.
# Initialize local and return variables.
decoder_outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
if self._coverage:
attns["coverage"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
hidden = state.hidden
coverage = state.coverage.squeeze(0) \
if state.coverage is not None else None
# Input feed concatenates hidden state with
# input at every time step.
#pdb.set_trace()
#print("emb size: {}\n".format(emb.size()));exit()
for _, emb_t in enumerate(emb.split(1)):
emb_t = emb_t.squeeze(0)
decoder_input = torch.cat([emb_t, input_feed], 1)
rnn_output, hidden = self.rnn(decoder_input, hidden)
decoder_output, p_attn = self.attn(
rnn_output,
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths)
if self.context_gate is not None:
# TODO: context gate should be employed
# instead of second RNN transform.
decoder_output = self.context_gate(
decoder_input, rnn_output, decoder_output
)
decoder_output = self.dropout(decoder_output)
input_feed = decoder_output
decoder_outputs += [decoder_output]
attns["std"] += [p_attn]
# Update the coverage attention.
if self._coverage:
coverage = coverage + p_attn \
if coverage is not None else p_attn
attns["coverage"] += [coverage]
# Run the forward pass of the copy attention layer.
if self._copy and not self._reuse_copy_attn:
_, copy_attn = self.copy_attn(decoder_output,
memory_bank.transpose(0, 1))
attns["copy"] += [copy_attn]
elif self._copy:
attns["copy"] = attns["std"]
# Return result.
return hidden, decoder_outputs, attns | [
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
")",
":",
"# Additional args check.",
"input_feed",
"=",
"state",
".",
"input_feed",
".",
"squeeze",
"(",
"0",
")",
"# print(\"input feed size: {}\\n\".format(input_feed.size()))",
"input_feed_batch",
",",
"_",
"=",
"input_feed",
".",
"size",
"(",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"input_feed_batch",
")",
"# END Additional args check.",
"# Initialize local and return variables.",
"decoder_outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"_coverage",
":",
"attns",
"[",
"\"coverage\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"hidden",
"=",
"state",
".",
"hidden",
"coverage",
"=",
"state",
".",
"coverage",
".",
"squeeze",
"(",
"0",
")",
"if",
"state",
".",
"coverage",
"is",
"not",
"None",
"else",
"None",
"# Input feed concatenates hidden state with",
"# input at every time step.",
"#pdb.set_trace()",
"#print(\"emb size: {}\\n\".format(emb.size()));exit()",
"for",
"_",
",",
"emb_t",
"in",
"enumerate",
"(",
"emb",
".",
"split",
"(",
"1",
")",
")",
":",
"emb_t",
"=",
"emb_t",
".",
"squeeze",
"(",
"0",
")",
"decoder_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"emb_t",
",",
"input_feed",
"]",
",",
"1",
")",
"rnn_output",
",",
"hidden",
"=",
"self",
".",
"rnn",
"(",
"decoder_input",
",",
"hidden",
")",
"decoder_output",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"# TODO: context gate should be employed",
"# instead of second RNN transform.",
"decoder_output",
"=",
"self",
".",
"context_gate",
"(",
"decoder_input",
",",
"rnn_output",
",",
"decoder_output",
")",
"decoder_output",
"=",
"self",
".",
"dropout",
"(",
"decoder_output",
")",
"input_feed",
"=",
"decoder_output",
"decoder_outputs",
"+=",
"[",
"decoder_output",
"]",
"attns",
"[",
"\"std\"",
"]",
"+=",
"[",
"p_attn",
"]",
"# Update the coverage attention.",
"if",
"self",
".",
"_coverage",
":",
"coverage",
"=",
"coverage",
"+",
"p_attn",
"if",
"coverage",
"is",
"not",
"None",
"else",
"p_attn",
"attns",
"[",
"\"coverage\"",
"]",
"+=",
"[",
"coverage",
"]",
"# Run the forward pass of the copy attention layer.",
"if",
"self",
".",
"_copy",
"and",
"not",
"self",
".",
"_reuse_copy_attn",
":",
"_",
",",
"copy_attn",
"=",
"self",
".",
"copy_attn",
"(",
"decoder_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
")",
"attns",
"[",
"\"copy\"",
"]",
"+=",
"[",
"copy_attn",
"]",
"elif",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attns",
"[",
"\"std\"",
"]",
"# Return result.",
"return",
"hidden",
",",
"decoder_outputs",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L296-L363 |
|
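Note: input feeding in the row above concatenates each target embedding with the previous attentional output before the RNN cell, which is why the decoder's expected input width grows. One step with made-up sizes:

import torch

batch, emb_dim, hidden = 2, 4, 6
emb_t = torch.randn(batch, emb_dim)      # current target embedding
input_feed = torch.zeros(batch, hidden)  # previous attentional output
decoder_input = torch.cat([emb_t, input_feed], 1)
print(decoder_input.shape)  # torch.Size([2, 10]), i.e. emb_dim + hidden

This matches _input_size() in the next row, which returns embedding_size + hidden_size.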
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | InputFeedRNNDecoder._input_size | (self) | return self.embeddings.embedding_size + self.hidden_size | Using input feed by concatenating input with attention vectors. | Using input feed by concatenating input with attention vectors. | [
"Using",
"input",
"feed",
"by",
"concatenating",
"input",
"with",
"attention",
"vectors",
"."
] | def _input_size(self):
"""
Using input feed by concatenating input with attention vectors.
"""
return self.embeddings.embedding_size + self.hidden_size | [
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size",
"+",
"self",
".",
"hidden_size"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L377-L381 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | DecoderState.detach | (self) | Need to document this | Need to document this | [
"Need",
"to",
"document",
"this"
] | def detach(self):
""" Need to document this """
self.hidden = tuple([_.detach() for _ in self.hidden])
self.input_feed = self.input_feed.detach() | [
"def",
"detach",
"(",
"self",
")",
":",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"[",
"_",
".",
"detach",
"(",
")",
"for",
"_",
"in",
"self",
".",
"hidden",
"]",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"input_feed",
".",
"detach",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L392-L395 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | DecoderState.beam_update | (self, idx, positions, beam_size) | Need to document this | Need to document this | [
"Need",
"to",
"document",
"this"
] | def beam_update(self, idx, positions, beam_size):
""" Need to document this """
for e in self._all:
sizes = e.size()
br = sizes[1]
if len(sizes) == 3:
sent_states = e.view(sizes[0], beam_size, br // beam_size,
sizes[2])[:, :, idx]
else:
sent_states = e.view(sizes[0], beam_size,
br // beam_size,
sizes[2],
sizes[3])[:, :, idx]
sent_states.data.copy_(
sent_states.data.index_select(1, positions)) | [
"def",
"beam_update",
"(",
"self",
",",
"idx",
",",
"positions",
",",
"beam_size",
")",
":",
"for",
"e",
"in",
"self",
".",
"_all",
":",
"sizes",
"=",
"e",
".",
"size",
"(",
")",
"br",
"=",
"sizes",
"[",
"1",
"]",
"if",
"len",
"(",
"sizes",
")",
"==",
"3",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"else",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
",",
"sizes",
"[",
"3",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"sent_states",
".",
"data",
".",
"copy_",
"(",
"sent_states",
".",
"data",
".",
"index_select",
"(",
"1",
",",
"positions",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L397-L412 |
||
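Note: beam_update() above reshapes each state tensor to expose a beam axis, selects one sentence, and reorders its beams in place via index_select. A small verifiable version of that trick (layer/beam/sentence sizes are invented):

import torch

layers, beam_size, n_sent, dim = 1, 3, 2, 4
e = torch.arange(layers * beam_size * n_sent * dim, dtype=torch.float)
e = e.view(layers, beam_size * n_sent, dim)
idx = 0                              # which sentence to update
positions = torch.tensor([2, 0, 1])  # new beam order
sent_states = e.view(layers, beam_size, n_sent, dim)[:, :, idx]
sent_states.data.copy_(sent_states.data.index_select(1, positions))
print(e.view(layers, beam_size, n_sent, dim)[:, :, idx])  # rows reordered in place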
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | RNNDecoderState.__init__ | (self, hidden_size, rnnstate) | Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim). | Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim). | [
"Args",
":",
"hidden_size",
"(",
"int",
")",
":",
"the",
"size",
"of",
"hidden",
"layer",
"of",
"the",
"decoder",
".",
"rnnstate",
":",
"final",
"hidden",
"state",
"from",
"the",
"encoder",
".",
"transformed",
"to",
"shape",
":",
"layers",
"x",
"batch",
"x",
"(",
"directions",
"*",
"dim",
")",
"."
] | def __init__(self, hidden_size, rnnstate):
"""
Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim).
"""
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.coverage = None
# Init the input feed.
batch_size = self.hidden[0].size(1)
h_size = (batch_size, hidden_size)
self.input_feed = self.hidden[0].data.new(*h_size).zero_() \
.unsqueeze(0) | [
"def",
"__init__",
"(",
"self",
",",
"hidden_size",
",",
"rnnstate",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"coverage",
"=",
"None",
"# Init the input feed.",
"batch_size",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"size",
"(",
"1",
")",
"h_size",
"=",
"(",
"batch_size",
",",
"hidden_size",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"data",
".",
"new",
"(",
"*",
"h_size",
")",
".",
"zero_",
"(",
")",
".",
"unsqueeze",
"(",
"0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L421-L438 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | RNNDecoderState.update_state | (self, rnnstate, input_feed, coverage) | Update decoder state | Update decoder state | [
"Update",
"decoder",
"state"
] | def update_state(self, rnnstate, input_feed, coverage):
""" Update decoder state """
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.input_feed = input_feed
self.coverage = coverage | [
"def",
"update_state",
"(",
"self",
",",
"rnnstate",
",",
"input_feed",
",",
"coverage",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"input_feed",
"=",
"input_feed",
"self",
".",
"coverage",
"=",
"coverage"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L444-L451 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/decoder.py | python | RNNDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
vars = [e.data.repeat(1, beam_size, 1)
for e in self._all]
self.hidden = tuple(vars[:-1])
self.input_feed = vars[-1] | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"vars",
"=",
"[",
"e",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")",
"for",
"e",
"in",
"self",
".",
"_all",
"]",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"vars",
"[",
":",
"-",
"1",
"]",
")",
"self",
".",
"input_feed",
"=",
"vars",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/decoder.py#L453-L458 |
||
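Note: repeat_beam_size_times() above tiles every decoder-state tensor along the batch axis so each sentence gets beam_size slots. A minimal illustration:

import torch

layers, batch, dim, beam_size = 1, 2, 3, 5
state = torch.randn(layers, batch, dim)
tiled = state.repeat(1, beam_size, 1)  # same call pattern as the row above
print(tiled.shape)  # torch.Size([1, 10, 3]): batch grows to batch * beam_size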
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | load_test_model | (opt, dummy_opt) | return shared_fields, ensemble_model, shared_model_opt | Read in multiple models for ensemble | Read in multiple models for ensemble | [
"Read",
"in",
"multiple",
"models",
"for",
"ensemble"
] | def load_test_model(opt, dummy_opt):
""" Read in multiple models for ensemble """
shared_fields = None
shared_model_opt = None
models = []
for model_path in opt.models:
fields, model, model_opt = \
onmt.model_builder.load_test_model(opt,
dummy_opt,
model_path=model_path)
if shared_fields is None:
shared_fields = fields
else:
for key, field in fields.items():
if field is not None and 'vocab' in field.__dict__:
assert field.vocab.stoi == shared_fields[key].vocab.stoi, \
'Ensemble models must use the same preprocessed data'
models.append(model)
if shared_model_opt is None:
shared_model_opt = model_opt
ensemble_model = EnsembleModel(models)
return shared_fields, ensemble_model, shared_model_opt | [
"def",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
")",
":",
"shared_fields",
"=",
"None",
"shared_model_opt",
"=",
"None",
"models",
"=",
"[",
"]",
"for",
"model_path",
"in",
"opt",
".",
"models",
":",
"fields",
",",
"model",
",",
"model_opt",
"=",
"onmt",
".",
"model_builder",
".",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
",",
"model_path",
"=",
"model_path",
")",
"if",
"shared_fields",
"is",
"None",
":",
"shared_fields",
"=",
"fields",
"else",
":",
"for",
"key",
",",
"field",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"field",
"is",
"not",
"None",
"and",
"'vocab'",
"in",
"field",
".",
"__dict__",
":",
"assert",
"field",
".",
"vocab",
".",
"stoi",
"==",
"shared_fields",
"[",
"key",
"]",
".",
"vocab",
".",
"stoi",
",",
"'Ensemble models must use the same preprocessed data'",
"models",
".",
"append",
"(",
"model",
")",
"if",
"shared_model_opt",
"is",
"None",
":",
"shared_model_opt",
"=",
"model_opt",
"ensemble_model",
"=",
"EnsembleModel",
"(",
"models",
")",
"return",
"shared_fields",
",",
"ensemble_model",
",",
"shared_model_opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L135-L156 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | EnsembleDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
for model_state in self.model_decoder_states:
model_state.repeat_beam_size_times(beam_size) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"for",
"model_state",
"in",
"self",
".",
"model_decoder_states",
":",
"model_state",
".",
"repeat_beam_size_times",
"(",
"beam_size",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L27-L30 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | EnsembleDecoderOutput.squeeze | (self, dim=None) | return EnsembleDecoderOutput([
x.squeeze(dim) for x in self.model_outputs]) | Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()` | Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()` | [
"Delegate",
"squeeze",
"to",
"avoid",
"modifying",
":",
"obj",
":",
"Translator",
".",
"translate_batch",
"()"
] | def squeeze(self, dim=None):
"""
Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()`
"""
return EnsembleDecoderOutput([
x.squeeze(dim) for x in self.model_outputs]) | [
"def",
"squeeze",
"(",
"self",
",",
"dim",
"=",
"None",
")",
":",
"return",
"EnsembleDecoderOutput",
"(",
"[",
"x",
".",
"squeeze",
"(",
"dim",
")",
"for",
"x",
"in",
"self",
".",
"model_outputs",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L41-L47 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | EnsembleDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None) | return (EnsembleDecoderOutput(outputs),
EnsembleDecoderState(states),
mean_attns) | See :obj:`RNNDecoderBase.forward()` | See :obj:`RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None):
""" See :obj:`RNNDecoderBase.forward()` """
# Memory_lengths is a single tensor shared between all models.
# This assumption will not hold if Translator is modified
# to calculate memory_lengths as something other than the length
# of the input.
outputs, states, attns = zip(*[
model_decoder.forward(
tgt, memory_bank[i], state[i], memory_lengths, step=step)
for (i, model_decoder)
in enumerate(self.model_decoders)])
mean_attns = self.combine_attns(attns)
return (EnsembleDecoderOutput(outputs),
EnsembleDecoderState(states),
mean_attns) | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# Memory_lengths is a single tensor shared between all models.",
"# This assumption will not hold if Translator is modified",
"# to calculate memory_lengths as something other than the length",
"# of the input.",
"outputs",
",",
"states",
",",
"attns",
"=",
"zip",
"(",
"*",
"[",
"model_decoder",
".",
"forward",
"(",
"tgt",
",",
"memory_bank",
"[",
"i",
"]",
",",
"state",
"[",
"i",
"]",
",",
"memory_lengths",
",",
"step",
"=",
"step",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")",
"mean_attns",
"=",
"self",
".",
"combine_attns",
"(",
"attns",
")",
"return",
"(",
"EnsembleDecoderOutput",
"(",
"outputs",
")",
",",
"EnsembleDecoderState",
"(",
"states",
")",
",",
"mean_attns",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L72-L87 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | EnsembleDecoder.init_decoder_state | (self, src, memory_bank, enc_hidden) | return EnsembleDecoderState(
[model_decoder.init_decoder_state(src,
memory_bank[i],
enc_hidden[i])
for (i, model_decoder) in enumerate(self.model_decoders)]) | See :obj:`RNNDecoderBase.init_decoder_state()` | See :obj:`RNNDecoderBase.init_decoder_state()` | [
"See",
":",
"obj",
":",
"RNNDecoderBase",
".",
"init_decoder_state",
"()"
] | def init_decoder_state(self, src, memory_bank, enc_hidden):
""" See :obj:`RNNDecoderBase.init_decoder_state()` """
return EnsembleDecoderState(
[model_decoder.init_decoder_state(src,
memory_bank[i],
enc_hidden[i])
for (i, model_decoder) in enumerate(self.model_decoders)]) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"enc_hidden",
")",
":",
"return",
"EnsembleDecoderState",
"(",
"[",
"model_decoder",
".",
"init_decoder_state",
"(",
"src",
",",
"memory_bank",
"[",
"i",
"]",
",",
"enc_hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L95-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/ensemble.py | python | EnsembleGenerator.forward | (self, hidden) | return torch.stack(distributions).mean(0) | Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary. | Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary. | [
"Compute",
"a",
"distribution",
"over",
"the",
"target",
"dictionary",
"by",
"averaging",
"distributions",
"from",
"models",
"in",
"the",
"ensemble",
".",
"All",
"models",
"in",
"the",
"ensemble",
"must",
"share",
"a",
"target",
"vocabulary",
"."
] | def forward(self, hidden):
"""
Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary.
"""
distributions = [model_generator.forward(hidden[i])
for (i, model_generator)
in enumerate(self.model_generators)]
return torch.stack(distributions).mean(0) | [
"def",
"forward",
"(",
"self",
",",
"hidden",
")",
":",
"distributions",
"=",
"[",
"model_generator",
".",
"forward",
"(",
"hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_generator",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_generators",
")",
"]",
"return",
"torch",
".",
"stack",
"(",
"distributions",
")",
".",
"mean",
"(",
"0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/ensemble.py#L113-L122 |
|
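Note: EnsembleGenerator.forward() above stacks the per-model outputs and takes the mean over the model axis; with a shared vocabulary the average of proper distributions is again a distribution. The same combination on toy probability tensors:

import torch

vocab, batch = 10, 3
# Two models' softmax distributions over the same target vocabulary.
dists = [torch.softmax(torch.randn(batch, vocab), dim=-1) for _ in range(2)]
mean_dist = torch.stack(dists).mean(0)  # identical reduction to the row above
print(mean_dist.sum(dim=-1))  # each row still sums to 1.0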
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoderLayer.forward | (self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
previous_input=None, layer_cache=None, step=None) | return output, attn, all_input | Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
Returns:
(`FloatTensor`, `FloatTensor`, `FloatTensor`):
* output `[batch_size x 1 x model_dim]`
* attn `[batch_size x 1 x src_len]`
* all_input `[batch_size x current_step x model_dim]` | Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]` | [
"Args",
":",
"inputs",
"(",
"FloatTensor",
")",
":",
"[",
"batch_size",
"x",
"1",
"x",
"model_dim",
"]",
"memory_bank",
"(",
"FloatTensor",
")",
":",
"[",
"batch_size",
"x",
"src_len",
"x",
"model_dim",
"]",
"src_pad_mask",
"(",
"LongTensor",
")",
":",
"[",
"batch_size",
"x",
"1",
"x",
"src_len",
"]",
"tgt_pad_mask",
"(",
"LongTensor",
")",
":",
"[",
"batch_size",
"x",
"1",
"x",
"1",
"]"
] | def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
previous_input=None, layer_cache=None, step=None):
"""
Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
Returns:
(`FloatTensor`, `FloatTensor`, `FloatTensor`):
* output `[batch_size x 1 x model_dim]`
* attn `[batch_size x 1 x src_len]`
* all_input `[batch_size x current_step x model_dim]`
"""
dec_mask = torch.gt(tgt_pad_mask +
self.mask[:, :tgt_pad_mask.size(1),
:tgt_pad_mask.size(1)], 0)
input_norm = self.layer_norm_1(inputs)
all_input = input_norm
if previous_input is not None:
all_input = torch.cat((previous_input, input_norm), dim=1)
dec_mask = None
if self.self_attn_type == "scaled-dot":
query, attn = self.self_attn(all_input, all_input, input_norm,
mask=dec_mask,
layer_cache=layer_cache,
type="self")
elif self.self_attn_type == "average":
query, attn = self.self_attn(input_norm, mask=dec_mask,
layer_cache=layer_cache, step=step)
query = self.drop(query) + inputs
query_norm = self.layer_norm_2(query)
mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,
mask=src_pad_mask,
layer_cache=layer_cache,
type="context")
output = self.feed_forward(self.drop(mid) + query)
return output, attn, all_input | [
"def",
"forward",
"(",
"self",
",",
"inputs",
",",
"memory_bank",
",",
"src_pad_mask",
",",
"tgt_pad_mask",
",",
"previous_input",
"=",
"None",
",",
"layer_cache",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"dec_mask",
"=",
"torch",
".",
"gt",
"(",
"tgt_pad_mask",
"+",
"self",
".",
"mask",
"[",
":",
",",
":",
"tgt_pad_mask",
".",
"size",
"(",
"1",
")",
",",
":",
"tgt_pad_mask",
".",
"size",
"(",
"1",
")",
"]",
",",
"0",
")",
"input_norm",
"=",
"self",
".",
"layer_norm_1",
"(",
"inputs",
")",
"all_input",
"=",
"input_norm",
"if",
"previous_input",
"is",
"not",
"None",
":",
"all_input",
"=",
"torch",
".",
"cat",
"(",
"(",
"previous_input",
",",
"input_norm",
")",
",",
"dim",
"=",
"1",
")",
"dec_mask",
"=",
"None",
"if",
"self",
".",
"self_attn_type",
"==",
"\"scaled-dot\"",
":",
"query",
",",
"attn",
"=",
"self",
".",
"self_attn",
"(",
"all_input",
",",
"all_input",
",",
"input_norm",
",",
"mask",
"=",
"dec_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"type",
"=",
"\"self\"",
")",
"elif",
"self",
".",
"self_attn_type",
"==",
"\"average\"",
":",
"query",
",",
"attn",
"=",
"self",
".",
"self_attn",
"(",
"input_norm",
",",
"mask",
"=",
"dec_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"step",
"=",
"step",
")",
"query",
"=",
"self",
".",
"drop",
"(",
"query",
")",
"+",
"inputs",
"query_norm",
"=",
"self",
".",
"layer_norm_2",
"(",
"query",
")",
"mid",
",",
"attn",
"=",
"self",
".",
"context_attn",
"(",
"memory_bank",
",",
"memory_bank",
",",
"query_norm",
",",
"mask",
"=",
"src_pad_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"type",
"=",
"\"context\"",
")",
"output",
"=",
"self",
".",
"feed_forward",
"(",
"self",
".",
"drop",
"(",
"mid",
")",
"+",
"query",
")",
"return",
"output",
",",
"attn",
",",
"all_input"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L53-L97 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoderLayer._get_attn_subsequent_mask | (self, size) | return subsequent_mask | Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]` | Get an attention mask to avoid using the subsequent info. | [
"Get",
"an",
"attention",
"mask",
"to",
"avoid",
"using",
"the",
"subsequent",
"info",
"."
] | def _get_attn_subsequent_mask(self, size):
"""
Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]`
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask | [
"def",
"_get_attn_subsequent_mask",
"(",
"self",
",",
"size",
")",
":",
"attn_shape",
"=",
"(",
"1",
",",
"size",
",",
"size",
")",
"subsequent_mask",
"=",
"np",
".",
"triu",
"(",
"np",
".",
"ones",
"(",
"attn_shape",
")",
",",
"k",
"=",
"1",
")",
".",
"astype",
"(",
"'uint8'",
")",
"subsequent_mask",
"=",
"torch",
".",
"from_numpy",
"(",
"subsequent_mask",
")",
"return",
"subsequent_mask"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L99-L114 |
|
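Note: _get_attn_subsequent_mask() above builds a strictly upper-triangular mask so self-attention cannot look at future positions. Reproducing the construction at size 4:

import numpy as np
import torch

size = 4
subsequent_mask = torch.from_numpy(
    np.triu(np.ones((1, size, size)), k=1).astype('uint8'))
print(subsequent_mask[0])
# tensor([[0, 1, 1, 1],
#         [0, 0, 1, 1],
#         [0, 0, 0, 1],
#         [0, 0, 0, 0]], dtype=torch.uint8) -- 1 marks a masked (future) position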
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None, cache=None) | return outputs, state, attns | See :obj:`onmt.modules.RNNDecoderBase.forward()` | See :obj:`onmt.modules.RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None, cache=None):
"""
See :obj:`onmt.modules.RNNDecoderBase.forward()`
"""
src = state.src
src_words = src[:, :, 0].transpose(0, 1)
tgt_words = tgt[:, :, 0].transpose(0, 1)
src_batch, src_len = src_words.size()
tgt_batch, tgt_len = tgt_words.size()
# Initialize return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
# Run the forward pass of the TransformerDecoder.
emb = self.embeddings(tgt, step=step)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_memory_bank = memory_bank.transpose(0, 1).contiguous()
padding_idx = self.embeddings.word_padding_idx
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \
.expand(src_batch, tgt_len, src_len)
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \
.expand(tgt_batch, tgt_len, tgt_len)
if state.cache is None:
saved_inputs = []
for i in range(self.num_layers):
prev_layer_input = None
if state.cache is None:
if state.previous_input is not None:
prev_layer_input = state.previous_layer_inputs[i]
output, attn, all_input \
= self.transformer_layers[i](
output, src_memory_bank,
src_pad_mask, tgt_pad_mask,
previous_input=prev_layer_input,
layer_cache=state.cache["layer_{}".format(i)]
if state.cache is not None else None,
step=step)
if state.cache is None:
saved_inputs.append(all_input)
if state.cache is None:
saved_inputs = torch.stack(saved_inputs)
output = self.layer_norm(output)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
attn = attn.transpose(0, 1).contiguous()
attns["std"] = attn
if self._copy:
attns["copy"] = attn
if state.cache is None:
state = state.update_state(tgt, saved_inputs)
return outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
",",
"cache",
"=",
"None",
")",
":",
"src",
"=",
"state",
".",
"src",
"src_words",
"=",
"src",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"tgt_words",
"=",
"tgt",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"src_batch",
",",
"src_len",
"=",
"src_words",
".",
"size",
"(",
")",
"tgt_batch",
",",
"tgt_len",
"=",
"tgt_words",
".",
"size",
"(",
")",
"# Initialize return variables.",
"outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"# Run the forward pass of the TransformerDecoder.",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
",",
"step",
"=",
"step",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"output",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"src_memory_bank",
"=",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"padding_idx",
"=",
"self",
".",
"embeddings",
".",
"word_padding_idx",
"src_pad_mask",
"=",
"src_words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"src_batch",
",",
"tgt_len",
",",
"src_len",
")",
"tgt_pad_mask",
"=",
"tgt_words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"tgt_len",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_layers",
")",
":",
"prev_layer_input",
"=",
"None",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"prev_layer_input",
"=",
"state",
".",
"previous_layer_inputs",
"[",
"i",
"]",
"output",
",",
"attn",
",",
"all_input",
"=",
"self",
".",
"transformer_layers",
"[",
"i",
"]",
"(",
"output",
",",
"src_memory_bank",
",",
"src_pad_mask",
",",
"tgt_pad_mask",
",",
"previous_input",
"=",
"prev_layer_input",
",",
"layer_cache",
"=",
"state",
".",
"cache",
"[",
"\"layer_{}\"",
".",
"format",
"(",
"i",
")",
"]",
"if",
"state",
".",
"cache",
"is",
"not",
"None",
"else",
"None",
",",
"step",
"=",
"step",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
".",
"append",
"(",
"all_input",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
"=",
"torch",
".",
"stack",
"(",
"saved_inputs",
")",
"output",
"=",
"self",
".",
"layer_norm",
"(",
"output",
")",
"# Process the result and update the attentions.",
"outputs",
"=",
"output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"attn",
"=",
"attn",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"attn",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attn",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"state",
"=",
"state",
".",
"update_state",
"(",
"tgt",
",",
"saved_inputs",
")",
"return",
"outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L172-L237 |
|
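Note: TransformerDecoder.forward() above derives its padding masks by comparing word ids against word_padding_idx, then broadcasting across the query length. A toy version with an assumed pad id of 1 (in the row above the real index comes from the embeddings object):

import torch

padding_idx = 1                        # assumption for this sketch
src_words = torch.tensor([[4, 5, 1],   # batch x src_len; the 1s are padding
                          [7, 1, 1]])
tgt_len = 2
src_pad_mask = src_words.eq(padding_idx).unsqueeze(1) \
    .expand(src_words.size(0), tgt_len, src_words.size(1))
print(src_pad_mask.shape)  # torch.Size([2, 2, 3])
print(src_pad_mask[1])     # every target step sees the same pad flags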
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoder.init_decoder_state | (self, src, memory_bank, enc_hidden,
with_cache=False) | return state | Init decoder state | Init decoder state | [
"Init",
"decoder",
"state"
] | def init_decoder_state(self, src, memory_bank, enc_hidden,
with_cache=False):
""" Init decoder state """
state = TransformerDecoderState(src)
if with_cache:
state._init_cache(memory_bank, self.num_layers,
self.self_attn_type)
return state | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"enc_hidden",
",",
"with_cache",
"=",
"False",
")",
":",
"state",
"=",
"TransformerDecoderState",
"(",
"src",
")",
"if",
"with_cache",
":",
"state",
".",
"_init_cache",
"(",
"memory_bank",
",",
"self",
".",
"num_layers",
",",
"self",
".",
"self_attn_type",
")",
"return",
"state"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L239-L246 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoderState.__init__ | (self, src) | Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch). | Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch). | [
"Args",
":",
"src",
"(",
"FloatTensor",
")",
":",
"a",
"sequence",
"of",
"source",
"words",
"tensors",
"with",
"optional",
"feature",
"tensors",
"of",
"size",
"(",
"len",
"x",
"batch",
")",
"."
] | def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
self.previous_layer_inputs = None
self.cache = None | [
"def",
"__init__",
"(",
"self",
",",
"src",
")",
":",
"self",
".",
"src",
"=",
"src",
"self",
".",
"previous_input",
"=",
"None",
"self",
".",
"previous_layer_inputs",
"=",
"None",
"self",
".",
"cache",
"=",
"None"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L252-L261 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoderState._all | (self) | Contains attributes that need to be updated in self.beam_update(). | Contains attributes that need to be updated in self.beam_update(). | [
"Contains",
"attributes",
"that",
"need",
"to",
"be",
"updated",
"in",
"self",
".",
"beam_update",
"()",
"."
] | def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
if (self.previous_input is not None
and self.previous_layer_inputs is not None):
return (self.previous_input,
self.previous_layer_inputs,
self.src)
else:
return (self.src,) | [
"def",
"_all",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"previous_input",
"is",
"not",
"None",
"and",
"self",
".",
"previous_layer_inputs",
"is",
"not",
"None",
")",
":",
"return",
"(",
"self",
".",
"previous_input",
",",
"self",
".",
"previous_layer_inputs",
",",
"self",
".",
"src",
")",
"else",
":",
"return",
"(",
"self",
".",
"src",
",",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L264-L274 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/transformer.py | python | TransformerDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = self.src.data.repeat(1, beam_size, 1) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"self",
".",
"src",
"=",
"self",
".",
"src",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/transformer.py#L309-L311 |
||
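The record above tiles the source tensor along the batch dimension so each beam hypothesis sees its own copy. A minimal sketch of the same `repeat(1, beam_size, 1)` pattern (assuming PyTorch is available; the shapes are made up for illustration):

```python
import torch

# Toy stand-in for state.src: (src_len=4, batch=2, feats=1).
src = torch.arange(8).view(4, 2, 1)
beam_size = 3

# repeat(1, beam_size, 1) tiles the batch dimension, so every source
# sequence appears beam_size times: (4, 2 * beam_size, 1).
repeated = src.repeat(1, beam_size, 1)
print(repeated.shape)  # torch.Size([4, 6, 1])
```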
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py | python | CNNDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None, step=None) | return outputs, state, attns | See :obj:`onmt.modules.RNNDecoderBase.forward()` | See :obj:`onmt.modules.RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):
""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
# NOTE: memory_lengths is only here for compatibility reasons
# with onmt.modules.RNNDecoderBase.forward()
# CHECKS
assert isinstance(state, CNNDecoderState)
_, tgt_batch, _ = tgt.size()
_, contxt_batch, _ = memory_bank.size()
aeq(tgt_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
tgt = torch.cat([state.previous_input, tgt], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1)
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_memory_bank_t, src_memory_bank_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(tgt)
return outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# NOTE: memory_lengths is only here for compatibility reasons",
"# with onmt.modules.RNNDecoderBase.forward()",
"# CHECKS",
"assert",
"isinstance",
"(",
"state",
",",
"CNNDecoderState",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"contxt_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"contxt_batch",
")",
"# END CHECKS",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"tgt",
"=",
"torch",
".",
"cat",
"(",
"[",
"state",
".",
"previous_input",
",",
"tgt",
"]",
",",
"0",
")",
"# Initialize return variables.",
"outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"assert",
"not",
"self",
".",
"_copy",
",",
"\"Copy mechanism not yet tested in conv2conv\"",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"tgt_emb",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The output of CNNEncoder.",
"src_memory_bank_t",
"=",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The combination of output of CNNEncoder and source embeddings.",
"src_memory_bank_c",
"=",
"state",
".",
"init_src",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# Run the forward pass of the CNNDecoder.",
"emb_reshape",
"=",
"tgt_emb",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
"*",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"linear_out",
"=",
"self",
".",
"linear",
"(",
"emb_reshape",
")",
"x",
"=",
"linear_out",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
",",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"x",
"=",
"shape_transform",
"(",
"x",
")",
"pad",
"=",
"torch",
".",
"zeros",
"(",
"x",
".",
"size",
"(",
"0",
")",
",",
"x",
".",
"size",
"(",
"1",
")",
",",
"self",
".",
"cnn_kernel_width",
"-",
"1",
",",
"1",
")",
"pad",
"=",
"pad",
".",
"type_as",
"(",
"x",
")",
"base_target_emb",
"=",
"x",
"for",
"conv",
",",
"attention",
"in",
"zip",
"(",
"self",
".",
"conv_layers",
",",
"self",
".",
"attn_layers",
")",
":",
"new_target_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"pad",
",",
"x",
"]",
",",
"2",
")",
"out",
"=",
"conv",
"(",
"new_target_input",
")",
"c",
",",
"attn",
"=",
"attention",
"(",
"base_target_emb",
",",
"out",
",",
"src_memory_bank_t",
",",
"src_memory_bank_c",
")",
"x",
"=",
"(",
"x",
"+",
"(",
"c",
"+",
"out",
")",
"*",
"SCALE_WEIGHT",
")",
"*",
"SCALE_WEIGHT",
"output",
"=",
"x",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# Process the result and update the attentions.",
"outputs",
"=",
"output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"outputs",
"=",
"outputs",
"[",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
"attn",
"=",
"attn",
"[",
":",
",",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
".",
"squeeze",
"(",
")",
"attn",
"=",
"torch",
".",
"stack",
"(",
"[",
"attn",
"]",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"attn",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attn",
"# Update the state.",
"state",
".",
"update_state",
"(",
"tgt",
")",
"return",
"outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py#L58-L122 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py | python | CNNDecoder.init_decoder_state | (self, _, memory_bank, enc_hidden, with_cache=False) | return CNNDecoderState(memory_bank, enc_hidden) | Init decoder state. | Init decoder state. | [
"Init",
"decoder",
"state",
"."
] | def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):
"""
Init decoder state.
"""
return CNNDecoderState(memory_bank, enc_hidden) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"_",
",",
"memory_bank",
",",
"enc_hidden",
",",
"with_cache",
"=",
"False",
")",
":",
"return",
"CNNDecoderState",
"(",
"memory_bank",
",",
"enc_hidden",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py#L124-L128 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py | python | CNNDecoderState._all | (self) | return (self.previous_input,) | Contains attributes that need to be updated in self.beam_update(). | Contains attributes that need to be updated in self.beam_update(). | [
"Contains",
"attributes",
"that",
"need",
"to",
"be",
"updated",
"in",
"self",
".",
"beam_update",
"()",
"."
] | def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,) | [
"def",
"_all",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"previous_input",
",",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py#L141-L145 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py | python | CNNDecoderState.update_state | (self, new_input) | Called for every decoder forward pass. | Called for every decoder forward pass. | [
"Called",
"for",
"every",
"decoder",
"forward",
"pass",
"."
] | def update_state(self, new_input):
""" Called for every decoder forward pass. """
self.previous_input = new_input | [
"def",
"update_state",
"(",
"self",
",",
"new_input",
")",
":",
"self",
".",
"previous_input",
"=",
"new_input"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py#L150-L152 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py | python | CNNDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = self.init_src.data.repeat(1, beam_size, 1) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"self",
".",
"init_src",
"=",
"self",
".",
"init_src",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/decoders/cnn_decoder.py#L154-L156 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | get_vocabulary | (fobj, is_dict=False) | return vocab | Read text and return dictionary that encodes vocabulary | Read text and return dictionary that encodes vocabulary | [
"Read",
"text",
"and",
"return",
"dictionary",
"that",
"encodes",
"vocabulary"
] | def get_vocabulary(fobj, is_dict=False):
"""Read text and return dictionary that encodes vocabulary
"""
vocab = Counter()
for line in fobj:
if is_dict:
word, count = line.strip().split()
vocab[word] = int(count)
else:
for word in line.split():
vocab[word] += 1
return vocab | [
"def",
"get_vocabulary",
"(",
"fobj",
",",
"is_dict",
"=",
"False",
")",
":",
"vocab",
"=",
"Counter",
"(",
")",
"for",
"line",
"in",
"fobj",
":",
"if",
"is_dict",
":",
"word",
",",
"count",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"vocab",
"[",
"word",
"]",
"=",
"int",
"(",
"count",
")",
"else",
":",
"for",
"word",
"in",
"line",
".",
"split",
"(",
")",
":",
"vocab",
"[",
"word",
"]",
"+=",
"1",
"return",
"vocab"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L59-L70 |
|
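A minimal usage sketch of the vocabulary-counting logic in the record above, run over an in-memory file object (the sample corpus lines are hypothetical):

```python
from collections import Counter
from io import StringIO

corpus = StringIO("the cat sat\nthe cat ran\n")

vocab = Counter()
for line in corpus:            # plain-text mode, i.e. is_dict=False
    for word in line.split():
        vocab[word] += 1
print(vocab)  # Counter({'the': 2, 'cat': 2, 'sat': 1, 'ran': 1})
```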
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | update_pair_statistics | (pair, changed, stats, indices) | Minimally update the indices and frequency of symbol pairs
if we merge a pair of symbols, only pairs that overlap with occurrences
of this pair are affected, and need to be updated. | Minimally update the indices and frequency of symbol pairs | [
"Minimally",
"update",
"the",
"indices",
"and",
"frequency",
"of",
"symbol",
"pairs"
] | def update_pair_statistics(pair, changed, stats, indices):
"""Minimally update the indices and frequency of symbol pairs
if we merge a pair of symbols, only pairs that overlap with occurrences
of this pair are affected, and need to be updated.
"""
stats[pair] = 0
indices[pair] = defaultdict(int)
first, second = pair
new_pair = first + second
for j, word, old_word, freq in changed:
# find all instances of pair, and update frequency/indices around it
i = 0
while True:
# find first symbol
try:
i = old_word.index(first, i)
except ValueError:
break
# if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])
if i < len(old_word) - 1 and old_word[i + 1] == second:
# assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B"
if i:
prev = old_word[i - 1:i + 1]
stats[prev] -= freq
indices[prev][j] -= 1
if i < len(old_word) - 2:
# assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B".
# however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block
if old_word[i + 2] != first or i >= len(old_word) - 3 or old_word[i + 3] != second:
nex = old_word[i + 1:i + 3]
stats[nex] -= freq
indices[nex][j] -= 1
i += 2
else:
i += 1
i = 0
while True:
try:
# find new pair
i = word.index(new_pair, i)
except ValueError:
break
# assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC"
if i:
prev = word[i - 1:i + 1]
stats[prev] += freq
indices[prev][j] += 1
# assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B"
# however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block
if i < len(word) - 1 and word[i + 1] != new_pair:
nex = word[i:i + 2]
stats[nex] += freq
indices[nex][j] += 1
i += 1 | [
"def",
"update_pair_statistics",
"(",
"pair",
",",
"changed",
",",
"stats",
",",
"indices",
")",
":",
"stats",
"[",
"pair",
"]",
"=",
"0",
"indices",
"[",
"pair",
"]",
"=",
"defaultdict",
"(",
"int",
")",
"first",
",",
"second",
"=",
"pair",
"new_pair",
"=",
"first",
"+",
"second",
"for",
"j",
",",
"word",
",",
"old_word",
",",
"freq",
"in",
"changed",
":",
"# find all instances of pair, and update frequency/indices around it",
"i",
"=",
"0",
"while",
"True",
":",
"# find first symbol",
"try",
":",
"i",
"=",
"old_word",
".",
"index",
"(",
"first",
",",
"i",
")",
"except",
"ValueError",
":",
"break",
"# if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])",
"if",
"i",
"<",
"len",
"(",
"old_word",
")",
"-",
"1",
"and",
"old_word",
"[",
"i",
"+",
"1",
"]",
"==",
"second",
":",
"# assuming a symbol sequence \"A B C\", if \"B C\" is merged, reduce the frequency of \"A B\"",
"if",
"i",
":",
"prev",
"=",
"old_word",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"stats",
"[",
"prev",
"]",
"-=",
"freq",
"indices",
"[",
"prev",
"]",
"[",
"j",
"]",
"-=",
"1",
"if",
"i",
"<",
"len",
"(",
"old_word",
")",
"-",
"2",
":",
"# assuming a symbol sequence \"A B C B\", if \"B C\" is merged, reduce the frequency of \"C B\".",
"# however, skip this if the sequence is A B C B C, because the frequency of \"C B\" will be reduced by the previous code block",
"if",
"old_word",
"[",
"i",
"+",
"2",
"]",
"!=",
"first",
"or",
"i",
">=",
"len",
"(",
"old_word",
")",
"-",
"3",
"or",
"old_word",
"[",
"i",
"+",
"3",
"]",
"!=",
"second",
":",
"nex",
"=",
"old_word",
"[",
"i",
"+",
"1",
":",
"i",
"+",
"3",
"]",
"stats",
"[",
"nex",
"]",
"-=",
"freq",
"indices",
"[",
"nex",
"]",
"[",
"j",
"]",
"-=",
"1",
"i",
"+=",
"2",
"else",
":",
"i",
"+=",
"1",
"i",
"=",
"0",
"while",
"True",
":",
"try",
":",
"# find new pair",
"i",
"=",
"word",
".",
"index",
"(",
"new_pair",
",",
"i",
")",
"except",
"ValueError",
":",
"break",
"# assuming a symbol sequence \"A BC D\", if \"B C\" is merged, increase the frequency of \"A BC\"",
"if",
"i",
":",
"prev",
"=",
"word",
"[",
"i",
"-",
"1",
":",
"i",
"+",
"1",
"]",
"stats",
"[",
"prev",
"]",
"+=",
"freq",
"indices",
"[",
"prev",
"]",
"[",
"j",
"]",
"+=",
"1",
"# assuming a symbol sequence \"A BC B\", if \"B C\" is merged, increase the frequency of \"BC B\"",
"# however, if the sequence is A BC BC, skip this step because the count of \"BC BC\" will be incremented by the previous code block",
"if",
"i",
"<",
"len",
"(",
"word",
")",
"-",
"1",
"and",
"word",
"[",
"i",
"+",
"1",
"]",
"!=",
"new_pair",
":",
"nex",
"=",
"word",
"[",
"i",
":",
"i",
"+",
"2",
"]",
"stats",
"[",
"nex",
"]",
"+=",
"freq",
"indices",
"[",
"nex",
"]",
"[",
"j",
"]",
"+=",
"1",
"i",
"+=",
"1"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L73-L129 |
||
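The record above keeps pair counts current by patching only the sites a merge touches. A brute-force recount on a toy word shows the deltas that incremental update is responsible for (this demo recomputes from scratch instead of updating in place):

```python
from collections import Counter

def pair_counts(word):
    # Count adjacent symbol pairs in a single word (weight 1 here).
    return Counter(zip(word, word[1:]))

before = pair_counts(("A", "B", "C"))   # merge target: ('B', 'C')
after = pair_counts(("A", "BC"))        # the word after the merge
print(before)  # ('A', 'B'): 1 and ('B', 'C'): 1
print(after)   # ('A', 'BC'): 1 -- ('A', 'B') lost a count, ('A', 'BC') gained one
```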
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | get_pair_statistics | (vocab) | return stats, indices | Count frequency of all symbol pairs, and create index | Count frequency of all symbol pairs, and create index | [
"Count",
"frequency",
"of",
"all",
"symbol",
"pairs",
"and",
"create",
"index"
] | def get_pair_statistics(vocab):
"""Count frequency of all symbol pairs, and create index"""
# data structure of pair frequencies
stats = defaultdict(int)
# index from pairs to words
indices = defaultdict(lambda: defaultdict(int))
for i, (word, freq) in enumerate(vocab):
prev_char = word[0]
for char in word[1:]:
stats[prev_char, char] += freq
indices[prev_char, char][i] += 1
prev_char = char
return stats, indices | [
"def",
"get_pair_statistics",
"(",
"vocab",
")",
":",
"# data structure of pair frequencies",
"stats",
"=",
"defaultdict",
"(",
"int",
")",
"# index from pairs to words",
"indices",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"int",
")",
")",
"for",
"i",
",",
"(",
"word",
",",
"freq",
")",
"in",
"enumerate",
"(",
"vocab",
")",
":",
"prev_char",
"=",
"word",
"[",
"0",
"]",
"for",
"char",
"in",
"word",
"[",
"1",
":",
"]",
":",
"stats",
"[",
"prev_char",
",",
"char",
"]",
"+=",
"freq",
"indices",
"[",
"prev_char",
",",
"char",
"]",
"[",
"i",
"]",
"+=",
"1",
"prev_char",
"=",
"char",
"return",
"stats",
",",
"indices"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L132-L148 |
|
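A self-contained sketch of the pair-statistics pass in the record above, on a hypothetical two-word vocabulary:

```python
from collections import defaultdict

vocab = [(("l", "o", "w", "</w>"), 5), (("l", "o", "w", "e", "r", "</w>"), 2)]

stats = defaultdict(int)
indices = defaultdict(lambda: defaultdict(int))
for i, (word, freq) in enumerate(vocab):
    prev_char = word[0]
    for char in word[1:]:
        stats[prev_char, char] += freq    # frequency-weighted pair count
        indices[prev_char, char][i] += 1  # which words contain the pair
        prev_char = char

print(stats[("l", "o")])  # 7: the pair occurs in both words, weighted 5 + 2
```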
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | replace_pair | (pair, vocab, indices) | return changes | Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB' | Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB' | [
"Replace",
"all",
"occurrences",
"of",
"a",
"symbol",
"pair",
"(",
"A",
"B",
")",
"with",
"a",
"new",
"symbol",
"AB"
] | def replace_pair(pair, vocab, indices):
"""Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'"""
first, second = pair
pair_str = ''.join(pair)
pair_str = pair_str.replace('\\', '\\\\')
changes = []
pattern = re.compile(
r'(?<!\S)' + re.escape(first + ' ' + second) + r'(?!\S)')
if sys.version_info < (3, 0):
iterator = indices[pair].iteritems()
else:
iterator = indices[pair].items()
for j, freq in iterator:
if freq < 1:
continue
word, freq = vocab[j]
new_word = ' '.join(word)
new_word = pattern.sub(pair_str, new_word)
new_word = tuple(new_word.split())
vocab[j] = (new_word, freq)
changes.append((j, new_word, word, freq))
return changes | [
"def",
"replace_pair",
"(",
"pair",
",",
"vocab",
",",
"indices",
")",
":",
"first",
",",
"second",
"=",
"pair",
"pair_str",
"=",
"''",
".",
"join",
"(",
"pair",
")",
"pair_str",
"=",
"pair_str",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"changes",
"=",
"[",
"]",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'(?<!\\S)'",
"+",
"re",
".",
"escape",
"(",
"first",
"+",
"' '",
"+",
"second",
")",
"+",
"r'(?!\\S)'",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
":",
"iterator",
"=",
"indices",
"[",
"pair",
"]",
".",
"iteritems",
"(",
")",
"else",
":",
"iterator",
"=",
"indices",
"[",
"pair",
"]",
".",
"items",
"(",
")",
"for",
"j",
",",
"freq",
"in",
"iterator",
":",
"if",
"freq",
"<",
"1",
":",
"continue",
"word",
",",
"freq",
"=",
"vocab",
"[",
"j",
"]",
"new_word",
"=",
"' '",
".",
"join",
"(",
"word",
")",
"new_word",
"=",
"pattern",
".",
"sub",
"(",
"pair_str",
",",
"new_word",
")",
"new_word",
"=",
"tuple",
"(",
"new_word",
".",
"split",
"(",
")",
")",
"vocab",
"[",
"j",
"]",
"=",
"(",
"new_word",
",",
"freq",
")",
"changes",
".",
"append",
"(",
"(",
"j",
",",
"new_word",
",",
"word",
",",
"freq",
")",
")",
"return",
"changes"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L151-L174 |
|
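The merge step in the record above rewrites every "A B" occurrence as "AB" with a whitespace-guarded regex. A minimal demo of that exact pattern (the word string is a toy example):

```python
import re

first, second = "l", "o"
# (?<!\S) and (?!\S) make sure we match whole symbols only, never substrings.
pattern = re.compile(r'(?<!\S)' + re.escape(first + ' ' + second) + r'(?!\S)')

word = "l o w e r </w>"
print(pattern.sub(first + second, word))  # "lo w e r </w>"
```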
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | prune_stats | (stats, big_stats, threshold) | Prune statistics dict for efficiency of max()
The frequency of a symbol pair never increases, so pruning is generally safe
(until the most frequent pair is less frequent than a pair we previously pruned)
big_stats keeps full statistics for when we need to access pruned items | Prune statistics dict for efficiency of max() | [
"Prune",
"statistics",
"dict",
"for",
"efficiency",
"of",
"max",
"()"
] | def prune_stats(stats, big_stats, threshold):
"""Prune statistics dict for efficiency of max()
The frequency of a symbol pair never increases, so pruning is generally safe
(until the most frequent pair is less frequent than a pair we previously pruned)
big_stats keeps full statistics for when we need to access pruned items
"""
for item, freq in list(stats.items()):
if freq < threshold:
del stats[item]
if freq < 0:
big_stats[item] += freq
else:
big_stats[item] = freq | [
"def",
"prune_stats",
"(",
"stats",
",",
"big_stats",
",",
"threshold",
")",
":",
"for",
"item",
",",
"freq",
"in",
"list",
"(",
"stats",
".",
"items",
"(",
")",
")",
":",
"if",
"freq",
"<",
"threshold",
":",
"del",
"stats",
"[",
"item",
"]",
"if",
"freq",
"<",
"0",
":",
"big_stats",
"[",
"item",
"]",
"+=",
"freq",
"else",
":",
"big_stats",
"[",
"item",
"]",
"=",
"freq"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L177-L190 |
||
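A toy run of the pruning idea in the record above: entries below the threshold are dropped from the working dict and reflected back into the full statistics:

```python
from collections import defaultdict

stats = {("a", "b"): 12, ("b", "c"): 2, ("c", "d"): -1}
big_stats = defaultdict(int, {("a", "b"): 12, ("b", "c"): 2, ("c", "d"): 5})
threshold = 5

for item, freq in list(stats.items()):
    if freq < threshold:
        del stats[item]
        if freq < 0:
            big_stats[item] += freq  # negative deltas accumulate
        else:
            big_stats[item] = freq   # positive counts overwrite

print(stats)                  # {('a', 'b'): 12}
print(big_stats[("c", "d")])  # 4
```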
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/learn_bpe.py | python | main | (infile, outfile, num_symbols, min_frequency=2, verbose=False, is_dict=False) | Learn num_symbols BPE operations from vocabulary, and write to outfile. | Learn num_symbols BPE operations from vocabulary, and write to outfile. | [
"Learn",
"num_symbols",
"BPE",
"operations",
"from",
"vocabulary",
"and",
"write",
"to",
"outfile",
"."
] | def main(infile, outfile, num_symbols, min_frequency=2, verbose=False, is_dict=False):
"""Learn num_symbols BPE operations from vocabulary, and write to outfile.
"""
# version 0.2 changes the handling of the end-of-word token ('</w>');
# version numbering allows backward compatibility
outfile.write('#version: 0.2\n')
vocab = get_vocabulary(infile, is_dict)
vocab = dict([(tuple(x[:-1]) + (x[-1] + '</w>',), y)
for (x, y) in vocab.items()])
sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
stats, indices = get_pair_statistics(sorted_vocab)
big_stats = copy.deepcopy(stats)
# threshold is inspired by Zipfian assumption, but should only affect speed
threshold = max(stats.values()) / 10
for i in range(num_symbols):
if stats:
most_frequent = max(stats, key=lambda x: (stats[x], x))
# we probably missed the best pair because of pruning; go back to full statistics
if not stats or (i and stats[most_frequent] < threshold):
prune_stats(stats, big_stats, threshold)
stats = copy.deepcopy(big_stats)
most_frequent = max(stats, key=lambda x: (stats[x], x))
# threshold is inspired by Zipfian assumption, but should only affect speed
threshold = stats[most_frequent] * i / (i + 10000.0)
prune_stats(stats, big_stats, threshold)
if stats[most_frequent] < min_frequency:
sys.stderr.write(
'no pair has frequency >= {0}. Stopping\n'.format(min_frequency))
break
if verbose:
sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(
i, most_frequent[0], most_frequent[1], stats[most_frequent]))
outfile.write('{0} {1}\n'.format(*most_frequent))
changes = replace_pair(most_frequent, sorted_vocab, indices)
update_pair_statistics(most_frequent, changes, stats, indices)
stats[most_frequent] = 0
if not i % 100:
prune_stats(stats, big_stats, threshold) | [
"def",
"main",
"(",
"infile",
",",
"outfile",
",",
"num_symbols",
",",
"min_frequency",
"=",
"2",
",",
"verbose",
"=",
"False",
",",
"is_dict",
"=",
"False",
")",
":",
"# version 0.2 changes the handling of the end-of-word token ('</w>');",
"# version numbering allows bckward compatibility",
"outfile",
".",
"write",
"(",
"'#version: 0.2\\n'",
")",
"vocab",
"=",
"get_vocabulary",
"(",
"infile",
",",
"is_dict",
")",
"vocab",
"=",
"dict",
"(",
"[",
"(",
"tuple",
"(",
"x",
"[",
":",
"-",
"1",
"]",
")",
"+",
"(",
"x",
"[",
"-",
"1",
"]",
"+",
"'</w>'",
",",
")",
",",
"y",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"vocab",
".",
"items",
"(",
")",
"]",
")",
"sorted_vocab",
"=",
"sorted",
"(",
"vocab",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
"stats",
",",
"indices",
"=",
"get_pair_statistics",
"(",
"sorted_vocab",
")",
"big_stats",
"=",
"copy",
".",
"deepcopy",
"(",
"stats",
")",
"# threshold is inspired by Zipfian assumption, but should only affect speed",
"threshold",
"=",
"max",
"(",
"stats",
".",
"values",
"(",
")",
")",
"/",
"10",
"for",
"i",
"in",
"range",
"(",
"num_symbols",
")",
":",
"if",
"stats",
":",
"most_frequent",
"=",
"max",
"(",
"stats",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"stats",
"[",
"x",
"]",
",",
"x",
")",
")",
"# we probably missed the best pair because of pruning; go back to full statistics",
"if",
"not",
"stats",
"or",
"(",
"i",
"and",
"stats",
"[",
"most_frequent",
"]",
"<",
"threshold",
")",
":",
"prune_stats",
"(",
"stats",
",",
"big_stats",
",",
"threshold",
")",
"stats",
"=",
"copy",
".",
"deepcopy",
"(",
"big_stats",
")",
"most_frequent",
"=",
"max",
"(",
"stats",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"stats",
"[",
"x",
"]",
",",
"x",
")",
")",
"# threshold is inspired by Zipfian assumption, but should only affect speed",
"threshold",
"=",
"stats",
"[",
"most_frequent",
"]",
"*",
"i",
"/",
"(",
"i",
"+",
"10000.0",
")",
"prune_stats",
"(",
"stats",
",",
"big_stats",
",",
"threshold",
")",
"if",
"stats",
"[",
"most_frequent",
"]",
"<",
"min_frequency",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'no pair has frequency >= {0}. Stopping\\n'",
".",
"format",
"(",
"min_frequency",
")",
")",
"break",
"if",
"verbose",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'pair {0}: {1} {2} -> {1}{2} (frequency {3})\\n'",
".",
"format",
"(",
"i",
",",
"most_frequent",
"[",
"0",
"]",
",",
"most_frequent",
"[",
"1",
"]",
",",
"stats",
"[",
"most_frequent",
"]",
")",
")",
"outfile",
".",
"write",
"(",
"'{0} {1}\\n'",
".",
"format",
"(",
"*",
"most_frequent",
")",
")",
"changes",
"=",
"replace_pair",
"(",
"most_frequent",
",",
"sorted_vocab",
",",
"indices",
")",
"update_pair_statistics",
"(",
"most_frequent",
",",
"changes",
",",
"stats",
",",
"indices",
")",
"stats",
"[",
"most_frequent",
"]",
"=",
"0",
"if",
"not",
"i",
"%",
"100",
":",
"prune_stats",
"(",
"stats",
",",
"big_stats",
",",
"threshold",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/learn_bpe.py#L193-L236 |
||
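An end-to-end toy version of the learning loop in the record above, stripped of the pruning/threshold machinery and the incremental statistics (a simplified sketch, not the real implementation; tie-breaking may also differ):

```python
from collections import Counter

vocab = Counter({("l", "o", "w", "</w>"): 5,
                 ("l", "o", "w", "e", "r", "</w>"): 2})

def pair_stats(vocab):
    stats = Counter()
    for word, freq in vocab.items():
        for pair in zip(word, word[1:]):
            stats[pair] += freq
    return stats

merges = []
for _ in range(3):  # learn three merge operations
    stats = pair_stats(vocab)
    if not stats:
        break
    best = max(stats, key=stats.get)
    merges.append(best)
    merged = Counter()
    for word, freq in vocab.items():
        out, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                out.append(word[i] + word[i + 1])
                i += 2
            else:
                out.append(word[i])
                i += 1
        merged[tuple(out)] = freq
    vocab = merged

print(merges)  # e.g. [('l', 'o'), ('lo', 'w'), ('low', '</w>')]
```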
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | get_pairs | (word) | return pairs | Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings) | Return set of symbol pairs in a word. | [
"Return",
"set",
"of",
"symbol",
"pairs",
"in",
"a",
"word",
"."
] | def get_pairs(word):
"""Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | [
"def",
"get_pairs",
"(",
"word",
")",
":",
"pairs",
"=",
"set",
"(",
")",
"prev_char",
"=",
"word",
"[",
"0",
"]",
"for",
"char",
"in",
"word",
"[",
"1",
":",
"]",
":",
"pairs",
".",
"add",
"(",
"(",
"prev_char",
",",
"char",
")",
")",
"prev_char",
"=",
"char",
"return",
"pairs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L126-L136 |
|
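A quick demo of the symbol-pair extraction in the record above, on a toy word tuple:

```python
word = ("l", "o", "w", "</w>")
pairs = set(zip(word, word[1:]))  # equivalent to the loop in get_pairs
print(pairs)  # {('l', 'o'), ('o', 'w'), ('w', '</w>')}
```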
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | encode | (orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries=None) | return word | Encode word based on list of BPE merge operations, which are applied consecutively | Encode word based on list of BPE merge operations, which are applied consecutively | [
"Encode",
"word",
"based",
"on",
"list",
"of",
"BPE",
"merge",
"operations",
"which",
"are",
"applied",
"consecutively"
] | def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries=None):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if orig in cache:
return cache[orig]
if orig in glossaries:
cache[orig] = (orig,)
return (orig,)
if version == (0, 1):
word = tuple(orig) + ('</w>',)
elif version == (0, 2): # more consistent handling of word-final segments
word = tuple(orig[:-1]) + (orig[-1] + '</w>',)
else:
raise NotImplementedError
pairs = get_pairs(word)
if not pairs:
return orig
while True:
bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))
if bigram not in bpe_codes:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
# don't print end-of-word symbols
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word = word[:-1] + (word[-1].replace('</w>', ''),)
if vocab:
word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator)
cache[orig] = word
return word | [
"def",
"encode",
"(",
"orig",
",",
"bpe_codes",
",",
"bpe_codes_reverse",
",",
"vocab",
",",
"separator",
",",
"version",
",",
"cache",
",",
"glossaries",
"=",
"None",
")",
":",
"if",
"orig",
"in",
"cache",
":",
"return",
"cache",
"[",
"orig",
"]",
"if",
"orig",
"in",
"glossaries",
":",
"cache",
"[",
"orig",
"]",
"=",
"(",
"orig",
",",
")",
"return",
"(",
"orig",
",",
")",
"if",
"version",
"==",
"(",
"0",
",",
"1",
")",
":",
"word",
"=",
"tuple",
"(",
"orig",
")",
"+",
"(",
"'</w>'",
",",
")",
"elif",
"version",
"==",
"(",
"0",
",",
"2",
")",
":",
"# more consistent handling of word-final segments",
"word",
"=",
"tuple",
"(",
"orig",
"[",
":",
"-",
"1",
"]",
")",
"+",
"(",
"orig",
"[",
"-",
"1",
"]",
"+",
"'</w>'",
",",
")",
"else",
":",
"raise",
"NotImplementedError",
"pairs",
"=",
"get_pairs",
"(",
"word",
")",
"if",
"not",
"pairs",
":",
"return",
"orig",
"while",
"True",
":",
"bigram",
"=",
"min",
"(",
"pairs",
",",
"key",
"=",
"lambda",
"pair",
":",
"bpe_codes",
".",
"get",
"(",
"pair",
",",
"float",
"(",
"'inf'",
")",
")",
")",
"if",
"bigram",
"not",
"in",
"bpe_codes",
":",
"break",
"first",
",",
"second",
"=",
"bigram",
"new_word",
"=",
"[",
"]",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"word",
")",
":",
"try",
":",
"j",
"=",
"word",
".",
"index",
"(",
"first",
",",
"i",
")",
"new_word",
".",
"extend",
"(",
"word",
"[",
"i",
":",
"j",
"]",
")",
"i",
"=",
"j",
"except",
":",
"new_word",
".",
"extend",
"(",
"word",
"[",
"i",
":",
"]",
")",
"break",
"if",
"word",
"[",
"i",
"]",
"==",
"first",
"and",
"i",
"<",
"len",
"(",
"word",
")",
"-",
"1",
"and",
"word",
"[",
"i",
"+",
"1",
"]",
"==",
"second",
":",
"new_word",
".",
"append",
"(",
"first",
"+",
"second",
")",
"i",
"+=",
"2",
"else",
":",
"new_word",
".",
"append",
"(",
"word",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"new_word",
"=",
"tuple",
"(",
"new_word",
")",
"word",
"=",
"new_word",
"if",
"len",
"(",
"word",
")",
"==",
"1",
":",
"break",
"else",
":",
"pairs",
"=",
"get_pairs",
"(",
"word",
")",
"# don't print end-of-word symbols",
"if",
"word",
"[",
"-",
"1",
"]",
"==",
"'</w>'",
":",
"word",
"=",
"word",
"[",
":",
"-",
"1",
"]",
"elif",
"word",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"'</w>'",
")",
":",
"word",
"=",
"word",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"word",
"[",
"-",
"1",
"]",
".",
"replace",
"(",
"'</w>'",
",",
"''",
")",
",",
")",
"if",
"vocab",
":",
"word",
"=",
"check_vocab_and_split",
"(",
"word",
",",
"bpe_codes_reverse",
",",
"vocab",
",",
"separator",
")",
"cache",
"[",
"orig",
"]",
"=",
"word",
"return",
"word"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L139-L201 |
|
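The core of the encode loop in the record above: repeatedly apply the lowest-ranked known merge until none applies. A simplified sketch with a hypothetical merge table (the real function also handles caching, glossaries, '</w>' stripping, and vocabulary checks):

```python
# Hypothetical merge ranks: a lower rank means learned earlier, applied first.
bpe_codes = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}

def get_pairs(word):
    return set(zip(word, word[1:]))

word = ("l", "o", "w", "e", "r</w>")
pairs = get_pairs(word)
while pairs:
    bigram = min(pairs, key=lambda p: bpe_codes.get(p, float("inf")))
    if bigram not in bpe_codes:
        break
    first, second = bigram
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            out.append(first + second)
            i += 2
        else:
            out.append(word[i])
            i += 1
    word = tuple(out)
    pairs = get_pairs(word)

print(word)  # ('low', 'er</w>')
```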
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | recursive_split | (segment, bpe_codes, vocab, separator, final=False) | Recursively split segment into smaller units (by reversing BPE merges)
until all units are either in-vocabulary, or cannot be split further. | Recursively split segment into smaller units (by reversing BPE merges)
until all units are either in-vocabulary, or cannot be split further. | [
"Recursively",
"split",
"segment",
"into",
"smaller",
"units",
"(",
"by",
"reversing",
"BPE",
"merges",
")",
"until",
"all",
"units",
"are",
"either",
"in",
"-",
"vocabulary",
"or",
"cannot",
"be",
"split",
"futher",
"."
] | def recursive_split(segment, bpe_codes, vocab, separator, final=False):
"""Recursively split segment into smaller units (by reversing BPE merges)
until all units are either in-vocabulary, or cannot be split further."""
try:
if final:
left, right = bpe_codes[segment + '</w>']
right = right[:-4]
else:
left, right = bpe_codes[segment]
except KeyError:
#sys.stderr.write('cannot split {0} further.\n'.format(segment))
yield segment
return
if left + separator in vocab:
yield left
else:
for item in recursive_split(left, bpe_codes, vocab, separator, False):
yield item
if (final and right in vocab) or (not final and right + separator in vocab):
yield right
else:
for item in recursive_split(right, bpe_codes, vocab, separator, final):
yield item | [
"def",
"recursive_split",
"(",
"segment",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
",",
"final",
"=",
"False",
")",
":",
"try",
":",
"if",
"final",
":",
"left",
",",
"right",
"=",
"bpe_codes",
"[",
"segment",
"+",
"'</w>'",
"]",
"right",
"=",
"right",
"[",
":",
"-",
"4",
"]",
"else",
":",
"left",
",",
"right",
"=",
"bpe_codes",
"[",
"segment",
"]",
"except",
":",
"#sys.stderr.write('cannot split {0} further.\\n'.format(segment))",
"yield",
"segment",
"return",
"if",
"left",
"+",
"separator",
"in",
"vocab",
":",
"yield",
"left",
"else",
":",
"for",
"item",
"in",
"recursive_split",
"(",
"left",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
",",
"False",
")",
":",
"yield",
"item",
"if",
"(",
"final",
"and",
"right",
"in",
"vocab",
")",
"or",
"(",
"not",
"final",
"and",
"right",
"+",
"separator",
"in",
"vocab",
")",
":",
"yield",
"right",
"else",
":",
"for",
"item",
"in",
"recursive_split",
"(",
"right",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
",",
"final",
")",
":",
"yield",
"item"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L204-L229 |
||
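Reverse-BPE splitting as in the record above, demonstrated with a hypothetical reversed code table (merged symbol -> its two parts) and a tiny in-vocabulary set:

```python
# Hypothetical reversed merge table and target vocabulary.
bpe_codes_reverse = {"lower</w>": ("low", "er</w>")}
vocab = {"low@@", "er"}
separator = "@@"

def recursive_split(segment, final=False):
    try:
        if final:
            left, right = bpe_codes_reverse[segment + "</w>"]
            right = right[:-4]  # strip the '</w>' marker
        else:
            left, right = bpe_codes_reverse[segment]
    except KeyError:
        yield segment           # cannot be split further
        return
    if left + separator in vocab:
        yield left
    else:
        yield from recursive_split(left)
    if (final and right in vocab) or (not final and right + separator in vocab):
        yield right
    else:
        yield from recursive_split(right, final)

print(list(recursive_split("lower", final=True)))  # ['low', 'er']
```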
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | check_vocab_and_split | (orig, bpe_codes, vocab, separator) | return out | Check for each segment in word if it is in-vocabulary,
and segment OOV segments into smaller units by reversing the BPE merge operations | Check for each segment in word if it is in-vocabulary,
and segment OOV segments into smaller units by reversing the BPE merge operations | [
"Check",
"for",
"each",
"segment",
"in",
"word",
"if",
"it",
"is",
"in",
"-",
"vocabulary",
"and",
"segment",
"OOV",
"segments",
"into",
"smaller",
"units",
"by",
"reversing",
"the",
"BPE",
"merge",
"operations"
] | def check_vocab_and_split(orig, bpe_codes, vocab, separator):
"""Check for each segment in word if it is in-vocabulary,
and segment OOV segments into smaller units by reversing the BPE merge operations"""
out = []
for segment in orig[:-1]:
if segment + separator in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, False):
out.append(item)
segment = orig[-1]
if segment in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, True):
out.append(item)
return out | [
"def",
"check_vocab_and_split",
"(",
"orig",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
")",
":",
"out",
"=",
"[",
"]",
"for",
"segment",
"in",
"orig",
"[",
":",
"-",
"1",
"]",
":",
"if",
"segment",
"+",
"separator",
"in",
"vocab",
":",
"out",
".",
"append",
"(",
"segment",
")",
"else",
":",
"#sys.stderr.write('OOV: {0}\\n'.format(segment))",
"for",
"item",
"in",
"recursive_split",
"(",
"segment",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
",",
"False",
")",
":",
"out",
".",
"append",
"(",
"item",
")",
"segment",
"=",
"orig",
"[",
"-",
"1",
"]",
"if",
"segment",
"in",
"vocab",
":",
"out",
".",
"append",
"(",
"segment",
")",
"else",
":",
"#sys.stderr.write('OOV: {0}\\n'.format(segment))",
"for",
"item",
"in",
"recursive_split",
"(",
"segment",
",",
"bpe_codes",
",",
"vocab",
",",
"separator",
",",
"True",
")",
":",
"out",
".",
"append",
"(",
"item",
")",
"return",
"out"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L232-L254 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | read_vocabulary | (vocab_file, threshold) | return vocabulary | read vocabulary file produced by get_vocab.py, and filter according to frequency threshold. | read vocabulary file produced by get_vocab.py, and filter according to frequency threshold. | [
"read",
"vocabulary",
"file",
"produced",
"by",
"get_vocab",
".",
"py",
"and",
"filter",
"according",
"to",
"frequency",
"threshold",
"."
] | def read_vocabulary(vocab_file, threshold):
"""read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.
"""
vocabulary = set()
for line in vocab_file:
word, freq = line.split()
freq = int(freq)
if threshold is None or freq >= threshold:
vocabulary.add(word)
return vocabulary | [
"def",
"read_vocabulary",
"(",
"vocab_file",
",",
"threshold",
")",
":",
"vocabulary",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"vocab_file",
":",
"word",
",",
"freq",
"=",
"line",
".",
"split",
"(",
")",
"freq",
"=",
"int",
"(",
"freq",
")",
"if",
"threshold",
"==",
"None",
"or",
"freq",
">=",
"threshold",
":",
"vocabulary",
".",
"add",
"(",
"word",
")",
"return",
"vocabulary"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L257-L269 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | isolate_glossary | (word, glossary) | Isolate a glossary present inside a word.
Returns a list of subwords, in which all 'glossary' glossaries are isolated
For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:
['1934', 'USA', 'B', 'USA'] | Isolate a glossary present inside a word. | [
"Isolate",
"a",
"glossary",
"present",
"inside",
"a",
"word",
"."
] | def isolate_glossary(word, glossary):
"""
Isolate a glossary present inside a word.
Returns a list of subwords, in which all 'glossary' glossaries are isolated
For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:
['1934', 'USA', 'B', 'USA']
"""
if word == glossary or glossary not in word:
return [word]
else:
splits = word.split(glossary)
segments = [segment.strip() for split in splits[:-1]
for segment in [split, glossary] if segment != '']
return segments + [splits[-1].strip()] if splits[-1] != '' else segments | [
"def",
"isolate_glossary",
"(",
"word",
",",
"glossary",
")",
":",
"if",
"word",
"==",
"glossary",
"or",
"glossary",
"not",
"in",
"word",
":",
"return",
"[",
"word",
"]",
"else",
":",
"splits",
"=",
"word",
".",
"split",
"(",
"glossary",
")",
"segments",
"=",
"[",
"segment",
".",
"strip",
"(",
")",
"for",
"split",
"in",
"splits",
"[",
":",
"-",
"1",
"]",
"for",
"segment",
"in",
"[",
"split",
",",
"glossary",
"]",
"if",
"segment",
"!=",
"''",
"]",
"return",
"segments",
"+",
"[",
"splits",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"]",
"if",
"splits",
"[",
"-",
"1",
"]",
"!=",
"''",
"else",
"segments"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L272-L287 |
||
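The record above isolates glossary terms inside a word. Reproducing its own docstring example with a standalone copy of the logic:

```python
def isolate_glossary(word, glossary):
    # Words without the glossary (or equal to it) pass through unchanged.
    if word == glossary or glossary not in word:
        return [word]
    splits = word.split(glossary)
    segments = [seg.strip() for split in splits[:-1]
                for seg in [split, glossary] if seg != '']
    return segments + [splits[-1].strip()] if splits[-1] != '' else segments

print(isolate_glossary("1934USABUSA", "USA"))  # ['1934', 'USA', 'B', 'USA']
```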
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/tools/apply_bpe.py | python | BPE.segment | (self, sentence) | return ' '.join(output) | segment single sentence (whitespace-tokenized string) with BPE encoding | segment single sentence (whitespace-tokenized string) with BPE encoding | [
"segment",
"single",
"sentence",
"(",
"whitespace",
"-",
"tokenized",
"string",
")",
"with",
"BPE",
"encoding"
] | def segment(self, sentence):
"""segment single sentence (whitespace-tokenized string) with BPE encoding"""
output = []
for word in sentence.split():
new_word = [out for segment in self._isolate_glossaries(word)
for out in encode(segment,
self.bpe_codes,
self.bpe_codes_reverse,
self.vocab,
self.separator,
self.version,
self.cache,
self.glossaries)]
for item in new_word[:-1]:
output.append(item + self.separator)
output.append(new_word[-1])
return ' '.join(output) | [
"def",
"segment",
"(",
"self",
",",
"sentence",
")",
":",
"output",
"=",
"[",
"]",
"for",
"word",
"in",
"sentence",
".",
"split",
"(",
")",
":",
"new_word",
"=",
"[",
"out",
"for",
"segment",
"in",
"self",
".",
"_isolate_glossaries",
"(",
"word",
")",
"for",
"out",
"in",
"encode",
"(",
"segment",
",",
"self",
".",
"bpe_codes",
",",
"self",
".",
"bpe_codes_reverse",
",",
"self",
".",
"vocab",
",",
"self",
".",
"separator",
",",
"self",
".",
"version",
",",
"self",
".",
"cache",
",",
"self",
".",
"glossaries",
")",
"]",
"for",
"item",
"in",
"new_word",
"[",
":",
"-",
"1",
"]",
":",
"output",
".",
"append",
"(",
"item",
"+",
"self",
".",
"separator",
")",
"output",
".",
"append",
"(",
"new_word",
"[",
"-",
"1",
"]",
")",
"return",
"' '",
".",
"join",
"(",
"output",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/tools/apply_bpe.py#L61-L79 |
|
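The output convention of the segment method above: every subword except the last in a word carries the separator suffix. A tiny sketch (the subword list is hypothetical, standing in for encode()'s output):

```python
separator = "@@"
subwords = ["un", "believ", "able"]  # pretend output of encode() for one word

output = [sw + separator for sw in subwords[:-1]] + [subwords[-1]]
print(" ".join(output))  # un@@ believ@@ able
```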
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | check_existing_pt_files | (opt) | Checking if there are existing .pt files to avoid tampering | Checking if there are existing .pt files to avoid tampering | [
"Checking",
"if",
"there",
"are",
"existing",
".",
"pt",
"files",
"to",
"avoid",
"tampering"
] | def check_existing_pt_files(opt):
""" Checking if there are existing .pt files to avoid tampering """
# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt
# when training, so check to avoid tampering with existing pt files
# or mixing them up.
for t in ['train', 'valid', 'vocab']:
pattern = opt.save_data + '.' + t + '*.pt'
if glob.glob(pattern):
sys.stderr.write("Please backup existing pt file: %s, "
"to avoid tampering!\n" % pattern)
sys.exit(1) | [
"def",
"check_existing_pt_files",
"(",
"opt",
")",
":",
"# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt",
"# when training, so check to avoid tampering with existing pt files",
"# or mixing them up.",
"for",
"t",
"in",
"[",
"'train'",
",",
"'valid'",
",",
"'vocab'",
"]",
":",
"pattern",
"=",
"opt",
".",
"save_data",
"+",
"'.'",
"+",
"t",
"+",
"'*.pt'",
"if",
"glob",
".",
"glob",
"(",
"pattern",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Please backup existing pt file: %s, \"",
"\"to avoid tampering!\\n\"",
"%",
"pattern",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L20-L30 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | parse_args | () | return opt | Parsing arguments | Parsing arguments | [
"Parsing",
"arguments"
] | def parse_args():
""" Parsing arguments """
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
check_existing_pt_files(opt)
return opt | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'preprocess.py'",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"opts",
".",
"add_md_help_argument",
"(",
"parser",
")",
"opts",
".",
"preprocess_opts",
"(",
"parser",
")",
"opt",
"=",
"parser",
".",
"parse_args",
"(",
")",
"torch",
".",
"manual_seed",
"(",
"opt",
".",
"seed",
")",
"check_existing_pt_files",
"(",
"opt",
")",
"return",
"opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L33-L47 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | build_save_in_shards | (src_corpus, tgt_corpus, fields,
corpus_type, opt) | return ret_list | Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
To tackle this, we only read in part of the corpus file of size
`max_shard_size`(actually it is multiples of 64 bytes that equals
or is slightly larger than this size), and process it into dataset,
then write it to disk along the way. By doing this, we only focus on
part of the corpus at any moment, thus effectively reducing memory use.
According to tests, this method can reduce the memory footprint by ~50%.
Note! As we process the shards, previous shards might still
stay in memory, but since we are done with them and hold no
more references to them, the OS can easily reclaim that
memory if memory gets tight.
If `max_shard_size` is 0 or is larger than the corpus size, it is
effectively preprocessed into one dataset, i.e. no sharding.
NOTE! `max_shard_size` is measuring the input corpus size, not the
output pt file size. So a shard pt file consists of examples of size
2 * `max_shard_size`(source + target). | Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'. | [
"Divide",
"the",
"big",
"corpus",
"into",
"shards",
"and",
"build",
"dataset",
"separately",
".",
"This",
"is",
"currently",
"only",
"for",
"data_type",
"==",
"text",
"."
] | def build_save_in_shards(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
Divide the big corpus into shards, and build dataset separately.
This is currently only for data_type=='text'.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
To tackle this, we only read in part of the corpus file of size
`max_shard_size`(actually it is multiples of 64 bytes that equals
or is slightly larger than this size), and process it into dataset,
then write it to disk along the way. By doing this, we only focus on
part of the corpus at any moment, thus effectively reducing memory use.
According to tests, this method can reduce the memory footprint by ~50%.
Note! As we process the shards, previous shards might still
stay in memory, but since we are done with them and hold no
more references to them, the OS can easily reclaim that
memory if memory gets tight.
If `max_shard_size` is 0 or is larger than the corpus size, it is
effectively preprocessed into one dataset, i.e. no sharding.
NOTE! `max_shard_size` is measuring the input corpus size, not the
output pt file size. So a shard pt file consists of examples of size
2 * `max_shard_size`(source + target).
"""
corpus_size = os.path.getsize(src_corpus)
if corpus_size > 10 * (1024 ** 2) and opt.max_shard_size == 0:
logger.info("Warning. The corpus %s is larger than 10M bytes, "
"you can set '-max_shard_size' to process it by "
"small shards to use less memory." % src_corpus)
if opt.max_shard_size != 0:
logger.info(' * divide corpus into shards and build dataset '
'separately (shard_size = %d bytes).'
% opt.max_shard_size)
ret_list = []
src_iter = inputters.ShardedTextCorpusIterator(
src_corpus, opt.src_seq_length_trunc,
"src", opt.max_shard_size)
tgt_iter = inputters.ShardedTextCorpusIterator(
tgt_corpus, opt.tgt_seq_length_trunc,
"tgt", opt.max_shard_size,
assoc_iter=src_iter)
index = 0
while not src_iter.hit_end():
index += 1
dataset = inputters.TextDataset(
fields, src_iter, tgt_iter,
src_iter.num_feats, tgt_iter.num_feats,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
dynamic_dict=opt.dynamic_dict)
# We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
logger.info(" * saving %s data shard to %s."
% (corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
return ret_list | [
"def",
"build_save_in_shards",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
":",
"corpus_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"src_corpus",
")",
"if",
"corpus_size",
">",
"10",
"*",
"(",
"1024",
"**",
"2",
")",
"and",
"opt",
".",
"max_shard_size",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Warning. The corpus %s is larger than 10M bytes, \"",
"\"you can set '-max_shard_size' to process it by \"",
"\"small shards to use less memory.\"",
"%",
"src_corpus",
")",
"if",
"opt",
".",
"max_shard_size",
"!=",
"0",
":",
"logger",
".",
"info",
"(",
"' * divide corpus into shards and build dataset '",
"'separately (shard_size = %d bytes).'",
"%",
"opt",
".",
"max_shard_size",
")",
"ret_list",
"=",
"[",
"]",
"src_iter",
"=",
"inputters",
".",
"ShardedTextCorpusIterator",
"(",
"src_corpus",
",",
"opt",
".",
"src_seq_length_trunc",
",",
"\"src\"",
",",
"opt",
".",
"max_shard_size",
")",
"tgt_iter",
"=",
"inputters",
".",
"ShardedTextCorpusIterator",
"(",
"tgt_corpus",
",",
"opt",
".",
"tgt_seq_length_trunc",
",",
"\"tgt\"",
",",
"opt",
".",
"max_shard_size",
",",
"assoc_iter",
"=",
"src_iter",
")",
"index",
"=",
"0",
"while",
"not",
"src_iter",
".",
"hit_end",
"(",
")",
":",
"index",
"+=",
"1",
"dataset",
"=",
"inputters",
".",
"TextDataset",
"(",
"fields",
",",
"src_iter",
",",
"tgt_iter",
",",
"src_iter",
".",
"num_feats",
",",
"tgt_iter",
".",
"num_feats",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
")",
"# We save fields in vocab.pt separately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"pt_file",
"=",
"\"{:s}.{:s}.{:d}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
",",
"index",
")",
"logger",
".",
"info",
"(",
"\" * saving %s data shard to %s.\"",
"%",
"(",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"ret_list",
".",
"append",
"(",
"pt_file",
")",
"return",
"ret_list"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L50-L121 |
|
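The sharding strategy described above reads the corpus in bounded chunks instead of all at once. A rough, self-contained sketch of size-bounded, line-aligned shard iteration (only loosely mirroring the `max_shard_size` behaviour of the real iterators; the sample corpus is made up):

```python
import io

def iter_shards(fobj, max_shard_size):
    """Yield lists of lines whose cumulative UTF-8 size reaches ~max_shard_size."""
    shard, size = [], 0
    for line in fobj:
        shard.append(line)
        size += len(line.encode("utf-8"))
        if size >= max_shard_size:  # the boundary always lands on a line end
            yield shard
            shard, size = [], 0
    if shard:
        yield shard

corpus = io.StringIO("one\ntwo\nthree\nfour\n")
for i, shard in enumerate(iter_shards(corpus, max_shard_size=8)):
    print(i, shard)
# 0 ['one\n', 'two\n']
# 1 ['three\n', 'four\n']
```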
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | build_save_in_shards_using_shards_size | (src_corpus, tgt_corpus, fields,
corpus_type, opt) | return ret_list | Divide src_corpus and tgt_corpus into multiple smaller
src_corpus and tgt_corpus files, then build shards; each
shard will have opt.shard_size samples except the last shard.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file. | Divide src_corpus and tgt_corpus into multiple smaller
src_corpus and tgt_corpus files, then build shards; each
shard will have opt.shard_size samples except the last shard. | [
"Divide",
"src_corpus",
"and",
"tgt_corpus",
"into",
"smaller",
"multiples",
"src_copus",
"and",
"tgt",
"corpus",
"files",
"then",
"build",
"shards",
"each",
"shard",
"will",
"have",
"opt",
".",
"shard_size",
"samples",
"except",
"last",
"shard",
"."
] | def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
Divide src_corpus and tgt_corpus into multiple smaller
src_corpus and tgt_corpus files, then build shards; each
shard will have opt.shard_size samples except the last shard.
The reason we do this is to avoid taking up too much memory due
to sucking in a huge corpus file.
"""
src_data = open(src_corpus, "r", encoding="utf-8").readlines()
tgt_data = open(tgt_corpus, "r", encoding="utf-8").readlines()
src_corpus = "".join(src_corpus.split(".")[:-1])
tgt_corpus = "".join(tgt_corpus.split(".")[:-1])
for x in range(int(len(src_data) / opt.shard_size)):
open(src_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8").writelines(
src_data[x * opt.shard_size: (x + 1) * opt.shard_size])
open(tgt_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8").writelines(
tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])
src_list = sorted(glob.glob(src_corpus + '.*.txt'))
tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))
ret_list = []
for index, src in enumerate(src_list):
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src,
tgt_path=tgt_list[index],
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size
)
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
# We save fields in vocab.pt seperately, so make it empty.
dataset.fields = []
logger.info(" * saving %sth %s data image shard to %s."
% (index, corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
del dataset.examples
gc.collect()
del dataset
gc.collect()
return ret_list | [
"def",
"build_save_in_shards_using_shards_size",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
":",
"src_data",
"=",
"open",
"(",
"src_corpus",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
"tgt_data",
"=",
"open",
"(",
"tgt_corpus",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"readlines",
"(",
")",
"src_corpus",
"=",
"\"\"",
".",
"join",
"(",
"src_corpus",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"tgt_corpus",
"=",
"\"\"",
".",
"join",
"(",
"tgt_corpus",
".",
"split",
"(",
"\".\"",
")",
"[",
":",
"-",
"1",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"len",
"(",
"src_data",
")",
"/",
"opt",
".",
"shard_size",
")",
")",
":",
"open",
"(",
"src_corpus",
"+",
"\".{0}.txt\"",
".",
"format",
"(",
"x",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"writelines",
"(",
"src_data",
"[",
"x",
"*",
"opt",
".",
"shard_size",
":",
"(",
"x",
"+",
"1",
")",
"*",
"opt",
".",
"shard_size",
"]",
")",
"open",
"(",
"tgt_corpus",
"+",
"\".{0}.txt\"",
".",
"format",
"(",
"x",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
".",
"writelines",
"(",
"tgt_data",
"[",
"x",
"*",
"opt",
".",
"shard_size",
":",
"(",
"x",
"+",
"1",
")",
"*",
"opt",
".",
"shard_size",
"]",
")",
"src_list",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"src_corpus",
"+",
"'.*.txt'",
")",
")",
"tgt_list",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"tgt_corpus",
"+",
"'.*.txt'",
")",
")",
"ret_list",
"=",
"[",
"]",
"for",
"index",
",",
"src",
"in",
"enumerate",
"(",
"src_list",
")",
":",
"dataset",
"=",
"inputters",
".",
"build_dataset",
"(",
"fields",
",",
"opt",
".",
"data_type",
",",
"src_path",
"=",
"src",
",",
"tgt_path",
"=",
"tgt_list",
"[",
"index",
"]",
",",
"src_dir",
"=",
"opt",
".",
"src_dir",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"src_seq_length_trunc",
"=",
"opt",
".",
"src_seq_length_trunc",
",",
"tgt_seq_length_trunc",
"=",
"opt",
".",
"tgt_seq_length_trunc",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
",",
"sample_rate",
"=",
"opt",
".",
"sample_rate",
",",
"window_size",
"=",
"opt",
".",
"window_size",
",",
"window_stride",
"=",
"opt",
".",
"window_stride",
",",
"window",
"=",
"opt",
".",
"window",
",",
"image_channel_size",
"=",
"opt",
".",
"image_channel_size",
")",
"pt_file",
"=",
"\"{:s}.{:s}.{:d}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
",",
"index",
")",
"# We save fields in vocab.pt seperately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"logger",
".",
"info",
"(",
"\" * saving %sth %s data image shard to %s.\"",
"%",
"(",
"index",
",",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"ret_list",
".",
"append",
"(",
"pt_file",
")",
"del",
"dataset",
".",
"examples",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
"gc",
".",
"collect",
"(",
")",
"return",
"ret_list"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L124-L189 |
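
The core of the function above is line-aligned file splitting. A standalone sketch of that idea, using `with` blocks so the file handles are closed and the same ceiling division as the loop above so the final partial shard is kept (file names and the shard size are illustrative assumptions):

```python
shard_size = 10000   # illustrative stand-in for opt.shard_size

with open("train.src.txt", encoding="utf-8") as f:
    src_data = f.readlines()
with open("train.tgt.txt", encoding="utf-8") as f:
    tgt_data = f.readlines()

assert len(src_data) == len(tgt_data), "src/tgt must stay line-aligned"

num_shards = (len(src_data) + shard_size - 1) // shard_size   # ceiling division
for index in range(num_shards):
    lo, hi = index * shard_size, (index + 1) * shard_size
    with open("train.src.{0}.txt".format(index), "w", encoding="utf-8") as f:
        f.writelines(src_data[lo:hi])
    with open("train.tgt.{0}.txt".format(index), "w", encoding="utf-8") as f:
        f.writelines(tgt_data[lo:hi])
```

One caveat when reusing the glob-and-sort pairing from the function above: `sorted()` is lexicographic, so with ten or more shards `.10.txt` sorts before `.2.txt`; the src/tgt pairing still holds only because both lists mis-sort identically.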
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | build_save_dataset | (corpus_type, fields, opt) | return [pt_file] | Building and saving the dataset | Building and saving the dataset | [
"Building",
"and",
"saving",
"the",
"dataset"
] | def build_save_dataset(corpus_type, fields, opt):
""" Building and saving the dataset """
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
src_corpus = opt.train_src
tgt_corpus = opt.train_tgt
else:
src_corpus = opt.valid_src
tgt_corpus = opt.valid_tgt
# Currently we only do preprocess sharding for corpus: data_type=='text'.
if opt.data_type == 'text':
return build_save_in_shards(
src_corpus, tgt_corpus, fields,
corpus_type, opt)
if (opt.shard_size > 0):
return build_save_in_shards_using_shards_size(src_corpus,
tgt_corpus,
fields,
corpus_type,
opt)
# For data_type == 'img' or 'audio', currently we don't do
# preprocess sharding. We only build a monolithic dataset.
# But since the interfaces are uniform, it would not be hard
# to do this should users need this feature.
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src_corpus,
tgt_path=tgt_corpus,
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size)
# We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
torch.save(dataset, pt_file)
return [pt_file] | [
"def",
"build_save_dataset",
"(",
"corpus_type",
",",
"fields",
",",
"opt",
")",
":",
"assert",
"corpus_type",
"in",
"[",
"'train'",
",",
"'valid'",
"]",
"if",
"corpus_type",
"==",
"'train'",
":",
"src_corpus",
"=",
"opt",
".",
"train_src",
"tgt_corpus",
"=",
"opt",
".",
"train_tgt",
"else",
":",
"src_corpus",
"=",
"opt",
".",
"valid_src",
"tgt_corpus",
"=",
"opt",
".",
"valid_tgt",
"# Currently we only do preprocess sharding for corpus: data_type=='text'.",
"if",
"opt",
".",
"data_type",
"==",
"'text'",
":",
"return",
"build_save_in_shards",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
"if",
"(",
"opt",
".",
"shard_size",
">",
"0",
")",
":",
"return",
"build_save_in_shards_using_shards_size",
"(",
"src_corpus",
",",
"tgt_corpus",
",",
"fields",
",",
"corpus_type",
",",
"opt",
")",
"# For data_type == 'img' or 'audio', currently we don't do",
"# preprocess sharding. We only build a monolithic dataset.",
"# But since the interfaces are uniform, it would be not hard",
"# to do this should users need this feature.",
"dataset",
"=",
"inputters",
".",
"build_dataset",
"(",
"fields",
",",
"opt",
".",
"data_type",
",",
"src_path",
"=",
"src_corpus",
",",
"tgt_path",
"=",
"tgt_corpus",
",",
"src_dir",
"=",
"opt",
".",
"src_dir",
",",
"src_seq_length",
"=",
"opt",
".",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"opt",
".",
"tgt_seq_length",
",",
"src_seq_length_trunc",
"=",
"opt",
".",
"src_seq_length_trunc",
",",
"tgt_seq_length_trunc",
"=",
"opt",
".",
"tgt_seq_length_trunc",
",",
"dynamic_dict",
"=",
"opt",
".",
"dynamic_dict",
",",
"sample_rate",
"=",
"opt",
".",
"sample_rate",
",",
"window_size",
"=",
"opt",
".",
"window_size",
",",
"window_stride",
"=",
"opt",
".",
"window_stride",
",",
"window",
"=",
"opt",
".",
"window",
",",
"image_channel_size",
"=",
"opt",
".",
"image_channel_size",
")",
"# We save fields in vocab.pt seperately, so make it empty.",
"dataset",
".",
"fields",
"=",
"[",
"]",
"pt_file",
"=",
"\"{:s}.{:s}.pt\"",
".",
"format",
"(",
"opt",
".",
"save_data",
",",
"corpus_type",
")",
"logger",
".",
"info",
"(",
"\" * saving %s dataset to %s.\"",
"%",
"(",
"corpus_type",
",",
"pt_file",
")",
")",
"torch",
".",
"save",
"(",
"dataset",
",",
"pt_file",
")",
"return",
"[",
"pt_file",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L192-L243 |
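
Because `dataset.fields` is emptied before `torch.save`, a consumer of the resulting `.pt` file has to restore the fields from the separately saved vocab. A minimal sketch, assuming a `load_fields_from_vocab` counterpart to the `save_fields_to_vocab` used below (file names are illustrative):

```python
import torch
import onmt.inputters as inputters

# Hypothetical names following the "{save_data}.{corpus_type}.pt"
# and "{save_data}.vocab.pt" schemes used above and below.
dataset = torch.load("demo.valid.pt")
vocab = torch.load("demo.vocab.pt")

# Reattach the fields that were stripped before saving.
dataset.fields = inputters.load_fields_from_vocab(vocab)
```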
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/preprocess.py | python | build_save_vocab | (train_dataset, fields, opt) | Building and saving the vocab | Building and saving the vocab | [
"Building",
"and",
"saving",
"the",
"vocab"
] | def build_save_vocab(train_dataset, fields, opt):
""" Building and saving the vocab """
fields = inputters.build_vocab(train_dataset, fields, opt.data_type,
opt.share_vocab,
opt.src_vocab,
opt.src_vocab_size,
opt.src_words_min_frequency,
opt.tgt_vocab,
opt.tgt_vocab_size,
opt.tgt_words_min_frequency)
# Can't save fields, so remove/reconstruct at training time.
vocab_file = opt.save_data + '.vocab.pt'
torch.save(inputters.save_fields_to_vocab(fields), vocab_file) | [
"def",
"build_save_vocab",
"(",
"train_dataset",
",",
"fields",
",",
"opt",
")",
":",
"fields",
"=",
"inputters",
".",
"build_vocab",
"(",
"train_dataset",
",",
"fields",
",",
"opt",
".",
"data_type",
",",
"opt",
".",
"share_vocab",
",",
"opt",
".",
"src_vocab",
",",
"opt",
".",
"src_vocab_size",
",",
"opt",
".",
"src_words_min_frequency",
",",
"opt",
".",
"tgt_vocab",
",",
"opt",
".",
"tgt_vocab_size",
",",
"opt",
".",
"tgt_words_min_frequency",
")",
"# Can't save fields, so remove/reconstruct at training time.",
"vocab_file",
"=",
"opt",
".",
"save_data",
"+",
"'.vocab.pt'",
"torch",
".",
"save",
"(",
"inputters",
".",
"save_fields_to_vocab",
"(",
"fields",
")",
",",
"vocab_file",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/preprocess.py#L246-L259 |
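
Taken together, the build_save_* functions above form the usual preprocessing pipeline. A minimal driver sketch, assuming `fields` comes from `inputters.get_fields` and `opt` from the repository's option parser:

```python
# Build and save the train shards, then the vocab derived from them,
# then the validation set (which reuses the same fields).
train_dataset_files = build_save_dataset('train', fields, opt)
build_save_vocab(train_dataset_files, fields, opt)
build_save_dataset('valid', fields, opt)
```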
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/train.py | python | run | (opt, device_id, error_queue) | run process | run process | [
"run",
"process"
] | def run(opt, device_id, error_queue):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc())) | [
"def",
"run",
"(",
"opt",
",",
"device_id",
",",
"error_queue",
")",
":",
"try",
":",
"gpu_rank",
"=",
"onmt",
".",
"utils",
".",
"distributed",
".",
"multi_init",
"(",
"opt",
",",
"device_id",
")",
"if",
"gpu_rank",
"!=",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
":",
"raise",
"AssertionError",
"(",
"\"An error occurred in \\\n Distributed initialization\"",
")",
"single_main",
"(",
"opt",
",",
"device_id",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"# killed by parent, do nothing",
"except",
"Exception",
":",
"# propagate exception to parent process, keeping original traceback",
"import",
"traceback",
"error_queue",
".",
"put",
"(",
"(",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/train.py#L62-L75 |
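
`run` is meant to be the target of one worker process per GPU. A minimal parent-side launch sketch in the usual `torch.multiprocessing` style (a plausible pattern, not necessarily the repository's exact `main()`):

```python
import torch.multiprocessing as mp

ctx = mp.get_context('spawn')
error_queue = ctx.SimpleQueue()      # filled by run() when a worker fails
error_handler = ErrorHandler(error_queue)

procs = []
for device_id in range(len(opt.gpu_ranks)):
    p = ctx.Process(target=run, args=(opt, device_id, error_queue), daemon=True)
    p.start()
    error_handler.add_child(p.pid)   # let the handler kill it on failure
    procs.append(p)

for p in procs:
    p.join()
```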
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/train.py | python | ErrorHandler.__init__ | (self, error_queue) | init error handler | init error handler | [
"init",
"error",
"handler"
] | def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler) | [
"def",
"__init__",
"(",
"self",
",",
"error_queue",
")",
":",
"import",
"signal",
"import",
"threading",
"self",
".",
"error_queue",
"=",
"error_queue",
"self",
".",
"children_pids",
"=",
"[",
"]",
"self",
".",
"error_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"error_listener",
",",
"daemon",
"=",
"True",
")",
"self",
".",
"error_thread",
".",
"start",
"(",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGUSR1",
",",
"self",
".",
"signal_handler",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/train.py#L82-L91 |
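
The SIGUSR1 handler registered in the constructor is not part of this excerpt. Based on how `error_listener` below re-queues the traceback before signalling, a plausible sketch of it (an assumption, not the repository's verbatim code; it relies on the module-level `os` and `signal` imports that `error_listener` also uses):

```python
def signal_handler(self, signalnum, stackframe):
    """ kill the workers, then re-raise the child's traceback in the parent """
    for pid in self.children_pids:
        os.kill(pid, signal.SIGINT)  # ask every registered child to stop
    (rank, original_trace) = self.error_queue.get()
    msg = "\n\n-- Traceback from the rank %d worker --\n\n" % rank
    raise Exception(msg + original_trace)
```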
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/train.py | python | ErrorHandler.add_child | (self, pid) | error handler | error handler | [
"error",
"handler"
] | def add_child(self, pid):
""" error handler """
self.children_pids.append(pid) | [
"def",
"add_child",
"(",
"self",
",",
"pid",
")",
":",
"self",
".",
"children_pids",
".",
"append",
"(",
"pid",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/train.py#L93-L95 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/train.py | python | ErrorHandler.error_listener | (self) | error listener | error listener | [
"error",
"listener"
] | def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1) | [
"def",
"error_listener",
"(",
"self",
")",
":",
"(",
"rank",
",",
"original_trace",
")",
"=",
"self",
".",
"error_queue",
".",
"get",
"(",
")",
"self",
".",
"error_queue",
".",
"put",
"(",
"(",
"rank",
",",
"original_trace",
")",
")",
"os",
".",
"kill",
"(",
"os",
".",
"getpid",
"(",
")",
",",
"signal",
".",
"SIGUSR1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/train.py#L97-L101 |
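
`error_listener` completes a small protocol: a worker puts `(rank, traceback)` on the queue, the daemon thread takes it, puts it back so the signal handler can read it too, and converts it into a SIGUSR1 aimed at the parent's own pid. A self-contained toy version of the same protocol (names illustrative; SIGUSR1 and `signal.pause` are Unix-only):

```python
import os
import signal
import threading
import multiprocessing as mp

error_queue = mp.SimpleQueue()

def listener():
    rank, trace = error_queue.get()   # block until some worker reports
    error_queue.put((rank, trace))    # re-queue for the signal handler
    os.kill(os.getpid(), signal.SIGUSR1)

def on_sigusr1(signum, frame):
    rank, trace = error_queue.get()
    raise RuntimeError("worker %s failed:\n%s" % (rank, trace))

signal.signal(signal.SIGUSR1, on_sigusr1)
threading.Thread(target=listener, daemon=True).start()

error_queue.put((0, "Traceback (most recent call last): ..."))  # simulate a failure
signal.pause()                        # the handler raises here in the main thread
```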