nwo (string, 6-76 chars) | sha (string, 40 chars) | path (string, 5-118 chars) | language (string, 1 class) | identifier (string, 1-89 chars) | parameters (string, 2-5.4k chars) | argument_list (string, 1 class) | return_statement (string, 0-51.1k chars) | docstring (string, 1-17.6k chars) | docstring_summary (string, 0-7.02k chars) | docstring_tokens (sequence) | function (string, 30-51.1k chars) | function_tokens (sequence) | url (string, 85-218 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/train.py | python | ErrorHandler.signal_handler | (self, signalnum, stackframe) | signal handler | signal handler | [
"signal",
"handler"
] | def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg) | [
"def",
"signal_handler",
"(",
"self",
",",
"signalnum",
",",
"stackframe",
")",
":",
"for",
"pid",
"in",
"self",
".",
"children_pids",
":",
"os",
".",
"kill",
"(",
"pid",
",",
"signal",
".",
"SIGINT",
")",
"# kill children processes",
"(",
"rank",
",",
"original_trace",
")",
"=",
"self",
".",
"error_queue",
".",
"get",
"(",
")",
"msg",
"=",
"\"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"",
"msg",
"+=",
"original_trace",
"raise",
"Exception",
"(",
"msg",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/train.py#L103-L111 |
||
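Reading the row above in isolation hides how the handler gets wired up. Below is a minimal, hedged sketch of the usual pattern: a parent process creates the error queue, registers the handler, and records child PIDs. The `add_child` helper and the choice of SIGUSR1 as the wake-up signal are illustrative assumptions, not code from this dataset row.

```python
import os
import signal
from multiprocessing import Queue

class ErrorHandler(object):
    """Minimal sketch: collect child tracebacks and re-raise in the parent."""

    def __init__(self, error_queue):
        self.error_queue = error_queue      # workers put (rank, traceback) here
        self.children_pids = []
        # Assumption: workers signal the parent with SIGUSR1 on failure.
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        self.children_pids.append(pid)

    def signal_handler(self, signalnum, stackframe):
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)     # kill children processes first
        rank, original_trace = self.error_queue.get()
        msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
        raise Exception(msg + original_trace)

error_queue = Queue()
handler = ErrorHandler(error_queue)
```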
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/model_builder.py | python | build_embeddings | (opt, word_dict, feature_dicts, for_encoder=True) | return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam") | Build an Embeddings instance.
Args:
opt: the options from the current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionaries.
for_encoder(bool): whether to build Embeddings for the encoder or the decoder. | Build an Embeddings instance.
Args:
opt: the options from the current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionaries.
for_encoder(bool): whether to build Embeddings for the encoder or the decoder. | [
"Build",
"an",
"Embeddings",
"instance",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"word_dict",
"(",
"Vocab",
")",
":",
"words",
"dictionary",
".",
"feature_dicts",
"(",
"[",
"Vocab",
"]",
"optional",
")",
":",
"a",
"list",
"of",
"feature",
"dictionary",
".",
"for_encoder",
"(",
"bool",
")",
":",
"build",
"Embeddings",
"for",
"encoder",
"or",
"decoder?"
] | def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
"""
Build an Embeddings instance.
Args:
opt: the options from the current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionaries.
for_encoder(bool): whether to build Embeddings for the encoder or the decoder.
"""
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[inputters.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam") | [
"def",
"build_embeddings",
"(",
"opt",
",",
"word_dict",
",",
"feature_dicts",
",",
"for_encoder",
"=",
"True",
")",
":",
"if",
"for_encoder",
":",
"embedding_dim",
"=",
"opt",
".",
"src_word_vec_size",
"else",
":",
"embedding_dim",
"=",
"opt",
".",
"tgt_word_vec_size",
"word_padding_idx",
"=",
"word_dict",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"num_word_embeddings",
"=",
"len",
"(",
"word_dict",
")",
"feats_padding_idx",
"=",
"[",
"feat_dict",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"for",
"feat_dict",
"in",
"feature_dicts",
"]",
"num_feat_embeddings",
"=",
"[",
"len",
"(",
"feat_dict",
")",
"for",
"feat_dict",
"in",
"feature_dicts",
"]",
"return",
"Embeddings",
"(",
"word_vec_size",
"=",
"embedding_dim",
",",
"position_encoding",
"=",
"opt",
".",
"position_encoding",
",",
"feat_merge",
"=",
"opt",
".",
"feat_merge",
",",
"feat_vec_exponent",
"=",
"opt",
".",
"feat_vec_exponent",
",",
"feat_vec_size",
"=",
"opt",
".",
"feat_vec_size",
",",
"dropout",
"=",
"opt",
".",
"dropout",
",",
"word_padding_idx",
"=",
"word_padding_idx",
",",
"feat_padding_idx",
"=",
"feats_padding_idx",
",",
"word_vocab_size",
"=",
"num_word_embeddings",
",",
"feat_vocab_sizes",
"=",
"num_feat_embeddings",
",",
"sparse",
"=",
"opt",
".",
"optim",
"==",
"\"sparseadam\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/model_builder.py#L29-L62 |
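A hedged usage sketch for `build_embeddings` above. `FakeVocab` is a stand-in for a torchtext `Vocab` (the function only touches `stoi` and `len()`), and `<blank>` is assumed to be the value of `inputters.PAD_WORD`; the option attributes mirror exactly the ones the function reads.

```python
from types import SimpleNamespace

# Stand-in for a torchtext Vocab; build_embeddings only needs stoi and len().
class FakeVocab:
    def __init__(self, tokens):
        self.stoi = {tok: i for i, tok in enumerate(tokens)}
    def __len__(self):
        return len(self.stoi)

PAD = "<blank>"  # assumed value of inputters.PAD_WORD
src_dict = FakeVocab([PAD, "<s>", "</s>", "hello", "world"])

opt = SimpleNamespace(src_word_vec_size=500, tgt_word_vec_size=500,
                      position_encoding=False, feat_merge="concat",
                      feat_vec_exponent=0.7, feat_vec_size=-1,
                      dropout=0.3, optim="adam")  # sparse=True only under "sparseadam"

emb = build_embeddings(opt, src_dict, feature_dicts=[])               # encoder side
# tgt_emb = build_embeddings(opt, tgt_dict, [], for_encoder=False)    # decoder side
```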
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/model_builder.py | python | build_encoder | (opt, embeddings) | Dispatcher function for the various encoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this encoder. | Dispatcher function for the various encoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this encoder. | [
"Various",
"encoder",
"dispatcher",
"function",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"embeddings",
"(",
"Embeddings",
")",
":",
"vocab",
"embeddings",
"for",
"this",
"encoder",
"."
] | def build_encoder(opt, embeddings):
"""
Dispatcher function for the various encoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
# return TransformerEncoder(opt.enc_layers, opt.rnn_size,
# opt.heads, opt.transformer_ff,
# opt.dropout, embeddings)
return None
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.dropout, embeddings,
opt.bridge) | [
"def",
"build_encoder",
"(",
"opt",
",",
"embeddings",
")",
":",
"if",
"opt",
".",
"encoder_type",
"==",
"\"transformer\"",
":",
"# return TransformerEncoder(opt.enc_layers, opt.rnn_size,",
"# opt.heads, opt.transformer_ff,",
"# opt.dropout, embeddings)",
"return",
"None",
"elif",
"opt",
".",
"encoder_type",
"==",
"\"cnn\"",
":",
"return",
"CNNEncoder",
"(",
"opt",
".",
"enc_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"cnn_kernel_width",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"encoder_type",
"==",
"\"mean\"",
":",
"return",
"MeanEncoder",
"(",
"opt",
".",
"enc_layers",
",",
"embeddings",
")",
"else",
":",
"# \"rnn\" or \"brnn\"",
"return",
"RNNEncoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"enc_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"bridge",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/model_builder.py#L65-L87 |
||
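The `if/elif` chain in `build_encoder` is a plain dispatcher; as a design note, the same decision can be written as a factory table. The sketch below is a stylistic alternative, not the repo's code: it assumes the same imports as `model_builder.py` (`CNNEncoder`, `MeanEncoder`, `RNNEncoder`) and keeps this fork's quirk that the transformer branch is disabled and returns `None`.

```python
# Same dispatch as build_encoder, expressed as a factory table.
ENCODER_FACTORIES = {
    "transformer": lambda opt, emb: None,  # disabled in this fork (see above)
    "cnn": lambda opt, emb: CNNEncoder(opt.enc_layers, opt.rnn_size,
                                       opt.cnn_kernel_width, opt.dropout, emb),
    "mean": lambda opt, emb: MeanEncoder(opt.enc_layers, emb),
}

def build_encoder_v2(opt, embeddings):
    factory = ENCODER_FACTORIES.get(opt.encoder_type)
    if factory is not None:
        return factory(opt, embeddings)
    # "rnn" and "brnn" both fall through to the recurrent encoder.
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.rnn_size, opt.dropout, embeddings, opt.bridge)
```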
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/model_builder.py | python | build_decoder | (opt, embeddings) | Dispatcher function for the various decoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this decoder. | Dispatcher function for the various decoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this decoder. | [
"Various",
"decoder",
"dispatcher",
"function",
".",
"Args",
":",
"opt",
":",
"the",
"option",
"in",
"current",
"environment",
".",
"embeddings",
"(",
"Embeddings",
")",
":",
"vocab",
"embeddings",
"for",
"this",
"decoder",
"."
] | def build_decoder(opt, embeddings):
"""
Dispatcher function for the various decoder types.
Args:
opt: the options from the current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.rnn_size,
opt.heads, opt.transformer_ff,
opt.global_attention, opt.copy_attn,
opt.self_attn_type,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
# others
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn) | [
"def",
"build_decoder",
"(",
"opt",
",",
"embeddings",
")",
":",
"if",
"opt",
".",
"decoder_type",
"==",
"\"transformer\"",
":",
"return",
"TransformerDecoder",
"(",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"heads",
",",
"opt",
".",
"transformer_ff",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"self_attn_type",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"decoder_type",
"==",
"\"cnn\"",
":",
"return",
"CNNDecoder",
"(",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"cnn_kernel_width",
",",
"opt",
".",
"dropout",
",",
"embeddings",
")",
"elif",
"opt",
".",
"input_feed",
":",
"# others",
"return",
"InputFeedRNNDecoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"global_attention_function",
",",
"opt",
".",
"coverage_attn",
",",
"opt",
".",
"context_gate",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"reuse_copy_attn",
")",
"else",
":",
"return",
"StdRNNDecoder",
"(",
"opt",
".",
"rnn_type",
",",
"opt",
".",
"brnn",
",",
"opt",
".",
"dec_layers",
",",
"opt",
".",
"rnn_size",
",",
"opt",
".",
"global_attention",
",",
"opt",
".",
"global_attention_function",
",",
"opt",
".",
"coverage_attn",
",",
"opt",
".",
"context_gate",
",",
"opt",
".",
"copy_attn",
",",
"opt",
".",
"dropout",
",",
"embeddings",
",",
"opt",
".",
"reuse_copy_attn",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/model_builder.py#L90-L130 |
||
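Note that the RNN path above splits on `opt.input_feed`, and the two RNN decoders take identical constructor arguments. Below is a hedged sketch of the options that drive the default input-feeding branch; the values mirror the `opts.py` defaults shown later in this table.

```python
from types import SimpleNamespace

# Illustrative values only; they mirror the opts.py defaults further down.
opt = SimpleNamespace(decoder_type="rnn", input_feed=1, rnn_type="LSTM",
                      brnn=False, dec_layers=2, rnn_size=500,
                      global_attention="general",
                      global_attention_function="softmax",
                      coverage_attn=False, context_gate=None,
                      copy_attn=False, dropout=0.3, reuse_copy_attn=False)

# decoder = build_decoder(opt, tgt_embeddings)  # -> InputFeedRNNDecoder here
```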
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/model_builder.py | python | build_base_model | (model_opt, fields, gpu, checkpoint=None) | return model | Args:
model_opt: the options loaded from the checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use the gpu.
checkpoint: the model generated by the train phase, or a resumed
snapshot of a model from a stopped training run.
Returns:
the NMTModel. | Args:
model_opt: the options loaded from the checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use the gpu.
checkpoint: the model generated by the train phase, or a resumed
snapshot of a model from a stopped training run.
Returns:
the NMTModel. | [
"Args",
":",
"model_opt",
":",
"the",
"option",
"loaded",
"from",
"checkpoint",
".",
"fields",
":",
"Field",
"objects",
"for",
"the",
"model",
".",
"gpu",
"(",
"bool",
")",
":",
"whether",
"to",
"use",
"gpu",
".",
"checkpoint",
":",
"the",
"model",
"gnerated",
"by",
"train",
"phase",
"or",
"a",
"resumed",
"snapshot",
"model",
"from",
"a",
"stopped",
"training",
".",
"Returns",
":",
"the",
"NMTModel",
"."
] | def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the options loaded from the checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use the gpu.
checkpoint: the model generated by the train phase, or a resumed
snapshot of a model from a stopped training run.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
# Build encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
model_parameters = filter(lambda p: p.requires_grad, encoder.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
# Build decoder.
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
model.model_type = model_opt.model_type
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)), gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.rnn_size,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'])
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
model.to(device)
return model | [
"def",
"build_base_model",
"(",
"model_opt",
",",
"fields",
",",
"gpu",
",",
"checkpoint",
"=",
"None",
")",
":",
"assert",
"model_opt",
".",
"model_type",
"in",
"[",
"\"text\"",
",",
"\"img\"",
",",
"\"audio\"",
"]",
",",
"(",
"\"Unsupported model type %s\"",
"%",
"(",
"model_opt",
".",
"model_type",
")",
")",
"# Build encoder.",
"if",
"model_opt",
".",
"model_type",
"==",
"\"text\"",
":",
"src_dict",
"=",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
"feature_dicts",
"=",
"inputters",
".",
"collect_feature_vocabs",
"(",
"fields",
",",
"'src'",
")",
"src_embeddings",
"=",
"build_embeddings",
"(",
"model_opt",
",",
"src_dict",
",",
"feature_dicts",
")",
"encoder",
"=",
"build_encoder",
"(",
"model_opt",
",",
"src_embeddings",
")",
"elif",
"model_opt",
".",
"model_type",
"==",
"\"img\"",
":",
"if",
"(",
"\"image_channel_size\"",
"not",
"in",
"model_opt",
".",
"__dict__",
")",
":",
"image_channel_size",
"=",
"3",
"else",
":",
"image_channel_size",
"=",
"model_opt",
".",
"image_channel_size",
"encoder",
"=",
"ImageEncoder",
"(",
"model_opt",
".",
"enc_layers",
",",
"model_opt",
".",
"brnn",
",",
"model_opt",
".",
"rnn_size",
",",
"model_opt",
".",
"dropout",
",",
"image_channel_size",
")",
"elif",
"model_opt",
".",
"model_type",
"==",
"\"audio\"",
":",
"encoder",
"=",
"AudioEncoder",
"(",
"model_opt",
".",
"enc_layers",
",",
"model_opt",
".",
"brnn",
",",
"model_opt",
".",
"rnn_size",
",",
"model_opt",
".",
"dropout",
",",
"model_opt",
".",
"sample_rate",
",",
"model_opt",
".",
"window_size",
")",
"model_parameters",
"=",
"filter",
"(",
"lambda",
"p",
":",
"p",
".",
"requires_grad",
",",
"encoder",
".",
"parameters",
"(",
")",
")",
"params",
"=",
"sum",
"(",
"[",
"np",
".",
"prod",
"(",
"p",
".",
"size",
"(",
")",
")",
"for",
"p",
"in",
"model_parameters",
"]",
")",
"# Build decoder.",
"tgt_dict",
"=",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"feature_dicts",
"=",
"inputters",
".",
"collect_feature_vocabs",
"(",
"fields",
",",
"'tgt'",
")",
"tgt_embeddings",
"=",
"build_embeddings",
"(",
"model_opt",
",",
"tgt_dict",
",",
"feature_dicts",
",",
"for_encoder",
"=",
"False",
")",
"# Share the embedding matrix - preprocess with share_vocab required.",
"if",
"model_opt",
".",
"share_embeddings",
":",
"# src/tgt vocab should be the same if `-share_vocab` is specified.",
"if",
"src_dict",
"!=",
"tgt_dict",
":",
"raise",
"AssertionError",
"(",
"'The `-share_vocab` should be set during '",
"'preprocess if you use share_embeddings!'",
")",
"tgt_embeddings",
".",
"word_lut",
".",
"weight",
"=",
"src_embeddings",
".",
"word_lut",
".",
"weight",
"decoder",
"=",
"build_decoder",
"(",
"model_opt",
",",
"tgt_embeddings",
")",
"# Build NMTModel(= encoder + decoder).",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
"if",
"gpu",
"else",
"\"cpu\"",
")",
"model",
"=",
"onmt",
".",
"models",
".",
"NMTModel",
"(",
"encoder",
",",
"decoder",
")",
"model",
".",
"model_type",
"=",
"model_opt",
".",
"model_type",
"# Build Generator.",
"if",
"not",
"model_opt",
".",
"copy_attn",
":",
"if",
"model_opt",
".",
"generator_function",
"==",
"\"sparsemax\"",
":",
"gen_func",
"=",
"onmt",
".",
"modules",
".",
"sparse_activations",
".",
"LogSparsemax",
"(",
"dim",
"=",
"-",
"1",
")",
"else",
":",
"gen_func",
"=",
"nn",
".",
"LogSoftmax",
"(",
"dim",
"=",
"-",
"1",
")",
"generator",
"=",
"nn",
".",
"Sequential",
"(",
"nn",
".",
"Linear",
"(",
"model_opt",
".",
"rnn_size",
",",
"len",
"(",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
")",
",",
"gen_func",
")",
"if",
"model_opt",
".",
"share_decoder_embeddings",
":",
"generator",
"[",
"0",
"]",
".",
"weight",
"=",
"decoder",
".",
"embeddings",
".",
"word_lut",
".",
"weight",
"else",
":",
"generator",
"=",
"CopyGenerator",
"(",
"model_opt",
".",
"rnn_size",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
"# Load the model states from checkpoint or initialize them.",
"if",
"checkpoint",
"is",
"not",
"None",
":",
"model",
".",
"load_state_dict",
"(",
"checkpoint",
"[",
"'model'",
"]",
",",
"strict",
"=",
"False",
")",
"generator",
".",
"load_state_dict",
"(",
"checkpoint",
"[",
"'generator'",
"]",
")",
"else",
":",
"if",
"model_opt",
".",
"param_init",
"!=",
"0.0",
":",
"for",
"p",
"in",
"model",
".",
"parameters",
"(",
")",
":",
"p",
".",
"data",
".",
"uniform_",
"(",
"-",
"model_opt",
".",
"param_init",
",",
"model_opt",
".",
"param_init",
")",
"for",
"p",
"in",
"generator",
".",
"parameters",
"(",
")",
":",
"p",
".",
"data",
".",
"uniform_",
"(",
"-",
"model_opt",
".",
"param_init",
",",
"model_opt",
".",
"param_init",
")",
"if",
"model_opt",
".",
"param_init_glorot",
":",
"for",
"p",
"in",
"model",
".",
"parameters",
"(",
")",
":",
"if",
"p",
".",
"dim",
"(",
")",
">",
"1",
":",
"xavier_uniform_",
"(",
"p",
")",
"for",
"p",
"in",
"generator",
".",
"parameters",
"(",
")",
":",
"if",
"p",
".",
"dim",
"(",
")",
">",
"1",
":",
"xavier_uniform_",
"(",
"p",
")",
"if",
"hasattr",
"(",
"model",
".",
"encoder",
",",
"'embeddings'",
")",
":",
"model",
".",
"encoder",
".",
"embeddings",
".",
"load_pretrained_vectors",
"(",
"model_opt",
".",
"pre_word_vecs_enc",
",",
"model_opt",
".",
"fix_word_vecs_enc",
")",
"if",
"hasattr",
"(",
"model",
".",
"decoder",
",",
"'embeddings'",
")",
":",
"model",
".",
"decoder",
".",
"embeddings",
".",
"load_pretrained_vectors",
"(",
"model_opt",
".",
"pre_word_vecs_dec",
",",
"model_opt",
".",
"fix_word_vecs_dec",
")",
"# Add generator to model (this registers it as parameter of model).",
"model",
".",
"generator",
"=",
"generator",
"model",
".",
"to",
"(",
"device",
")",
"return",
"model"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/model_builder.py#L155-L266 |
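A hedged sketch of the typical call pattern for `build_base_model` when restoring from a checkpoint. The filename is hypothetical, and `inputters.load_fields_from_vocab` is an assumption based on standard OpenNMT-py flow rather than code shown in this row.

```python
import torch

# Hedged sketch: restore a saved model for inference.
checkpoint = torch.load("model_step_20000.pt",       # hypothetical filename
                        map_location=lambda storage, loc: storage)
model_opt = checkpoint["opt"]
fields = inputters.load_fields_from_vocab(checkpoint["vocab"])  # assumption
model = build_base_model(model_opt, fields,
                         gpu=torch.cuda.is_available(),
                         checkpoint=checkpoint)
model.eval()  # inference mode; training keeps model.train()
```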
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/model_builder.py | python | build_model | (model_opt, opt, fields, checkpoint) | return model | Build the Model | Build the Model | [
"Build",
"the",
"Model"
] | def build_model(model_opt, opt, fields, checkpoint):
""" Build the Model """
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model | [
"def",
"build_model",
"(",
"model_opt",
",",
"opt",
",",
"fields",
",",
"checkpoint",
")",
":",
"logger",
".",
"info",
"(",
"'Building model...'",
")",
"model",
"=",
"build_base_model",
"(",
"model_opt",
",",
"fields",
",",
"use_gpu",
"(",
"opt",
")",
",",
"checkpoint",
")",
"logger",
".",
"info",
"(",
"model",
")",
"return",
"model"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/model_builder.py#L269-L275 |
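`build_model` is a thin logging wrapper around `build_base_model`. The sketch below shows how the two option objects usually differ; the resume logic is an assumption based on standard OpenNMT-py `train_single.py` flow, not shown in this row.

```python
# Assumed resume logic, mirroring standard OpenNMT train_single.py flow.
if checkpoint is not None:
    model_opt = checkpoint["opt"]   # architecture must match the saved weights
else:
    model_opt = opt                 # fresh run: current options define the model
model = build_model(model_opt, opt, fields, checkpoint)
```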
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | build_trainer | (opt, device_id, model, fields,
optim, data_type, model_saver=None) | return trainer | Simplify `Trainer` creation based on user `opt`s.
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model | Simplify `Trainer` creation based on user `opt`s. | [
"Simplify",
"Trainer",
"creation",
"based",
"on",
"user",
"opt",
"s",
"*"
] | def build_trainer(opt, device_id, model, fields,
optim, data_type, model_saver=None):
"""
Simplify `Trainer` creation based on user `opt`s.
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
# fine
train_loss = onmt.utils.loss.build_loss_compute(
model, fields["tgt"].vocab, opt)
valid_loss = onmt.utils.loss.build_loss_compute(
model, fields["tgt"].vocab, opt, train=False)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
norm_method = opt.normalization
grad_accum_count = opt.accum_count
n_gpu = opt.world_size
if device_id >= 0:
gpu_rank = opt.gpu_ranks[device_id]
else:
gpu_rank = 0
n_gpu = 0
gpu_verbose_level = opt.gpu_verbose_level
report_manager = onmt.utils.build_report_manager(opt)
trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,
shard_size, data_type, norm_method,
grad_accum_count, n_gpu, gpu_rank,
gpu_verbose_level, report_manager,
model_saver=model_saver)
return trainer | [
"def",
"build_trainer",
"(",
"opt",
",",
"device_id",
",",
"model",
",",
"fields",
",",
"optim",
",",
"data_type",
",",
"model_saver",
"=",
"None",
")",
":",
"# fine",
"train_loss",
"=",
"onmt",
".",
"utils",
".",
"loss",
".",
"build_loss_compute",
"(",
"model",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
",",
"opt",
")",
"valid_loss",
"=",
"onmt",
".",
"utils",
".",
"loss",
".",
"build_loss_compute",
"(",
"model",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
",",
"opt",
",",
"train",
"=",
"False",
")",
"trunc_size",
"=",
"opt",
".",
"truncated_decoder",
"# Badly named...",
"shard_size",
"=",
"opt",
".",
"max_generator_batches",
"norm_method",
"=",
"opt",
".",
"normalization",
"grad_accum_count",
"=",
"opt",
".",
"accum_count",
"n_gpu",
"=",
"opt",
".",
"world_size",
"if",
"device_id",
">=",
"0",
":",
"gpu_rank",
"=",
"opt",
".",
"gpu_ranks",
"[",
"device_id",
"]",
"else",
":",
"gpu_rank",
"=",
"0",
"n_gpu",
"=",
"0",
"gpu_verbose_level",
"=",
"opt",
".",
"gpu_verbose_level",
"report_manager",
"=",
"onmt",
".",
"utils",
".",
"build_report_manager",
"(",
"opt",
")",
"trainer",
"=",
"onmt",
".",
"Trainer",
"(",
"model",
",",
"train_loss",
",",
"valid_loss",
",",
"optim",
",",
"trunc_size",
",",
"shard_size",
",",
"data_type",
",",
"norm_method",
",",
"grad_accum_count",
",",
"n_gpu",
",",
"gpu_rank",
",",
"gpu_verbose_level",
",",
"report_manager",
",",
"model_saver",
"=",
"model_saver",
")",
"return",
"trainer"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L23-L63 |
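A hedged wiring sketch for `build_trainer`. `onmt.models.build_model_saver` is assumed from standard OpenNMT-py, and `device_id=-1` exercises the CPU branch above (`gpu_rank = 0`, `n_gpu = 0`).

```python
# Hedged wiring sketch; argument names mirror the function above.
model_saver = onmt.models.build_model_saver(model_opt, opt, model,
                                            fields, optim)  # assumption
trainer = build_trainer(opt, device_id=-1, model=model, fields=fields,
                        optim=optim, data_type="text",
                        model_saver=model_saver)
```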
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer.train | (self, train_iter_fct, valid_iter_fct, train_steps, valid_steps) | return total_stats | The main training loop:
iterates over the training data (i.e. `train_iter_fct`)
and runs validation (i.e. iterates over `valid_iter_fct`).
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Returns:
total_stats (:obj:`onmt.utils.Statistics`) | The main training loop:
iterates over the training data (i.e. `train_iter_fct`)
and runs validation (i.e. iterates over `valid_iter_fct`). | [
"The",
"main",
"training",
"loops",
".",
"by",
"iterating",
"over",
"training",
"data",
"(",
"i",
".",
"e",
".",
"train_iter_fct",
")",
"and",
"running",
"validation",
"(",
"i",
".",
"e",
".",
"iterating",
"over",
"valid_iter_fct"
] | def train(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps):
"""
The main training loop:
iterates over the training data (i.e. `train_iter_fct`)
and runs validation (i.e. iterates over `valid_iter_fct`).
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Returns:
total_stats (:obj:`onmt.utils.Statistics`)
"""
logger.info('Start training...')
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = onmt.utils.Statistics()
report_stats = onmt.utils.Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
# import pdb; pdb.set_trace()
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
if self.gpu_verbose_level > 1:
logger.info("GpuRank %d: index: %d accum: %d"
% (self.gpu_rank, i, accum))
true_batchs.append(batch)
if self.norm_method == "tokens":
num_tokens = batch.tgt[1:].ne(
self.train_loss.padding_idx).sum()
normalization += num_tokens.item()
else:
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.gpu_verbose_level > 0:
logger.info("GpuRank %d: reduce_counter: %d \
n_minibatch %d"
% (self.gpu_rank, reduce_counter,
len(true_batchs)))
if self.n_gpu > 1:
normalization = sum(onmt.utils.distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % valid_steps == 0):
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: validate step %d'
% (self.gpu_rank, step))
valid_iter = valid_iter_fct()
with torch.no_grad():
valid_stats = self.validate(valid_iter)
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: gather valid stat \
step %d' % (self.gpu_rank, step))
valid_stats = self._maybe_gather_stats(valid_stats)
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: report stat step %d'
% (self.gpu_rank, step))
self._report_step(self.optim.learning_rate,
step, valid_stats=valid_stats)
if self.gpu_rank == 0:
self._maybe_save(step)
step += 1
if step > train_steps:
break
if self.gpu_verbose_level > 0:
logger.info('GpuRank %d: we completed an epoch \
at step %d' % (self.gpu_rank, step))
train_iter = train_iter_fct()
return total_stats | [
"def",
"train",
"(",
"self",
",",
"train_iter_fct",
",",
"valid_iter_fct",
",",
"train_steps",
",",
"valid_steps",
")",
":",
"logger",
".",
"info",
"(",
"'Start training...'",
")",
"step",
"=",
"self",
".",
"optim",
".",
"_step",
"+",
"1",
"true_batchs",
"=",
"[",
"]",
"accum",
"=",
"0",
"normalization",
"=",
"0",
"train_iter",
"=",
"train_iter_fct",
"(",
")",
"total_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"report_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"self",
".",
"_start_report_manager",
"(",
"start_time",
"=",
"total_stats",
".",
"start_time",
")",
"while",
"step",
"<=",
"train_steps",
":",
"reduce_counter",
"=",
"0",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"train_iter",
")",
":",
"# import pdb; pdb.set_trace()",
"if",
"self",
".",
"n_gpu",
"==",
"0",
"or",
"(",
"i",
"%",
"self",
".",
"n_gpu",
"==",
"self",
".",
"gpu_rank",
")",
":",
"if",
"self",
".",
"gpu_verbose_level",
">",
"1",
":",
"logger",
".",
"info",
"(",
"\"GpuRank %d: index: %d accum: %d\"",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"i",
",",
"accum",
")",
")",
"true_batchs",
".",
"append",
"(",
"batch",
")",
"if",
"self",
".",
"norm_method",
"==",
"\"tokens\"",
":",
"num_tokens",
"=",
"batch",
".",
"tgt",
"[",
"1",
":",
"]",
".",
"ne",
"(",
"self",
".",
"train_loss",
".",
"padding_idx",
")",
".",
"sum",
"(",
")",
"normalization",
"+=",
"num_tokens",
".",
"item",
"(",
")",
"else",
":",
"normalization",
"+=",
"batch",
".",
"batch_size",
"accum",
"+=",
"1",
"if",
"accum",
"==",
"self",
".",
"grad_accum_count",
":",
"reduce_counter",
"+=",
"1",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"reduce_counter",
",",
"len",
"(",
"true_batchs",
")",
")",
")",
"if",
"self",
".",
"n_gpu",
">",
"1",
":",
"normalization",
"=",
"sum",
"(",
"onmt",
".",
"utils",
".",
"distributed",
".",
"all_gather_list",
"(",
"normalization",
")",
")",
"self",
".",
"_gradient_accumulation",
"(",
"true_batchs",
",",
"normalization",
",",
"total_stats",
",",
"report_stats",
")",
"report_stats",
"=",
"self",
".",
"_maybe_report_training",
"(",
"step",
",",
"train_steps",
",",
"self",
".",
"optim",
".",
"learning_rate",
",",
"report_stats",
")",
"true_batchs",
"=",
"[",
"]",
"accum",
"=",
"0",
"normalization",
"=",
"0",
"if",
"(",
"step",
"%",
"valid_steps",
"==",
"0",
")",
":",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: validate step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"valid_iter",
"=",
"valid_iter_fct",
"(",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"valid_stats",
"=",
"self",
".",
"validate",
"(",
"valid_iter",
")",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: gather valid stat \\\n step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"valid_stats",
"=",
"self",
".",
"_maybe_gather_stats",
"(",
"valid_stats",
")",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: report stat step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"self",
".",
"_report_step",
"(",
"self",
".",
"optim",
".",
"learning_rate",
",",
"step",
",",
"valid_stats",
"=",
"valid_stats",
")",
"if",
"self",
".",
"gpu_rank",
"==",
"0",
":",
"self",
".",
"_maybe_save",
"(",
"step",
")",
"step",
"+=",
"1",
"if",
"step",
">",
"train_steps",
":",
"break",
"if",
"self",
".",
"gpu_verbose_level",
">",
"0",
":",
"logger",
".",
"info",
"(",
"'GpuRank %d: we completed an epoch \\\n at step %d'",
"%",
"(",
"self",
".",
"gpu_rank",
",",
"step",
")",
")",
"train_iter",
"=",
"train_iter_fct",
"(",
")",
"return",
"total_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L120-L222 |
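A sketch of driving `Trainer.train` as its docstring describes: the iterator factories are zero-argument callables so the loop can rebuild them at each epoch boundary. `build_dataset_iter` and `lazily_load_dataset` are assumed from standard OpenNMT-py `train_single.py`, not shown in this row.

```python
# The factories are re-invoked when an epoch's iterator is exhausted.
def train_iter_fct():
    return build_dataset_iter(lazily_load_dataset("train", opt), fields, opt)

def valid_iter_fct():
    return build_dataset_iter(lazily_load_dataset("valid", opt), fields, opt)

stats = trainer.train(train_iter_fct, valid_iter_fct,
                      train_steps=opt.train_steps,
                      valid_steps=opt.valid_steps)
```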
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer.validate | (self, valid_iter) | return stats | Validate model.
valid_iter: validation data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics | Validate model.
valid_iter: validation data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics | [
"Validate",
"model",
".",
"valid_iter",
":",
"validate",
"data",
"iterator",
"Returns",
":",
":",
"obj",
":",
"nmt",
".",
"Statistics",
":",
"validation",
"loss",
"statistics"
] | def validate(self, valid_iter):
""" Validate model.
valid_iter: validation data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = onmt.utils.Statistics()
for batch in valid_iter:
src = inputters.make_features(batch, 'src', self.data_type)
if self.data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
tgt = inputters.make_features(batch, 'tgt')
# F-prop through the model.
src_sents = batch.src_sents
outputs, attns, _ = self.model(src, tgt, src_sents,src_lengths)
# Compute loss.
batch_stats = self.valid_loss.monolithic_compute_loss(
batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats | [
"def",
"validate",
"(",
"self",
",",
"valid_iter",
")",
":",
"# Set model in validating mode.",
"self",
".",
"model",
".",
"eval",
"(",
")",
"stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"for",
"batch",
"in",
"valid_iter",
":",
"src",
"=",
"inputters",
".",
"make_features",
"(",
"batch",
",",
"'src'",
",",
"self",
".",
"data_type",
")",
"if",
"self",
".",
"data_type",
"==",
"'text'",
":",
"_",
",",
"src_lengths",
"=",
"batch",
".",
"src",
"else",
":",
"src_lengths",
"=",
"None",
"tgt",
"=",
"inputters",
".",
"make_features",
"(",
"batch",
",",
"'tgt'",
")",
"# F-prop through the model.",
"src_sents",
"=",
"batch",
".",
"src_sents",
"outputs",
",",
"attns",
",",
"_",
"=",
"self",
".",
"model",
"(",
"src",
",",
"tgt",
",",
"src_sents",
",",
"src_lengths",
")",
"# Compute loss.",
"batch_stats",
"=",
"self",
".",
"valid_loss",
".",
"monolithic_compute_loss",
"(",
"batch",
",",
"outputs",
",",
"attns",
")",
"# Update statistics.",
"stats",
".",
"update",
"(",
"batch_stats",
")",
"# Set model back to training mode.",
"self",
".",
"model",
".",
"train",
"(",
")",
"return",
"stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L224-L259 |
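Mirroring how `Trainer.train` calls it above, validation runs under `torch.no_grad()`. The `ppl()` and `accuracy()` accessors are an assumption about the usual `onmt.utils.Statistics` API, not shown in this row.

```python
import torch

# Validation pass, wrapped exactly as Trainer.train does it.
valid_iter = valid_iter_fct()
with torch.no_grad():
    valid_stats = trainer.validate(valid_iter)
print("validation ppl: %.2f, acc: %.2f"
      % (valid_stats.ppl(), valid_stats.accuracy()))  # assumed accessors
```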
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer._start_report_manager | (self, start_time=None) | Simple function to start report manager (if any) | Simple function to start report manager (if any) | [
"Simple",
"function",
"to",
"start",
"report",
"manager",
"(",
"if",
"any",
")"
] | def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time | [
"def",
"_start_report_manager",
"(",
"self",
",",
"start_time",
"=",
"None",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"if",
"start_time",
"is",
"None",
":",
"self",
".",
"report_manager",
".",
"start",
"(",
")",
"else",
":",
"self",
".",
"report_manager",
".",
"start_time",
"=",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L338-L346 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer._maybe_gather_stats | (self, stat) | return stat | Gather statistics in multi-process cases
Args:
stat(:obj:`onmt.utils.Statistics`): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object | Gather statistics in multi-process cases | [
"Gather",
"statistics",
"in",
"multi",
"-",
"processes",
"cases"
] | def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-process cases
Args:
stat(:obj:`onmt.utils.Statistics`): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return onmt.utils.Statistics.all_gather_stats(stat)
return stat | [
"def",
"_maybe_gather_stats",
"(",
"self",
",",
"stat",
")",
":",
"if",
"stat",
"is",
"not",
"None",
"and",
"self",
".",
"n_gpu",
">",
"1",
":",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
".",
"all_gather_stats",
"(",
"stat",
")",
"return",
"stat"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L348-L361 |
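The multi-GPU branch above is a collective merge: each rank contributes its local `Statistics` and receives the combined result, so reported numbers agree across processes. A self-contained restatement of the same logic:

```python
def gather_if_distributed(stat, n_gpu):
    """Mirror of _maybe_gather_stats: merge per-rank Statistics objects."""
    if stat is not None and n_gpu > 1:
        # Collective call: every rank contributes and receives the merged stats.
        return onmt.utils.Statistics.all_gather_stats(stat)
    return stat
```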
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer._maybe_report_training | (self, step, num_steps, learning_rate,
report_stats) | Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc | Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc | [
"Simple",
"function",
"to",
"report",
"training",
"stats",
"(",
"if",
"report_manager",
"is",
"set",
")",
"see",
"onmt",
".",
"utils",
".",
"ReportManagerBase",
".",
"report_training",
"for",
"doc"
] | def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1) | [
"def",
"_maybe_report_training",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"return",
"self",
".",
"report_manager",
".",
"report_training",
"(",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
",",
"multigpu",
"=",
"self",
".",
"n_gpu",
">",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L363-L372 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer._report_step | (self, learning_rate, step, train_stats=None,
valid_stats=None) | Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc | Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc | [
"Simple",
"function",
"to",
"report",
"stats",
"(",
"if",
"report_manager",
"is",
"set",
")",
"see",
"onmt",
".",
"utils",
".",
"ReportManagerBase",
".",
"report_step",
"for",
"doc"
] | def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats) | [
"def",
"_report_step",
"(",
"self",
",",
"learning_rate",
",",
"step",
",",
"train_stats",
"=",
"None",
",",
"valid_stats",
"=",
"None",
")",
":",
"if",
"self",
".",
"report_manager",
"is",
"not",
"None",
":",
"return",
"self",
".",
"report_manager",
".",
"report_step",
"(",
"learning_rate",
",",
"step",
",",
"train_stats",
"=",
"train_stats",
",",
"valid_stats",
"=",
"valid_stats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L374-L383 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/trainer.py | python | Trainer._maybe_save | (self, step) | Save the model if a model saver is set | Save the model if a model saver is set | [
"Save",
"the",
"model",
"if",
"a",
"model",
"saver",
"is",
"set"
] | def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step) | [
"def",
"_maybe_save",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"model_saver",
"is",
"not",
"None",
":",
"self",
".",
"model_saver",
".",
"maybe_save",
"(",
"step",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/trainer.py#L385-L390 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/opts.py | python | model_opts | (parser) | These options are passed to the construction of the model.
Be careful with these as they will be used during translation. | These options are passed to the construction of the model.
Be careful with these as they will be used during translation. | [
"These",
"options",
"are",
"passed",
"to",
"the",
"construction",
"of",
"the",
"model",
".",
"Be",
"careful",
"with",
"these",
"as",
"they",
"will",
"be",
"used",
"during",
"translation",
"."
] | def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Embedding Options
group = parser.add_argument_group('Model-Embeddings')
group.add_argument('-src_word_vec_size', type=int, default=500,
help='Word embedding size for src.')
group.add_argument('-tgt_word_vec_size', type=int, default=500,
help='Word embedding size for tgt.')
group.add_argument('-word_vec_size', type=int, default=-1,
help='Word embedding size for src and tgt.')
group.add_argument('-share_decoder_embeddings', action='store_true',
help="""Use a shared weight matrix for the input and
output word embeddings in the decoder.""")
group.add_argument('-share_embeddings', action='store_true',
help="""Share the word embeddings between encoder
and decoder. Need to use shared dictionary for this
option.""")
group.add_argument('-position_encoding', action='store_true',
help="""Use a sin to mark relative words positions.
Necessary for non-RNN style models.
""")
group = parser.add_argument_group('Model-Embedding Features')
group.add_argument('-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help="""Merge action for incorporating features embeddings.
Options [concat|sum|mlp].""")
group.add_argument('-feat_vec_size', type=int, default=-1,
help="""If specified, feature embedding sizes
will be set to this. Otherwise, feat_vec_exponent
will be used.""")
group.add_argument('-feat_vec_exponent', type=float, default=0.7,
help="""If -feat_merge_size is not set, feature
embedding sizes will be set to N^feat_vec_exponent
where N is the number of values the feature takes.""")
# Encoder-Decoder Options
group = parser.add_argument_group('Model- Encoder-Decoder')
group.add_argument('-model_type', default='text',
help="""Type of source model to use. Allows
the system to incorporate non-text inputs.
Options are [text|img|audio].""")
group.add_argument('-encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="""Type of encoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|brnn|mean|transformer|cnn].""")
group.add_argument('-decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help="""Type of decoder layer to use. Non-RNN layers
are experimental. Options are
[rnn|transformer|cnn].""")
group.add_argument('-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
group.add_argument('-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
group.add_argument('-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
group.add_argument('-rnn_size', type=int, default=500,
help='Size of rnn hidden states')
group.add_argument('-cnn_kernel_width', type=int, default=3,
help="""Size of windows in the cnn, the kernel_size is
(cnn_kernel_width, 1) in conv layer""")
group.add_argument('-input_feed', type=int, default=1,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
group.add_argument('-bridge', action="store_true",
help="""Have an additional layer between the last encoder
state and the first decoder state""")
group.add_argument('-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="""The gate type to use in the RNNs""")
# group.add_argument('-residual', action="store_true",
# help="Add residual connections between RNN layers.")
group.add_argument('-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
group.add_argument('-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="""Type of context gate to use.
Do not select for no context gate.""")
# Attention options
group = parser.add_argument_group('Model- Attention')
group.add_argument('-global_attention', type=str, default='general',
choices=['dot', 'general', 'mlp'],
help="""The attention type to use:
dotprod or general (Luong) or MLP (Bahdanau)""")
group.add_argument('-global_attention_function', type=str,
default="softmax", choices=["softmax", "sparsemax"])
group.add_argument('-self_attn_type', type=str, default="scaled-dot",
help="""Self attention type in Transformer decoder
layer -- currently "scaled-dot" or "average" """)
group.add_argument('-heads', type=int, default=8,
help='Number of heads for transformer self-attention')
group.add_argument('-transformer_ff', type=int, default=2048,
help='Size of hidden transformer feed-forward')
# Generator and loss options.
group.add_argument('-copy_attn', action="store_true",
help='Train copy attention layer.')
group.add_argument('-generator_function', default="log_softmax",
choices=["log_softmax", "sparsemax"],
help="""Which function to use for generating
probabilities over the target vocabulary (choices:
log_softmax, sparsemax)""")
group.add_argument('-copy_attn_force', action="store_true",
help='When available, train to copy.')
group.add_argument('-reuse_copy_attn', action="store_true",
help="Reuse standard attention for copy")
group.add_argument('-copy_loss_by_seqlength', action="store_true",
help="Divide copy loss by length of sequence")
group.add_argument('-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
group.add_argument('-lambda_coverage', type=float, default=1,
help='Lambda value for coverage.') | [
"def",
"model_opts",
"(",
"parser",
")",
":",
"# Embedding Options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model-Embeddings'",
")",
"group",
".",
"add_argument",
"(",
"'-src_word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Word embedding size for src.'",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Word embedding size for tgt.'",
")",
"group",
".",
"add_argument",
"(",
"'-word_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'Word embedding size for src and tgt.'",
")",
"group",
".",
"add_argument",
"(",
"'-share_decoder_embeddings'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Use a shared weight matrix for the input and\n output word embeddings in the decoder.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_embeddings'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Share the word embeddings between encoder\n and decoder. Need to use shared dictionary for this\n option.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-position_encoding'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Use a sin to mark relative words positions.\n Necessary for non-RNN style models.\n \"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model-Embedding Features'",
")",
"group",
".",
"add_argument",
"(",
"'-feat_merge'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'concat'",
",",
"choices",
"=",
"[",
"'concat'",
",",
"'sum'",
",",
"'mlp'",
"]",
",",
"help",
"=",
"\"\"\"Merge action for incorporating features embeddings.\n Options [concat|sum|mlp].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-feat_vec_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"If specified, feature embedding sizes\n will be set to this. Otherwise, feat_vec_exponent\n will be used.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-feat_vec_exponent'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.7",
",",
"help",
"=",
"\"\"\"If -feat_merge_size is not set, feature\n embedding sizes will be set to N^feat_vec_exponent\n where N is the number of values the feature takes.\"\"\"",
")",
"# Encoder-Decoder Options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model- Encoder-Decoder'",
")",
"group",
".",
"add_argument",
"(",
"'-model_type'",
",",
"default",
"=",
"'text'",
",",
"help",
"=",
"\"\"\"Type of source model to use. Allows\n the system to incorporate non-text inputs.\n Options are [text|img|audio].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-encoder_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'rnn'",
",",
"choices",
"=",
"[",
"'rnn'",
",",
"'brnn'",
",",
"'mean'",
",",
"'transformer'",
",",
"'cnn'",
"]",
",",
"help",
"=",
"\"\"\"Type of encoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|brnn|mean|transformer|cnn].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decoder_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'rnn'",
",",
"choices",
"=",
"[",
"'rnn'",
",",
"'transformer'",
",",
"'cnn'",
"]",
",",
"help",
"=",
"\"\"\"Type of decoder layer to use. Non-RNN layers\n are experimental. Options are\n [rnn|transformer|cnn].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'Number of layers in enc/dec.'",
")",
"group",
".",
"add_argument",
"(",
"'-enc_layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2",
",",
"help",
"=",
"'Number of layers in the encoder'",
")",
"group",
".",
"add_argument",
"(",
"'-dec_layers'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2",
",",
"help",
"=",
"'Number of layers in the decoder'",
")",
"group",
".",
"add_argument",
"(",
"'-rnn_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"help",
"=",
"'Size of rnn hidden states'",
")",
"group",
".",
"add_argument",
"(",
"'-cnn_kernel_width'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"help",
"=",
"\"\"\"Size of windows in the cnn, the kernel_size is\n (cnn_kernel_width, 1) in conv layer\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-input_feed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"Feed the context vector at each time step as\n additional input (via concatenation with the word\n embeddings) to the decoder.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-bridge'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Have an additional layer between the last encoder\n state and the first decoder state\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-rnn_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'LSTM'",
",",
"choices",
"=",
"[",
"'LSTM'",
",",
"'GRU'",
",",
"'SRU'",
"]",
",",
"action",
"=",
"CheckSRU",
",",
"help",
"=",
"\"\"\"The gate type to use in the RNNs\"\"\"",
")",
"# group.add_argument('-residual', action=\"store_true\",",
"# help=\"Add residual connections between RNN layers.\")",
"group",
".",
"add_argument",
"(",
"'-brnn'",
",",
"action",
"=",
"DeprecateAction",
",",
"help",
"=",
"\"Deprecated, use `encoder_type`.\"",
")",
"group",
".",
"add_argument",
"(",
"'-context_gate'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"choices",
"=",
"[",
"'source'",
",",
"'target'",
",",
"'both'",
"]",
",",
"help",
"=",
"\"\"\"Type of context gate to use.\n Do not select for no context gate.\"\"\"",
")",
"# Attention options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model- Attention'",
")",
"group",
".",
"add_argument",
"(",
"'-global_attention'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'general'",
",",
"choices",
"=",
"[",
"'dot'",
",",
"'general'",
",",
"'mlp'",
"]",
",",
"help",
"=",
"\"\"\"The attention type to use:\n dotprod or general (Luong) or MLP (Bahdanau)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-global_attention_function'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"softmax\"",
",",
"choices",
"=",
"[",
"\"softmax\"",
",",
"\"sparsemax\"",
"]",
")",
"group",
".",
"add_argument",
"(",
"'-self_attn_type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"scaled-dot\"",
",",
"help",
"=",
"\"\"\"Self attention type in Transformer decoder\n layer -- currently \"scaled-dot\" or \"average\" \"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-heads'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"8",
",",
"help",
"=",
"'Number of heads for transformer self-attention'",
")",
"group",
".",
"add_argument",
"(",
"'-transformer_ff'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"2048",
",",
"help",
"=",
"'Size of hidden transformer feed-forward'",
")",
"# Generator and loss options.",
"group",
".",
"add_argument",
"(",
"'-copy_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Train copy attention layer.'",
")",
"group",
".",
"add_argument",
"(",
"'-generator_function'",
",",
"default",
"=",
"\"log_softmax\"",
",",
"choices",
"=",
"[",
"\"log_softmax\"",
",",
"\"sparsemax\"",
"]",
",",
"help",
"=",
"\"\"\"Which function to use for generating\n probabilities over the target vocabulary (choices:\n log_softmax, sparsemax)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-copy_attn_force'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'When available, train to copy.'",
")",
"group",
".",
"add_argument",
"(",
"'-reuse_copy_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Reuse standard attention for copy\"",
")",
"group",
".",
"add_argument",
"(",
"'-copy_loss_by_seqlength'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Divide copy loss by length of sequence\"",
")",
"group",
".",
"add_argument",
"(",
"'-coverage_attn'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Train a coverage attention layer.'",
")",
"group",
".",
"add_argument",
"(",
"'-lambda_coverage'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"1",
",",
"help",
"=",
"'Lambda value for coverage.'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/opts.py#L8-L134 |
||
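For reference, each option-group function in this opts.py (model_opts above, and preprocess_opts, train_opts, translate_opts below) simply registers argument groups on a caller-supplied argparse parser. A minimal, illustrative usage sketch for model_opts; the script name and flag values here are made up, only the opts API comes from this file:

import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(parser)  # registers the Model-* argument groups shown above
opt = parser.parse_args(['-rnn_type', 'GRU', '-global_attention', 'mlp'])
print(opt.rnn_type, opt.heads, opt.copy_attn)  # -> GRU 8 False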
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/opts.py | python | preprocess_opts | (parser) | | | Pre-processing options | Pre-processing options | [
"Pre",
"-",
"procesing",
"options"
] | def preprocess_opts(parser):
""" Pre-procesing options """
# Data options
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="""Type of the source input.
Options are [text|img].""")
group.add_argument('-train_src', required=True,
help="Path to the training source data")
group.add_argument('-train_tgt', required=True,
help="Path to the training target data")
group.add_argument('-valid_src', required=True,
help="Path to the validation source data")
group.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
group.add_argument('-src_dir', default="",
help="Source directory for image or audio files.")
group.add_argument('-save_data', required=True,
help="Output file for the prepared data")
group.add_argument('-max_shard_size', type=int, default=0,
help="""For text corpus of large volume, it will
be divided into shards of this size to preprocess.
If 0, the data will be handled as a whole. The unit
is in bytes. Optimal value should be multiples of
64 bytes. A commonly used sharding value is 131072000.
It is recommended to ensure the corpus is shuffled
before sharding.""")
group.add_argument('-shard_size', type=int, default=0,
help="""Divide src_corpus and tgt_corpus into
                       multiple smaller src_corpus and tgt_corpus files, then
                       build shards, each shard will have
                       opt.shard_size samples except the last shard.
shard_size=0 means no segmentation
shard_size>0 means segment dataset into multiple shards,
each shard has shard_size samples""")
# Dictionary options, for text corpus
group = parser.add_argument_group('Vocab')
group.add_argument('-src_vocab', default="",
help="""Path to an existing source vocabulary. Format:
one word per line.""")
group.add_argument('-tgt_vocab', default="",
help="""Path to an existing target vocabulary. Format:
one word per line.""")
group.add_argument('-features_vocabs_prefix', type=str, default='',
help="Path prefix to existing features vocabularies")
group.add_argument('-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
group.add_argument('-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
group.add_argument('-src_words_min_frequency', type=int, default=0)
group.add_argument('-tgt_words_min_frequency', type=int, default=0)
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
# Truncation options, for text corpus
group = parser.add_argument_group('Pruning')
group.add_argument('-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
group.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
group.add_argument('-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
group.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
group.add_argument('-lower', action='store_true', help='lowercase data')
# Data processing options
group = parser.add_argument_group('Random')
group.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
group.add_argument('-seed', type=int, default=3435,
help="Random seed")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
# Options most relevant to speech
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
group.add_argument('-window_stride', type=float, default=.01,
help="Window stride for spectrogram in seconds.")
group.add_argument('-window', default='hamming',
help="Window type for spectrogram generation.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"preprocess_opts",
"(",
"parser",
")",
":",
"# Data options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Data'",
")",
"group",
".",
"add_argument",
"(",
"'-data_type'",
",",
"default",
"=",
"\"text\"",
",",
"help",
"=",
"\"\"\"Type of the source input.\n Options are [text|img].\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the training source data\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_tgt'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the training target data\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the validation source data\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_tgt'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Path to the validation target data\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_dir'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Source directory for image or audio files.\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_data'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Output file for the prepared data\"",
")",
"group",
".",
"add_argument",
"(",
"'-max_shard_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"For text corpus of large volume, it will\n be divided into shards of this size to preprocess.\n If 0, the data will be handled as a whole. The unit\n is in bytes. Optimal value should be multiples of\n 64 bytes. A commonly used sharding value is 131072000.\n It is recommended to ensure the corpus is shuffled\n before sharding.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-shard_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Divide src_corpus and tgt_corpus into\n smaller multiple src_copus and tgt corpus files, then\n build shards, each shard will have\n opt.shard_size samples except last shard.\n shard_size=0 means no segmentation\n shard_size>0 means segment dataset into multiple shards,\n each shard has shard_size samples\"\"\"",
")",
"# Dictionary options, for text corpus",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Vocab'",
")",
"group",
".",
"add_argument",
"(",
"'-src_vocab'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"\"\"Path to an existing source vocabulary. Format:\n one word per line.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_vocab'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"\"\"Path to an existing target vocabulary. Format:\n one word per line.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-features_vocabs_prefix'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"\"Path prefix to existing features vocabularies\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_vocab_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"Size of the source vocabulary\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_vocab_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"Size of the target vocabulary\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_words_min_frequency'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_words_min_frequency'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
")",
"group",
".",
"add_argument",
"(",
"'-dynamic_dict'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Create dynamic dictionaries\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_vocab'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Share source and target vocabulary\"",
")",
"# Truncation options, for text corpus",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Pruning'",
")",
"group",
".",
"add_argument",
"(",
"'-src_seq_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Maximum source sequence length\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_seq_length_trunc'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"Truncate source sequence length.\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_seq_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Maximum target sequence length to keep.\"",
")",
"group",
".",
"add_argument",
"(",
"'-tgt_seq_length_trunc'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"Truncate target sequence length.\"",
")",
"group",
".",
"add_argument",
"(",
"'-lower'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'lowercase data'",
")",
"# Data processing options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Random'",
")",
"group",
".",
"add_argument",
"(",
"'-shuffle'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"Shuffle data\"",
")",
"group",
".",
"add_argument",
"(",
"'-seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3435",
",",
"help",
"=",
"\"Random seed\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-report_every'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100000",
",",
"help",
"=",
"\"Report status every this many sentences\"",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"# Options most relevant to speech",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"\"Window size for spectrogram in seconds.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_stride'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".01",
",",
"help",
"=",
"\"Window stride for spectrogram in seconds.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window'",
",",
"default",
"=",
"'hamming'",
",",
"help",
"=",
"\"Window type for spectrogram generation.\"",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/opts.py#L137-L242 |
||
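Since -train_src, -train_tgt, -valid_src, -valid_tgt and -save_data are all required=True above, a minimal preprocess_opts invocation looks like the following sketch (the file names are made up):

import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description='preprocess.py')
opts.preprocess_opts(parser)
opt = parser.parse_args([
    '-train_src', 'train.txt.src', '-train_tgt', 'train.txt.tgt',
    '-valid_src', 'val.txt.src', '-valid_tgt', 'val.txt.tgt',
    '-save_data', 'data/multinews'])
print(opt.src_vocab_size, opt.tgt_seq_length)  # -> 50000 50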
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/opts.py | python | train_opts | (parser) | Training and saving options | Training and saving options | [
"Training",
"and",
"saving",
"options"
] | def train_opts(parser):
""" Training and saving options """
group = parser.add_argument_group('General')
group.add_argument('-data', required=True,
help="""Path prefix to the ".train.pt" and
".valid.pt" file path from preprocess.py""")
group.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_N.pt where N is the number
of steps""")
group.add_argument('-save_checkpoint_steps', type=int, default=5000,
help="""Save a checkpoint every X steps""")
group.add_argument('-keep_checkpoint', type=int, default=-1,
help="""Keep X checkpoints (negative: keep all)""")
# GPU
group.add_argument('-gpuid', default=[], nargs='+', type=int,
help="Deprecated see world_size and gpu_ranks.")
group.add_argument('-gpu_ranks', default=[], nargs='+', type=int,
help="list of ranks of each process.")
group.add_argument('-world_size', default=1, type=int,
help="total number of distributed processes.")
group.add_argument('-gpu_backend', default='nccl', nargs='+', type=str,
help="Type of torch distributed backend")
group.add_argument('-gpu_verbose_level', default=0, type=int,
help="Gives more info on each process per GPU.")
group.add_argument('-master_ip', default="localhost", type=str,
help="IP of master for torch.distributed training.")
group.add_argument('-master_port', default=10000, type=int,
help="Port of master for torch.distributed training.")
group.add_argument('-seed', type=int, default=-1,
help="""Random seed used for the experiments
reproducibility.""")
# Init options
group = parser.add_argument_group('Initialization')
group.add_argument('-param_init', type=float, default=0.1,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init).
Use 0 to not use initialization""")
group.add_argument('-param_init_glorot', action='store_true',
help="""Init parameters with xavier_uniform.
Required for transfomer.""")
group.add_argument('-train_from', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model's state_dict.""")
# Pretrained word vectors
group.add_argument('-pre_word_vecs_enc',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the encoder side.
See README for specific formatting instructions.""")
group.add_argument('-pre_word_vecs_dec',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the decoder side.
See README for specific formatting instructions.""")
# Fixed word vectors
group.add_argument('-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
group.add_argument('-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the decoder side.")
# Optimization options
group = parser.add_argument_group('Optimization- Type')
group.add_argument('-batch_size', type=int, default=64,
help='Maximum batch size for training')
group.add_argument('-batch_type', default='sents',
choices=["sents", "tokens"],
help="""Batch grouping for batch_size. Standard
is sents. Tokens will do dynamic batching""")
group.add_argument('-normalization', default='sents',
choices=["sents", "tokens"],
help='Normalization method of the gradient.')
group.add_argument('-accum_count', type=int, default=1,
help="""Accumulate gradient this many times.
Approximately equivalent to updating
batch_size * accum_count batches at once.
Recommended for Transformer.""")
group.add_argument('-valid_steps', type=int, default=10000,
                       help='Perform validation every X steps')
group.add_argument('-valid_batch_size', type=int, default=32,
help='Maximum batch size for validation')
group.add_argument('-max_generator_batches', type=int, default=32,
help="""Maximum batches of words in a sequence to run
the generator on in parallel. Higher is faster, but
uses more memory.""")
group.add_argument('-train_steps', type=int, default=100000,
help='Number of training steps')
group.add_argument('-epochs', type=int, default=0,
help='Deprecated epochs see train_steps')
group.add_argument('-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam',
'sparseadam'],
help="""Optimization method.""")
group.add_argument('-adagrad_accumulator_init', type=float, default=0,
help="""Initializes the accumulator values in adagrad.
Mirrors the initial_accumulator_value option
in the tensorflow adagrad (use 0.1 for their default).
""")
group.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to
max_grad_norm""")
group.add_argument('-dropout', type=float, default=0.3,
help="Dropout probability; applied in LSTM stacks.")
group.add_argument('-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
group.add_argument('-adam_beta1', type=float, default=0.9,
help="""The beta1 parameter used by Adam.
Almost without exception a value of 0.9 is used in
the literature, seemingly giving good results,
so we would discourage changing this value from
the default without due consideration.""")
group.add_argument('-adam_beta2', type=float, default=0.999,
help="""The beta2 parameter used by Adam.
Typically a value of 0.999 is recommended, as this is
the value suggested by the original paper describing
Adam, and is also the value adopted in other frameworks
                       such as Tensorflow and Keras, i.e. see:
https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/ .
Whereas recently the paper "Attention is All You Need"
suggested a value of 0.98 for beta2, this parameter may
not work well for normal models / default
baselines.""")
group.add_argument('-label_smoothing', type=float, default=0.0,
help="""Label smoothing value epsilon.
Probabilities of all non-true labels
will be smoothed by epsilon / (vocab_size - 1).
Set to zero to turn off label smoothing.
For more detailed information, see:
https://arxiv.org/abs/1512.00567""")
# learning rate
group = parser.add_argument_group('Optimization- Rate')
group.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate.
Recommended settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
group.add_argument('-learning_rate_decay', type=float, default=0.5,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) steps have gone past
start_decay_steps""")
group.add_argument('-start_decay_steps', type=int, default=50000,
help="""Start decaying every decay_steps after
start_decay_steps""")
group.add_argument('-decay_steps', type=int, default=10000,
help="""Decay every decay_steps""")
group.add_argument('-decay_method', type=str, default="",
choices=['noam'], help="Use a custom decay rate.")
group.add_argument('-warmup_steps', type=int, default=4000,
help="""Number of warmup steps for custom decay.""")
group = parser.add_argument_group('Logging')
group.add_argument('-report_every', type=int, default=50,
help="Print stats at this interval.")
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-exp_host', type=str, default="",
help="Send logs to this crayon server.")
group.add_argument('-exp', type=str, default="",
help="Name of the experiment for logging.")
# Use TensorboardX for visualization during training
group.add_argument('-tensorboard', action="store_true",
help="""Use tensorboardX for visualization during training.
Must have the library tensorboardX.""")
group.add_argument("-tensorboard_log_dir", type=str,
default="runs/onmt",
help="""Log directory for Tensorboard.
This is also the name of the run.
""")
group = parser.add_argument_group('Speech')
# Options most relevant to speech
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help="Window size for spectrogram in seconds.")
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"train_opts",
"(",
"parser",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'General'",
")",
"group",
".",
"add_argument",
"(",
"'-data'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"\"\"Path prefix to the \".train.pt\" and\n \".valid.pt\" file path from preprocess.py\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_model'",
",",
"default",
"=",
"'model'",
",",
"help",
"=",
"\"\"\"Model filename (the model will be saved as\n <save_model>_N.pt where N is the number\n of steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-save_checkpoint_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5000",
",",
"help",
"=",
"\"\"\"Save a checkpoint every X steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-keep_checkpoint'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"Keep X checkpoints (negative: keep all)\"\"\"",
")",
"# GPU",
"group",
".",
"add_argument",
"(",
"'-gpuid'",
",",
"default",
"=",
"[",
"]",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Deprecated see world_size and gpu_ranks.\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_ranks'",
",",
"default",
"=",
"[",
"]",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"list of ranks of each process.\"",
")",
"group",
".",
"add_argument",
"(",
"'-world_size'",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"total number of distributed processes.\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_backend'",
",",
"default",
"=",
"'nccl'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Type of torch distributed backend\"",
")",
"group",
".",
"add_argument",
"(",
"'-gpu_verbose_level'",
",",
"default",
"=",
"0",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Gives more info on each process per GPU.\"",
")",
"group",
".",
"add_argument",
"(",
"'-master_ip'",
",",
"default",
"=",
"\"localhost\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"IP of master for torch.distributed training.\"",
")",
"group",
".",
"add_argument",
"(",
"'-master_port'",
",",
"default",
"=",
"10000",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"Port of master for torch.distributed training.\"",
")",
"group",
".",
"add_argument",
"(",
"'-seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"\"\"Random seed used for the experiments\n reproducibility.\"\"\"",
")",
"# Init options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Initialization'",
")",
"group",
".",
"add_argument",
"(",
"'-param_init'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.1",
",",
"help",
"=",
"\"\"\"Parameters are initialized over uniform distribution\n with support (-param_init, param_init).\n Use 0 to not use initialization\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-param_init_glorot'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Init parameters with xavier_uniform.\n Required for transfomer.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_from'",
",",
"default",
"=",
"''",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"\"\"If training from a checkpoint then this is the\n path to the pretrained model's state_dict.\"\"\"",
")",
"# Pretrained word vectors",
"group",
".",
"add_argument",
"(",
"'-pre_word_vecs_enc'",
",",
"help",
"=",
"\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the encoder side.\n See README for specific formatting instructions.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-pre_word_vecs_dec'",
",",
"help",
"=",
"\"\"\"If a valid path is specified, then this will load\n pretrained word embeddings on the decoder side.\n See README for specific formatting instructions.\"\"\"",
")",
"# Fixed word vectors",
"group",
".",
"add_argument",
"(",
"'-fix_word_vecs_enc'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Fix word embeddings on the encoder side.\"",
")",
"group",
".",
"add_argument",
"(",
"'-fix_word_vecs_dec'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Fix word embeddings on the decoder side.\"",
")",
"# Optimization options",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Optimization- Type'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"64",
",",
"help",
"=",
"'Maximum batch size for training'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_type'",
",",
"default",
"=",
"'sents'",
",",
"choices",
"=",
"[",
"\"sents\"",
",",
"\"tokens\"",
"]",
",",
"help",
"=",
"\"\"\"Batch grouping for batch_size. Standard\n is sents. Tokens will do dynamic batching\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-normalization'",
",",
"default",
"=",
"'sents'",
",",
"choices",
"=",
"[",
"\"sents\"",
",",
"\"tokens\"",
"]",
",",
"help",
"=",
"'Normalization method of the gradient.'",
")",
"group",
".",
"add_argument",
"(",
"'-accum_count'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"Accumulate gradient this many times.\n Approximately equivalent to updating\n batch_size * accum_count batches at once.\n Recommended for Transformer.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-valid_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
",",
"help",
"=",
"'Perfom validation every X steps'",
")",
"group",
".",
"add_argument",
"(",
"'-valid_batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"32",
",",
"help",
"=",
"'Maximum batch size for validation'",
")",
"group",
".",
"add_argument",
"(",
"'-max_generator_batches'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"32",
",",
"help",
"=",
"\"\"\"Maximum batches of words in a sequence to run\n the generator on in parallel. Higher is faster, but\n uses more memory.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-train_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100000",
",",
"help",
"=",
"'Number of training steps'",
")",
"group",
".",
"add_argument",
"(",
"'-epochs'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Deprecated epochs see train_steps'",
")",
"group",
".",
"add_argument",
"(",
"'-optim'",
",",
"default",
"=",
"'sgd'",
",",
"choices",
"=",
"[",
"'sgd'",
",",
"'adagrad'",
",",
"'adadelta'",
",",
"'adam'",
",",
"'sparseadam'",
"]",
",",
"help",
"=",
"\"\"\"Optimization method.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adagrad_accumulator_init'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Initializes the accumulator values in adagrad.\n Mirrors the initial_accumulator_value option\n in the tensorflow adagrad (use 0.1 for their default).\n \"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-max_grad_norm'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"5",
",",
"help",
"=",
"\"\"\"If the norm of the gradient vector exceeds this,\n renormalize it to have the norm equal to\n max_grad_norm\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-dropout'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.3",
",",
"help",
"=",
"\"Dropout probability; applied in LSTM stacks.\"",
")",
"group",
".",
"add_argument",
"(",
"'-truncated_decoder'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"\"\"\"Truncated bptt.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adam_beta1'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.9",
",",
"help",
"=",
"\"\"\"The beta1 parameter used by Adam.\n Almost without exception a value of 0.9 is used in\n the literature, seemingly giving good results,\n so we would discourage changing this value from\n the default without due consideration.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-adam_beta2'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.999",
",",
"help",
"=",
"\"\"\"The beta2 parameter used by Adam.\n Typically a value of 0.999 is recommended, as this is\n the value suggested by the original paper describing\n Adam, and is also the value adopted in other frameworks\n such as Tensorflow and Kerras, i.e. see:\n https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer\n https://keras.io/optimizers/ .\n Whereas recently the paper \"Attention is All You Need\"\n suggested a value of 0.98 for beta2, this parameter may\n not work well for normal models / default\n baselines.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-label_smoothing'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.0",
",",
"help",
"=",
"\"\"\"Label smoothing value epsilon.\n Probabilities of all non-true labels\n will be smoothed by epsilon / (vocab_size - 1).\n Set to zero to turn off label smoothing.\n For more detailed information, see:\n https://arxiv.org/abs/1512.00567\"\"\"",
")",
"# learning rate",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Optimization- Rate'",
")",
"group",
".",
"add_argument",
"(",
"'-learning_rate'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"1.0",
",",
"help",
"=",
"\"\"\"Starting learning rate.\n Recommended settings: sgd = 1, adagrad = 0.1,\n adadelta = 1, adam = 0.001\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-learning_rate_decay'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.5",
",",
"help",
"=",
"\"\"\"If update_learning_rate, decay learning rate by\n this much if (i) perplexity does not decrease on the\n validation set or (ii) steps have gone past\n start_decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-start_decay_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50000",
",",
"help",
"=",
"\"\"\"Start decaying every decay_steps after\n start_decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decay_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10000",
",",
"help",
"=",
"\"\"\"Decay every decay_steps\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-decay_method'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"choices",
"=",
"[",
"'noam'",
"]",
",",
"help",
"=",
"\"Use a custom decay rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-warmup_steps'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"4000",
",",
"help",
"=",
"\"\"\"Number of warmup steps for custom decay.\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-report_every'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"50",
",",
"help",
"=",
"\"Print stats at this interval.\"",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"group",
".",
"add_argument",
"(",
"'-exp_host'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Send logs to this crayon server.\"",
")",
"group",
".",
"add_argument",
"(",
"'-exp'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Name of the experiment for logging.\"",
")",
"# Use TensorboardX for visualization during training",
"group",
".",
"add_argument",
"(",
"'-tensorboard'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Use tensorboardX for visualization during training.\n Must have the library tensorboardX.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"\"-tensorboard_log_dir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"runs/onmt\"",
",",
"help",
"=",
"\"\"\"Log directory for Tensorboard.\n This is also the name of the run.\n \"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"# Options most relevant to speech",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"\"Window size for spectrogram in seconds.\"",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/opts.py#L245-L436 |
||
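Of the training options above, only -data is required; everything else has a default. A hedged sketch of a typical parse (the data prefix and values are illustrative):

import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description='train.py')
opts.train_opts(parser)
opt = parser.parse_args(['-data', 'data/multinews',
                         '-optim', 'adam', '-learning_rate', '0.001'])
print(opt.batch_size, opt.train_steps, opt.adam_beta2)  # -> 64 100000 0.999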
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/opts.py | python | translate_opts | (parser) | Translation / inference options | Translation / inference options | [
"Translation",
"/",
"inference",
"options"
] | def translate_opts(parser):
""" Translation / inference options """
group = parser.add_argument_group('Model')
group.add_argument('-model', dest='models', metavar='MODEL',
nargs='+', type=str, default=[], required=True,
help='Path to model .pt file(s). '
'Multiple models can be specified, '
'for ensemble decoding.')
group = parser.add_argument_group('Data')
group.add_argument('-data_type', default="text",
help="Type of the source input. Options: [text|img].")
group.add_argument('-src', required=True,
help="""Source sequence to decode (one line per
sequence)""")
group.add_argument('-src_dir', default="",
help='Source directory for image or audio files')
group.add_argument('-tgt',
help='True target sequence (optional)')
group.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
group.add_argument('-report_bleu', action='store_true',
help="""Report bleu score after translation,
call tools/multi-bleu.perl on command line""")
group.add_argument('-report_rouge', action='store_true',
help="""Report rouge 1/2/3/L/SU4 score after translation
call tools/test_rouge.py on command line""")
# Options most relevant to summarization.
group.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
group.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
group = parser.add_argument_group('Beam')
group.add_argument('-fast', action="store_true",
help="""Use fast beam search (some features may not be
supported!)""")
group.add_argument('-beam_size', type=int, default=5,
help='Beam size')
group.add_argument('-min_length', type=int, default=0,
help='Minimum prediction length')
group.add_argument('-max_length', type=int, default=100,
help='Maximum prediction length.')
group.add_argument('-max_sent_length', action=DeprecateAction,
help="Deprecated, use `-max_length` instead")
# Alpha and Beta values for Google Length + Coverage penalty
# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7
group.add_argument('-stepwise_penalty', action='store_true',
help="""Apply penalty at every decoding step.
Helpful for summary penalty.""")
group.add_argument('-length_penalty', default='none',
choices=['none', 'wu', 'avg'],
help="""Length Penalty to use.""")
group.add_argument('-coverage_penalty', default='none',
choices=['none', 'wu', 'summary'],
help="""Coverage Penalty to use.""")
group.add_argument('-alpha', type=float, default=0.,
help="""Google NMT length penalty parameter
(higher = longer generation)""")
group.add_argument('-beta', type=float, default=-0.,
help="""Coverage penalty parameter""")
group.add_argument('-block_ngram_repeat', type=int, default=0,
help='Block repetition of ngrams during decoding.')
group.add_argument('-ignore_when_blocking', nargs='+', type=str,
default=[],
help="""Ignore these strings when blocking repeats.
You want to block sentence delimiters.""")
group.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the
source token that had highest attention weight. If
                       phrase_table is provided, it will look up the
                       identified source token and give the corresponding
                       target token. If it is not provided (or the identified
source token does not exist in the table) then it
will copy the source token""")
group = parser.add_argument_group('Logging')
group.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
group.add_argument('-log_file', type=str, default="",
help="Output logs to a file under this path.")
group.add_argument('-attn_debug', action="store_true",
help='Print best attn for each word')
group.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
group.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
group = parser.add_argument_group('Efficiency')
group.add_argument('-batch_size', type=int, default=30,
help='Batch size')
group.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to speech.
group = parser.add_argument_group('Speech')
group.add_argument('-sample_rate', type=int, default=16000,
help="Sample rate.")
group.add_argument('-window_size', type=float, default=.02,
help='Window size for spectrogram in seconds')
group.add_argument('-window_stride', type=float, default=.01,
help='Window stride for spectrogram in seconds')
group.add_argument('-window', default='hamming',
help='Window type for spectrogram generation')
# Option most relevant to image input
group.add_argument('-image_channel_size', type=int, default=3,
choices=[3, 1],
help="""Using grayscale image can training
model faster and smaller""") | [
"def",
"translate_opts",
"(",
"parser",
")",
":",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Model'",
")",
"group",
".",
"add_argument",
"(",
"'-model'",
",",
"dest",
"=",
"'models'",
",",
"metavar",
"=",
"'MODEL'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"[",
"]",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Path to model .pt file(s). '",
"'Multiple models can be specified, '",
"'for ensemble decoding.'",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Data'",
")",
"group",
".",
"add_argument",
"(",
"'-data_type'",
",",
"default",
"=",
"\"text\"",
",",
"help",
"=",
"\"Type of the source input. Options: [text|img].\"",
")",
"group",
".",
"add_argument",
"(",
"'-src'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"\"\"Source sequence to decode (one line per\n sequence)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-src_dir'",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"'Source directory for image or audio files'",
")",
"group",
".",
"add_argument",
"(",
"'-tgt'",
",",
"help",
"=",
"'True target sequence (optional)'",
")",
"group",
".",
"add_argument",
"(",
"'-output'",
",",
"default",
"=",
"'pred.txt'",
",",
"help",
"=",
"\"\"\"Path to output the predictions (each line will\n be the decoded sequence\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-report_bleu'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Report bleu score after translation,\n call tools/multi-bleu.perl on command line\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-report_rouge'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Report rouge 1/2/3/L/SU4 score after translation\n call tools/test_rouge.py on command line\"\"\"",
")",
"# Options most relevant to summarization.",
"group",
".",
"add_argument",
"(",
"'-dynamic_dict'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Create dynamic dictionaries\"",
")",
"group",
".",
"add_argument",
"(",
"'-share_vocab'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Share source and target vocabulary\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Beam'",
")",
"group",
".",
"add_argument",
"(",
"'-fast'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Use fast beam search (some features may not be\n supported!)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-beam_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5",
",",
"help",
"=",
"'Beam size'",
")",
"group",
".",
"add_argument",
"(",
"'-min_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Minimum prediction length'",
")",
"group",
".",
"add_argument",
"(",
"'-max_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100",
",",
"help",
"=",
"'Maximum prediction length.'",
")",
"group",
".",
"add_argument",
"(",
"'-max_sent_length'",
",",
"action",
"=",
"DeprecateAction",
",",
"help",
"=",
"\"Deprecated, use `-max_length` instead\"",
")",
"# Alpha and Beta values for Google Length + Coverage penalty",
"# Described here: https://arxiv.org/pdf/1609.08144.pdf, Section 7",
"group",
".",
"add_argument",
"(",
"'-stepwise_penalty'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Apply penalty at every decoding step.\n Helpful for summary penalty.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-length_penalty'",
",",
"default",
"=",
"'none'",
",",
"choices",
"=",
"[",
"'none'",
",",
"'wu'",
",",
"'avg'",
"]",
",",
"help",
"=",
"\"\"\"Length Penalty to use.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-coverage_penalty'",
",",
"default",
"=",
"'none'",
",",
"choices",
"=",
"[",
"'none'",
",",
"'wu'",
",",
"'summary'",
"]",
",",
"help",
"=",
"\"\"\"Coverage Penalty to use.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-alpha'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.",
",",
"help",
"=",
"\"\"\"Google NMT length penalty parameter\n (higher = longer generation)\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-beta'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"-",
"0.",
",",
"help",
"=",
"\"\"\"Coverage penalty parameter\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-block_ngram_repeat'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'Block repetition of ngrams during decoding.'",
")",
"group",
".",
"add_argument",
"(",
"'-ignore_when_blocking'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"[",
"]",
",",
"help",
"=",
"\"\"\"Ignore these strings when blocking repeats.\n You want to block sentence delimiters.\"\"\"",
")",
"group",
".",
"add_argument",
"(",
"'-replace_unk'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"\"\"Replace the generated UNK tokens with the\n source token that had highest attention weight. If\n phrase_table is provided, it will lookup the\n identified source token and give the corresponding\n target token. If it is not provided(or the identified\n source token does not exist in the table) then it\n will copy the source token\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Logging'",
")",
"group",
".",
"add_argument",
"(",
"'-verbose'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Print scores and predictions for each sentence'",
")",
"group",
".",
"add_argument",
"(",
"'-log_file'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"\"Output logs to a file under this path.\"",
")",
"group",
".",
"add_argument",
"(",
"'-attn_debug'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"'Print best attn for each word'",
")",
"group",
".",
"add_argument",
"(",
"'-dump_beam'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"\"",
",",
"help",
"=",
"'File to dump beam information to.'",
")",
"group",
".",
"add_argument",
"(",
"'-n_best'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"\"\"If verbose is set, will output the n_best\n decoded sentences\"\"\"",
")",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Efficiency'",
")",
"group",
".",
"add_argument",
"(",
"'-batch_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"30",
",",
"help",
"=",
"'Batch size'",
")",
"group",
".",
"add_argument",
"(",
"'-gpu'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"\"Device to run on\"",
")",
"# Options most relevant to speech.",
"group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Speech'",
")",
"group",
".",
"add_argument",
"(",
"'-sample_rate'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"16000",
",",
"help",
"=",
"\"Sample rate.\"",
")",
"group",
".",
"add_argument",
"(",
"'-window_size'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".02",
",",
"help",
"=",
"'Window size for spectrogram in seconds'",
")",
"group",
".",
"add_argument",
"(",
"'-window_stride'",
",",
"type",
"=",
"float",
",",
"default",
"=",
".01",
",",
"help",
"=",
"'Window stride for spectrogram in seconds'",
")",
"group",
".",
"add_argument",
"(",
"'-window'",
",",
"default",
"=",
"'hamming'",
",",
"help",
"=",
"'Window type for spectrogram generation'",
")",
"# Option most relevant to image input",
"group",
".",
"add_argument",
"(",
"'-image_channel_size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"3",
",",
"choices",
"=",
"[",
"3",
",",
"1",
"]",
",",
"help",
"=",
"\"\"\"Using grayscale image can training\n model faster and smaller\"\"\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/opts.py#L439-L553 |
||
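translate_opts requires -model (stored on opt.models, so ensembles can pass several checkpoints) and -src. A usage sketch with made-up file names:

import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description='translate.py')
opts.translate_opts(parser)
opt = parser.parse_args(['-model', 'model_step_20000.pt',
                         '-src', 'test.txt.src', '-beam_size', '10'])
print(opt.models, opt.max_length)  # -> ['model_step_20000.pt'] 100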
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/opts.py | python | add_md_help_argument | (parser) | md help parser | md help parser | [
"md",
"help",
"parser"
] | def add_md_help_argument(parser):
""" md help parser """
parser.add_argument('-md', action=MarkdownHelpAction,
help='print Markdown-formatted help text and exit.') | [
"def",
"add_md_help_argument",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'-md'",
",",
"action",
"=",
"MarkdownHelpAction",
",",
"help",
"=",
"'print Markdown-formatted help text and exit.'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/opts.py#L556-L559 |
||
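add_md_help_argument only wires the custom MarkdownHelpAction (defined elsewhere in opts.py) onto the parser, so that a hypothetical `python train.py -md` prints Markdown-formatted help and exits:

import argparse
import onmt.opts as opts

parser = argparse.ArgumentParser(description='train.py')
opts.add_md_help_argument(parser)  # `-md` now dumps Markdown help and exits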
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | RNNDecoderBase.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None,sent_encoder=None,src_sents=None) | return decoder_outputs, state, attns | Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`. | Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`. | [
"Args",
":",
"tgt",
"(",
"LongTensor",
")",
":",
"sequences",
"of",
"padded",
"tokens",
"[",
"tgt_len",
"x",
"batch",
"x",
"nfeats",
"]",
".",
"memory_bank",
"(",
"FloatTensor",
")",
":",
"vectors",
"from",
"the",
"encoder",
"[",
"src_len",
"x",
"batch",
"x",
"hidden",
"]",
".",
"state",
"(",
":",
"obj",
":",
"onmt",
".",
"models",
".",
"DecoderState",
")",
":",
"decoder",
"state",
"object",
"to",
"initialize",
"the",
"decoder",
"memory_lengths",
"(",
"LongTensor",
")",
":",
"the",
"padded",
"source",
"lengths",
"[",
"batch",
"]",
".",
"Returns",
":",
"(",
"FloatTensor",
":",
"obj",
":",
"onmt",
".",
"Models",
".",
"DecoderState",
"FloatTensor",
")",
":",
"*",
"decoder_outputs",
":",
"output",
"from",
"the",
"decoder",
"(",
"after",
"attn",
")",
"[",
"tgt_len",
"x",
"batch",
"x",
"hidden",
"]",
".",
"*",
"decoder_state",
":",
"final",
"hidden",
"state",
"from",
"the",
"decoder",
"*",
"attns",
":",
"distribution",
"over",
"src",
"at",
"each",
"tgt",
"[",
"tgt_len",
"x",
"batch",
"x",
"src_len",
"]",
"."
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None,sent_encoder=None,src_sents=None):
"""
Args:
tgt (`LongTensor`): sequences of padded tokens
`[tgt_len x batch x nfeats]`.
memory_bank (`FloatTensor`): vectors from the encoder
`[src_len x batch x hidden]`.
state (:obj:`onmt.models.DecoderState`):
decoder state object to initialize the decoder
memory_lengths (`LongTensor`): the padded source lengths
`[batch]`.
Returns:
(`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):
* decoder_outputs: output from the decoder (after attn)
`[tgt_len x batch x hidden]`.
* decoder_state: final hidden state from the decoder
* attns: distribution over src at each tgt
`[tgt_len x batch x src_len]`.
"""
# Check
assert isinstance(state, RNNDecoderState)
# tgt.size() returns tgt length and batch
_, tgt_batch, _ = tgt.size()
_, memory_batch, _ = memory_bank.size()
aeq(tgt_batch, memory_batch)
# END
# 23333: TODO I changed this return value 'sent_decoder'
# Run the forward pass of the RNN.
decoder_final, decoder_outputs, attns = self._run_forward_pass(
tgt, memory_bank, state, memory_lengths=memory_lengths,sent_encoder=sent_encoder,src_sents=src_sents)
# Update the state with the result.
final_output = decoder_outputs[-1]
coverage = None
if "coverage" in attns:
coverage = attns["coverage"][-1].unsqueeze(0)
state.update_state(decoder_final, final_output.unsqueeze(0), coverage)
# Concatenates sequence of tensors along a new dimension.
# NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list
# (in particular in case of SRU) it was not raising error in 0.3
# since stack(Variable) was allowed.
        # In 0.4, SRU returns a tensor that shouldn't be stacked
if type(decoder_outputs) == list:
decoder_outputs = torch.stack(decoder_outputs)
for k in attns:
if type(attns[k]) == list:
attns[k] = torch.stack(attns[k])
return decoder_outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
",",
"sent_encoder",
"=",
"None",
",",
"src_sents",
"=",
"None",
")",
":",
"# Check",
"assert",
"isinstance",
"(",
"state",
",",
"RNNDecoderState",
")",
"# tgt.size() returns tgt length and batch",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"memory_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"memory_batch",
")",
"# END",
"# 23333: TODO I changed this return value 'sent_decoder'",
"# Run the forward pass of the RNN.",
"decoder_final",
",",
"decoder_outputs",
",",
"attns",
"=",
"self",
".",
"_run_forward_pass",
"(",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"memory_lengths",
",",
"sent_encoder",
"=",
"sent_encoder",
",",
"src_sents",
"=",
"src_sents",
")",
"# Update the state with the result.",
"final_output",
"=",
"decoder_outputs",
"[",
"-",
"1",
"]",
"coverage",
"=",
"None",
"if",
"\"coverage\"",
"in",
"attns",
":",
"coverage",
"=",
"attns",
"[",
"\"coverage\"",
"]",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"state",
".",
"update_state",
"(",
"decoder_final",
",",
"final_output",
".",
"unsqueeze",
"(",
"0",
")",
",",
"coverage",
")",
"# Concatenates sequence of tensors along a new dimension.",
"# NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list",
"# (in particular in case of SRU) it was not raising error in 0.3",
"# since stack(Variable) was allowed.",
"# In 0.4, SRU returns a tensor that shouldn't be stacke",
"if",
"type",
"(",
"decoder_outputs",
")",
"==",
"list",
":",
"decoder_outputs",
"=",
"torch",
".",
"stack",
"(",
"decoder_outputs",
")",
"for",
"k",
"in",
"attns",
":",
"if",
"type",
"(",
"attns",
"[",
"k",
"]",
")",
"==",
"list",
":",
"attns",
"[",
"k",
"]",
"=",
"torch",
".",
"stack",
"(",
"attns",
"[",
"k",
"]",
")",
"return",
"decoder_outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L115-L170 |
|
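To make the tensor contract of forward() above concrete, here is a shape-only sketch; the sizes are arbitrary and the decoder call itself is left commented out because constructing a full RNNDecoderBase subclass is out of scope here:

import torch

tgt_len, src_len, batch, hidden, nfeats = 7, 11, 4, 256, 1
tgt = torch.zeros(tgt_len, batch, nfeats, dtype=torch.long)   # [tgt_len x batch x nfeats]
memory_bank = torch.randn(src_len, batch, hidden)             # [src_len x batch x hidden]
memory_lengths = torch.full((batch,), src_len, dtype=torch.long)
# decoder_outputs, state, attns = decoder(
#     tgt, memory_bank, state, memory_lengths=memory_lengths)
# decoder_outputs: [tgt_len x batch x hidden]
# attns["std"]:    [tgt_len x batch x src_len]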
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | RNNDecoderBase.init_decoder_state | (self, src, memory_bank, encoder_final,
with_cache=False) | Init decoder state with last state of the encoder | Init decoder state with last state of the encoder | [
"Init",
"decoder",
"state",
"with",
"last",
"state",
"of",
"the",
"encoder"
] | def init_decoder_state(self, src, memory_bank, encoder_final,
with_cache=False):
""" Init decoder state with last state of the encoder """
def _fix_enc_hidden(hidden):
# The encoder hidden is (layers*directions) x batch x dim.
# We need to convert it to layers x batch x (directions*dim).
if self.bidirectional_encoder:
hidden = torch.cat([hidden[0:hidden.size(0):2],
hidden[1:hidden.size(0):2]], 2)
return hidden
if isinstance(encoder_final, tuple): # LSTM
return RNNDecoderState(self.hidden_size,
tuple([_fix_enc_hidden(enc_hid)
for enc_hid in encoder_final]))
else: # GRU
return RNNDecoderState(self.hidden_size,
_fix_enc_hidden(encoder_final)) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"encoder_final",
",",
"with_cache",
"=",
"False",
")",
":",
"def",
"_fix_enc_hidden",
"(",
"hidden",
")",
":",
"# The encoder hidden is (layers*directions) x batch x dim.",
"# We need to convert it to layers x batch x (directions*dim).",
"if",
"self",
".",
"bidirectional_encoder",
":",
"hidden",
"=",
"torch",
".",
"cat",
"(",
"[",
"hidden",
"[",
"0",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
",",
"hidden",
"[",
"1",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
"]",
",",
"2",
")",
"return",
"hidden",
"if",
"isinstance",
"(",
"encoder_final",
",",
"tuple",
")",
":",
"# LSTM",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"tuple",
"(",
"[",
"_fix_enc_hidden",
"(",
"enc_hid",
")",
"for",
"enc_hid",
"in",
"encoder_final",
"]",
")",
")",
"else",
":",
"# GRU",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"_fix_enc_hidden",
"(",
"encoder_final",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L172-L189 |
||
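The _fix_enc_hidden interleave in init_decoder_state above is the one non-obvious step: a bidirectional encoder returns hidden states shaped (layers*directions) x batch x dim, and the strided slicing folds them into layers x batch x (directions*dim). A standalone illustration with arbitrary sizes:

import torch

layers, batch, dim = 2, 3, 5
hidden = torch.randn(layers * 2, batch, dim)        # forward/backward interleaved
folded = torch.cat([hidden[0:hidden.size(0):2],     # forward directions
                    hidden[1:hidden.size(0):2]],    # backward directions
                   2)
print(folded.shape)  # torch.Size([2, 3, 10])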
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | StdRNNDecoder._run_forward_pass | (self, tgt, memory_bank, state, memory_lengths=None) | return decoder_final, decoder_outputs, attns | Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
            memory_bank (FloatTensor): output (tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder. | Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
            memory_bank (FloatTensor): output (tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder. | [
"Private",
"helper",
"for",
"running",
"the",
"specific",
"RNN",
"forward",
"pass",
".",
"Must",
"be",
"overriden",
"by",
"all",
"subclasses",
".",
"Args",
":",
"tgt",
"(",
"LongTensor",
")",
":",
"a",
"sequence",
"of",
"input",
"tokens",
"tensors",
"[",
"len",
"x",
"batch",
"x",
"nfeats",
"]",
".",
"memory_bank",
"(",
"FloatTensor",
")",
":",
"output",
"(",
"tensor",
"sequence",
")",
"from",
"the",
"encoder",
"RNN",
"of",
"size",
"(",
"src_len",
"x",
"batch",
"x",
"hidden_size",
")",
".",
"state",
"(",
"FloatTensor",
")",
":",
"hidden",
"state",
"from",
"the",
"encoder",
"RNN",
"for",
"initializing",
"the",
"decoder",
".",
"memory_lengths",
"(",
"LongTensor",
")",
":",
"the",
"source",
"memory_bank",
"lengths",
".",
"Returns",
":",
"decoder_final",
"(",
"Tensor",
")",
":",
"final",
"hidden",
"state",
"from",
"the",
"decoder",
".",
"decoder_outputs",
"(",
"[",
"FloatTensor",
"]",
")",
":",
"an",
"array",
"of",
"output",
"of",
"every",
"time",
"step",
"from",
"the",
"decoder",
".",
"attns",
"(",
"dict",
"of",
"(",
"str",
"[",
"FloatTensor",
"]",
")",
":",
"a",
"dictionary",
"of",
"different",
"type",
"of",
"attention",
"Tensor",
"array",
"of",
"every",
"time",
"step",
"from",
"the",
"decoder",
"."
] | def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
"""
Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
tgt (LongTensor): a sequence of input tokens tensors
[len x batch x nfeats].
            memory_bank (FloatTensor): output (tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
memory_lengths (LongTensor): the source memory_bank lengths.
Returns:
decoder_final (Tensor): final hidden state from the decoder.
decoder_outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor])): a dictionary of different
type of attention Tensor array of every time
step from the decoder.
"""
assert not self._copy # TODO, no support yet.
assert not self._coverage # TODO, no support yet.
# Initialize local and return variables.
attns = {}
emb = self.embeddings(tgt)
# Run the forward pass of the RNN.
if isinstance(self.rnn, nn.GRU):
rnn_output, decoder_final = self.rnn(emb, state.hidden[0])
else:
rnn_output, decoder_final = self.rnn(emb, state.hidden)
# Check
tgt_len, tgt_batch, _ = tgt.size()
output_len, output_batch, _ = rnn_output.size()
aeq(tgt_len, output_len)
aeq(tgt_batch, output_batch)
# END
# Calculate the attention.
decoder_outputs, p_attn = self.attn(
rnn_output.transpose(0, 1).contiguous(),
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths
)
attns["std"] = p_attn
# Calculate the context gate.
if self.context_gate is not None:
decoder_outputs = self.context_gate(
emb.view(-1, emb.size(2)),
rnn_output.view(-1, rnn_output.size(2)),
decoder_outputs.view(-1, decoder_outputs.size(2))
)
decoder_outputs = \
decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)
decoder_outputs = self.dropout(decoder_outputs)
return decoder_final, decoder_outputs, attns | [
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
")",
":",
"assert",
"not",
"self",
".",
"_copy",
"# TODO, no support yet.",
"assert",
"not",
"self",
".",
"_coverage",
"# TODO, no support yet.",
"# Initialize local and return variables.",
"attns",
"=",
"{",
"}",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"# Run the forward pass of the RNN.",
"if",
"isinstance",
"(",
"self",
".",
"rnn",
",",
"nn",
".",
"GRU",
")",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
"[",
"0",
"]",
")",
"else",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
")",
"# Check",
"tgt_len",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"output_len",
",",
"output_batch",
",",
"_",
"=",
"rnn_output",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_len",
",",
"output_len",
")",
"aeq",
"(",
"tgt_batch",
",",
"output_batch",
")",
"# END",
"# Calculate the attention.",
"decoder_outputs",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"p_attn",
"# Calculate the context gate.",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"decoder_outputs",
"=",
"self",
".",
"context_gate",
"(",
"emb",
".",
"view",
"(",
"-",
"1",
",",
"emb",
".",
"size",
"(",
"2",
")",
")",
",",
"rnn_output",
".",
"view",
"(",
"-",
"1",
",",
"rnn_output",
".",
"size",
"(",
"2",
")",
")",
",",
"decoder_outputs",
".",
"view",
"(",
"-",
"1",
",",
"decoder_outputs",
".",
"size",
"(",
"2",
")",
")",
")",
"decoder_outputs",
"=",
"decoder_outputs",
".",
"view",
"(",
"tgt_len",
",",
"tgt_batch",
",",
"self",
".",
"hidden_size",
")",
"decoder_outputs",
"=",
"self",
".",
"dropout",
"(",
"decoder_outputs",
")",
"return",
"decoder_final",
",",
"decoder_outputs",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L208-L270 |
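A note on the shape contract documented above: the decoder works time-first, (tgt_len, batch, hidden), while attention is computed batch-first, hence the transpose(0, 1) calls. The following is a minimal sketch of that pattern with dummy tensors and plain dot-product scoring (illustrative only, not OpenNMT's GlobalAttention module):

import torch

tgt_len, src_len, batch, hidden = 5, 7, 2, 16
rnn_output = torch.randn(tgt_len, batch, hidden)   # decoder states, time-first
memory_bank = torch.randn(src_len, batch, hidden)  # encoder states, time-first

query = rnn_output.transpose(0, 1).contiguous()    # (batch, tgt_len, hidden)
keys = memory_bank.transpose(0, 1)                 # (batch, src_len, hidden)
scores = torch.bmm(query, keys.transpose(1, 2))    # (batch, tgt_len, src_len)
p_attn = torch.softmax(scores, dim=-1)             # one distribution per target step
context = torch.bmm(p_attn, keys)                  # (batch, tgt_len, hidden)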
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | StdRNNDecoder._input_size | (self) | return self.embeddings.embedding_size | Private helper returning the number of expected features. | Private helper returning the number of expected features. | [
"Private",
"helper",
"returning",
"the",
"number",
"of",
"expected",
"features",
"."
] | def _input_size(self):
"""
Private helper returning the number of expected features.
"""
return self.embeddings.embedding_size | [
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L277-L281 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | InputFeedRNNDecoder._run_mmr_attention | (self,sent_encoder,sent_decoder,src_sents,input_step) | return mmr_among_words | This is the attention version, where in the encoding part we use self-attention,
the score is the max value of the attention weight
# sent_encoder: size (sent_len=9,batch=2,dim=512)
# sent_decoder: size (sent_len=1,batch=2,dim=512)
# src_sents: size (batch=2,sent_len=9)
Function to calculate MMR (Maximal Marginal Relevance) scores.
:param sent_encoder:
:param sent_decoder:
:param src_sents:
:return: | This is the attention version, where in the encoding part we use self-attention,
the score is the max value of the attention weight
# sent_encoder: size (sent_len=9,batch=2,dim=512)
# sent_decoder: size (sent_len=1,batch=2,dim=512)
# src_sents: size (batch=2,sent_len=9)
Function to calculate MMR (Maximal Marginal Relevance) scores.
:param sent_encoder:
:param sent_decoder:
:param src_sents:
:return: | [
"This",
"is",
"the",
"attention",
"version",
"where",
"in",
"the",
"encoding",
"part",
"we",
"use",
"self",
"-",
"attention",
"the",
"score",
"is",
"the",
"max",
"value",
"of",
"the",
"attention",
"weight",
"#",
"sent_encoder",
":",
"size",
"(",
"sent_len",
"=",
"9",
"batch",
"=",
"2",
"dim",
"=",
"512",
")",
"#",
"sent_decoder",
":",
"size",
"(",
"sent_len",
"=",
"1",
"batch",
"=",
"2",
"dim",
"=",
"512",
")",
"#",
"src_sents",
":",
"size",
"(",
"batch",
"=",
"2",
"sent_len",
"=",
"9",
")",
"function",
"to",
"calculate",
"mmr",
":",
"param",
"sent_encoder",
":",
":",
"param",
"sent_decoder",
":",
":",
"param",
"src_sents",
":",
":",
"return",
":"
] | def _run_mmr_attention(self,sent_encoder,sent_decoder,src_sents,input_step):
'''
This is the attention version, where in the encoding part we use self-attention,
the score is the max value of the attention weight
# sent_encoder: size (sent_len=9,batch=2,dim=512)
# sent_decoder: size (sent_len=1,batch=2,dim=512)
# src_sents: size (batch=2,sent_len=9)
Function to calculate MMR (Maximal Marginal Relevance) scores.
:param sent_encoder:
:param sent_decoder:
:param src_sents:
:return:
'''
pdist = nn.PairwiseDistance(p=2)
sent_decoder = sent_decoder.permute(1, 0, 2) # (2,1,512)
scores = []
# define sent matrix and current vector distance as the Euclidean distance
for sent in sent_encoder:
# distance: https://pytorch.org/docs/stable/_modules/torch/nn/modules/distance.html
sim2 = 0.5 * torch.sum(pdist(sent_encoder.permute(1,0,2),sent.unsqueeze(1)),1).unsqueeze(1) # this is also similarity func, can be another for-loop
sim1 = torch.bmm(self.mmr_W(sent_decoder), sent.unsqueeze(2)).squeeze(2) # (2,1)
scores.append(0.5*(sim1 - sim2))
sent_ranking_att = torch.t(torch.cat(scores,1)) #(sent_len=9,batch_size)
sent_ranking_att = torch.softmax(sent_ranking_att, dim=0).permute(1,0) # (batch_size, sent_len=9) after the permute
# scores is a list of score (sent_len=9, tensor shape (batch_size, 1))
mmr_among_words = [] # should be (batch=2,input_step=200)
for batch_id in range(sent_ranking_att.size()[0]):
# iterate each batch, create zero weight on the input steps
# mmr= torch.zeros([input_step], dtype=torch.float32).cuda()
tmp = []
for id,position in enumerate(src_sents[batch_id]):
for x in range(position):
tmp.append(sent_ranking_att[batch_id][id])
mmr = torch.stack(tmp) # make to 1-d
if len(mmr) < input_step:
tmp = torch.zeros(input_step - len(mmr)).float().cuda()
# for x in range(input_step-len(mmr)):
mmr = torch.cat((mmr, tmp), 0)
else:
mmr = mmr[:input_step]
mmr_among_words.append(mmr.unsqueeze(0))
mmr_among_words = torch.cat(mmr_among_words,0)
# shape: (batch=2, input_step=200)
return mmr_among_words | [
"def",
"_run_mmr_attention",
"(",
"self",
",",
"sent_encoder",
",",
"sent_decoder",
",",
"src_sents",
",",
"input_step",
")",
":",
"pdist",
"=",
"nn",
".",
"PairwiseDistance",
"(",
"p",
"=",
"2",
")",
"sent_decoder",
"=",
"sent_decoder",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"# (2,1,512)",
"scores",
"=",
"[",
"]",
"# define sent matrix and current vector distance as the Euclidean distance",
"for",
"sent",
"in",
"sent_encoder",
":",
"# distance: https://pytorch.org/docs/stable/_modules/torch/nn/modules/distance.html",
"sim2",
"=",
"0.5",
"*",
"torch",
".",
"sum",
"(",
"pdist",
"(",
"sent_encoder",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
",",
"sent",
".",
"unsqueeze",
"(",
"1",
")",
")",
",",
"1",
")",
".",
"unsqueeze",
"(",
"1",
")",
"# this is also similarity func, can be another for-loop",
"sim1",
"=",
"torch",
".",
"bmm",
"(",
"self",
".",
"mmr_W",
"(",
"sent_decoder",
")",
",",
"sent",
".",
"unsqueeze",
"(",
"2",
")",
")",
".",
"squeeze",
"(",
"2",
")",
"# (2,1)",
"scores",
".",
"append",
"(",
"0.5",
"*",
"(",
"sim1",
"-",
"sim2",
")",
")",
"sent_ranking_att",
"=",
"torch",
".",
"t",
"(",
"torch",
".",
"cat",
"(",
"scores",
",",
"1",
")",
")",
"#(sent_len=9,batch_size)",
"sent_ranking_att",
"=",
"torch",
".",
"softmax",
"(",
"sent_ranking_att",
",",
"dim",
"=",
"0",
")",
".",
"permute",
"(",
"1",
",",
"0",
")",
"#(sent_len=9,batch_size)",
"# scores is a list of score (sent_len=9, tensor shape (batch_size, 1))",
"mmr_among_words",
"=",
"[",
"]",
"# should be (batch=2,input_step=200)",
"for",
"batch_id",
"in",
"range",
"(",
"sent_ranking_att",
".",
"size",
"(",
")",
"[",
"0",
"]",
")",
":",
"# iterate each batch, create zero weight on the input steps",
"# mmr= torch.zeros([input_step], dtype=torch.float32).cuda()",
"tmp",
"=",
"[",
"]",
"for",
"id",
",",
"position",
"in",
"enumerate",
"(",
"src_sents",
"[",
"batch_id",
"]",
")",
":",
"for",
"x",
"in",
"range",
"(",
"position",
")",
":",
"tmp",
".",
"append",
"(",
"sent_ranking_att",
"[",
"batch_id",
"]",
"[",
"id",
"]",
")",
"mmr",
"=",
"torch",
".",
"stack",
"(",
"tmp",
")",
"# make to 1-d",
"if",
"len",
"(",
"mmr",
")",
"<",
"input_step",
":",
"tmp",
"=",
"torch",
".",
"zeros",
"(",
"input_step",
"-",
"len",
"(",
"mmr",
")",
")",
".",
"float",
"(",
")",
".",
"cuda",
"(",
")",
"# for x in range(input_step-len(mmr)):",
"mmr",
"=",
"torch",
".",
"cat",
"(",
"(",
"mmr",
",",
"tmp",
")",
",",
"0",
")",
"else",
":",
"mmr",
"=",
"mmr",
"[",
":",
"input_step",
"]",
"mmr_among_words",
".",
"append",
"(",
"mmr",
".",
"unsqueeze",
"(",
"0",
")",
")",
"mmr_among_words",
"=",
"torch",
".",
"cat",
"(",
"mmr_among_words",
",",
"0",
")",
"# shape: (batch=2, input_step=200)",
"return",
"mmr_among_words"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L324-L382 |
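For orientation, _run_mmr_attention above is a learned adaptation of Maximal Marginal Relevance: sim1 scores relevance to the current decoder state (through the bilinear mmr_W), sim2 penalizes redundancy against the other source sentences, and the two are balanced 0.5/0.5. A minimal sketch of the classic formulation it adapts, with an illustrative cosine scorer and lam weight (both are assumptions here, not the learned components used above):

import torch
import torch.nn.functional as F

def mmr_score(sent_vecs, query_vec, lam=0.5):
    # relevance of each sentence to the query
    rel = F.cosine_similarity(sent_vecs, query_vec.unsqueeze(0), dim=-1)
    # redundancy: each sentence's max similarity to any other sentence
    sims = F.cosine_similarity(sent_vecs.unsqueeze(1), sent_vecs.unsqueeze(0), dim=-1)
    sims.fill_diagonal_(0)
    red = sims.max(dim=1).values
    return lam * rel - (1.0 - lam) * red

sent_vecs = torch.randn(9, 512)  # one vector per source sentence
query_vec = torch.randn(512)     # current decoder summary state
weights = torch.softmax(mmr_score(sent_vecs, query_vec), dim=0)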
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | InputFeedRNNDecoder._run_forward_pass | (self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None) | return hidden, decoder_outputs, attns | See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
Note: adds a new param sent_encoder (the sentence matrix from model.py) and an attns["mmr"] entry. | See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
Note: adds a new param sent_encoder (the sentence matrix from model.py) and an attns["mmr"] entry. | [
"See",
"StdRNNDecoder",
".",
"_run_forward_pass",
"()",
"for",
"description",
"of",
"arguments",
"and",
"return",
"values",
".",
"TODO",
":",
"added",
"a",
"new",
"param",
":",
"sent_encoder",
"from",
"model",
".",
"py",
"this",
"is",
"the",
"sentence",
"matrix",
";",
"add",
"attns",
"[",
"mmr",
"]",
"=",
"[]",
"."
] | def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None,sent_encoder=None,src_sents=None):
"""
See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
Note: adds a new param sent_encoder (the sentence matrix from model.py) and an attns["mmr"] entry.
"""
# Additional args check.
input_feed = state.input_feed.squeeze(0)
#print("input feed size: {}\n".format(input_feed.size()))
input_feed_batch, _ = input_feed.size()
_, tgt_batch, _ = tgt.size()
aeq(tgt_batch, input_feed_batch)
# END Additional args check.
# Initialize local and return variables.
decoder_outputs = []
attns = {"std": []}
attns["mmr"] = []
if self._copy:
attns["copy"] = []
if self._coverage:
attns["coverage"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
hidden = state.hidden
coverage = state.coverage.squeeze(0) \
if state.coverage is not None else None
# Input feed concatenates hidden state with
# input at every time step.
#print("emb size: {}\n".format(emb.size()));exit()
for _, emb_t in enumerate(emb.split(1)):
# for each output time step in the loop
emb_t = emb_t.squeeze(0)
decoder_input = torch.cat([emb_t, input_feed], 1)
# TODO: the following is where we get attention!
rnn_output, hidden = self.rnn(decoder_input, hidden)
decoder_output, p_attn = self.attn(
rnn_output,
memory_bank.transpose(0, 1),
memory_lengths=memory_lengths)
# p_attn: size (batch=2,input_step=200)
if self.context_gate is not None:
# TODO: context gate should be employed
# instead of second RNN transform.
decoder_output = self.context_gate(
decoder_input, rnn_output, decoder_output
)
decoder_output = self.dropout(decoder_output)
input_feed = decoder_output
decoder_outputs += [decoder_output]
attns["std"] += [p_attn]
# Update the coverage attention.
if self._coverage:
coverage = coverage + p_attn \
if coverage is not None else p_attn
attns["coverage"] += [coverage]
# Run the forward pass of the copy attention layer.
#
if self._copy and not self._reuse_copy_attn:
_, copy_attn = self.copy_attn(decoder_output, memory_bank.transpose(0, 1))
attns["copy"] += [copy_attn]
elif self._copy:
attns["copy"] = attns["std"] # attns["copy"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]
# 2333: TODO : the sentence representation for decoder
sent_decoder = decoder_outputs[-1].unsqueeze(0) # shape: (1, batch_size=2,dim=512)
# Return result.
# 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)
# 2333: TODO: compute mmr attention here:
mmr_among_words = self._run_mmr_attention(sent_encoder, sent_decoder, src_sents,attns["std"][0].size()[-1])
# 2333: TODO: bring mmr to attention...
for output_step in attns["std"]:
attention_weight = output_step
# pairwise multiplication
attention_weight = torch.mul(mmr_among_words,attention_weight)
attns["mmr"].append(attention_weight.cuda())
# pdb.set_trace()
attns["std"] = attns["mmr"]
# decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)
return hidden, decoder_outputs, attns | [
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"sent_encoder",
"=",
"None",
",",
"src_sents",
"=",
"None",
")",
":",
"# Additional args check.",
"input_feed",
"=",
"state",
".",
"input_feed",
".",
"squeeze",
"(",
"0",
")",
"#print(\"input feed size: {}\\n\".format(input_feed.size()))",
"input_feed_batch",
",",
"_",
"=",
"input_feed",
".",
"size",
"(",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"input_feed_batch",
")",
"# END Additional args check.",
"# Initialize local and return variables.",
"decoder_outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"attns",
"[",
"\"mmr\"",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"_coverage",
":",
"attns",
"[",
"\"coverage\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"hidden",
"=",
"state",
".",
"hidden",
"coverage",
"=",
"state",
".",
"coverage",
".",
"squeeze",
"(",
"0",
")",
"if",
"state",
".",
"coverage",
"is",
"not",
"None",
"else",
"None",
"# Input feed concatenates hidden state with",
"# input at every time step.",
"#print(\"emb size: {}\\n\".format(emb.size()));exit()",
"for",
"_",
",",
"emb_t",
"in",
"enumerate",
"(",
"emb",
".",
"split",
"(",
"1",
")",
")",
":",
"# for each output time step in the loop",
"emb_t",
"=",
"emb_t",
".",
"squeeze",
"(",
"0",
")",
"decoder_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"emb_t",
",",
"input_feed",
"]",
",",
"1",
")",
"# TODO: the following is where we get attention!",
"rnn_output",
",",
"hidden",
"=",
"self",
".",
"rnn",
"(",
"decoder_input",
",",
"hidden",
")",
"decoder_output",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"# p_attn: size (batch=2,input_step=200)",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"# TODO: context gate should be employed",
"# instead of second RNN transform.",
"decoder_output",
"=",
"self",
".",
"context_gate",
"(",
"decoder_input",
",",
"rnn_output",
",",
"decoder_output",
")",
"decoder_output",
"=",
"self",
".",
"dropout",
"(",
"decoder_output",
")",
"input_feed",
"=",
"decoder_output",
"decoder_outputs",
"+=",
"[",
"decoder_output",
"]",
"attns",
"[",
"\"std\"",
"]",
"+=",
"[",
"p_attn",
"]",
"# Update the coverage attention.",
"if",
"self",
".",
"_coverage",
":",
"coverage",
"=",
"coverage",
"+",
"p_attn",
"if",
"coverage",
"is",
"not",
"None",
"else",
"p_attn",
"attns",
"[",
"\"coverage\"",
"]",
"+=",
"[",
"coverage",
"]",
"# Run the forward pass of the copy attention layer.",
"#",
"if",
"self",
".",
"_copy",
"and",
"not",
"self",
".",
"_reuse_copy_attn",
":",
"_",
",",
"copy_attn",
"=",
"self",
".",
"copy_attn",
"(",
"decoder_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
")",
"attns",
"[",
"\"copy\"",
"]",
"+=",
"[",
"copy_attn",
"]",
"elif",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attns",
"[",
"\"std\"",
"]",
"# attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]",
"# 2333: TODO : the sentence representation for decoder",
"sent_decoder",
"=",
"decoder_outputs",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"# shape: (1, batch_size=2,dim=512)",
"# Return result.",
"# 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)",
"# 2333: TODO: compute mmr attention here:",
"print",
"(",
"'Now..'",
")",
"mmr_among_words",
"=",
"self",
".",
"_run_mmr_attention",
"(",
"sent_encoder",
",",
"sent_decoder",
",",
"src_sents",
",",
"attns",
"[",
"\"std\"",
"]",
"[",
"0",
"]",
".",
"size",
"(",
")",
"[",
"-",
"1",
"]",
")",
"# 2333: TODO: bring mmr to attention...",
"for",
"output_step",
"in",
"attns",
"[",
"\"std\"",
"]",
":",
"attention_weight",
"=",
"output_step",
"# pairwise multiplication",
"attention_weight",
"=",
"torch",
".",
"mul",
"(",
"mmr_among_words",
",",
"attention_weight",
")",
"attns",
"[",
"\"mmr\"",
"]",
".",
"append",
"(",
"attention_weight",
".",
"cuda",
"(",
")",
")",
"# pdb.set_trace()",
"attns",
"[",
"\"std\"",
"]",
"=",
"attns",
"[",
"\"mmr\"",
"]",
"# decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)",
"return",
"hidden",
",",
"decoder_outputs",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L385-L490 |
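The defining property of the input-feed decoder above is that the previous attentional output is concatenated onto the next token embedding, so each step can condition on earlier attention decisions (which is also why _input_size is embedding_size + hidden_size). A stripped-down sketch of that loop, with an LSTMCell standing in for the stacked RNN and the attention step elided (all sizes illustrative):

import torch
import torch.nn as nn

emb_dim, hidden, batch, tgt_len = 8, 16, 2, 5
cell = nn.LSTMCell(emb_dim + hidden, hidden)
emb = torch.randn(tgt_len, batch, emb_dim)

h = torch.zeros(batch, hidden)
c = torch.zeros(batch, hidden)
input_feed = torch.zeros(batch, hidden)  # starts at zero, as in RNNDecoderState

outputs = []
for emb_t in emb.split(1):
    decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
    h, c = cell(decoder_input, (h, c))
    input_feed = h                       # a real decoder feeds the attentional output
    outputs.append(h)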
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | InputFeedRNNDecoder._input_size | (self) | return self.embeddings.embedding_size + self.hidden_size | Using input feed by concatenating input with attention vectors. | Using input feed by concatenating input with attention vectors. | [
"Using",
"input",
"feed",
"by",
"concatenating",
"input",
"with",
"attention",
"vectors",
"."
] | def _input_size(self):
"""
Using input feed by concatenating input with attention vectors.
"""
return self.embeddings.embedding_size + self.hidden_size | [
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size",
"+",
"self",
".",
"hidden_size"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L504-L508 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | DecoderState.detach | (self) | Need to document this | Need to document this | [
"Need",
"to",
"document",
"this"
] | def detach(self):
""" Need to document this """
self.hidden = tuple([_.detach() for _ in self.hidden])
self.input_feed = self.input_feed.detach() | [
"def",
"detach",
"(",
"self",
")",
":",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"[",
"_",
".",
"detach",
"(",
")",
"for",
"_",
"in",
"self",
".",
"hidden",
"]",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"input_feed",
".",
"detach",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L519-L522 |
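detach() matters for memory and correctness when a decoder state is carried across training batches: without cutting the graph, the next backward pass would try to traverse history that autograd has already freed. A tiny sketch of the pattern (illustrative; the second backward would raise a RuntimeError if the detach line were removed):

import torch
import torch.nn as nn

cell = nn.LSTMCell(4, 4)
h = torch.zeros(1, 4)
c = torch.zeros(1, 4)
for step in range(3):
    x = torch.randn(1, 4)
    h, c = cell(x, (h, c))
    h.sum().backward()
    h, c = h.detach(), c.detach()  # what DecoderState.detach() does between batches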
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | DecoderState.beam_update | (self, idx, positions, beam_size) | Need to document this | Need to document this | [
"Need",
"to",
"document",
"this"
] | def beam_update(self, idx, positions, beam_size):
""" Need to document this """
for e in self._all:
sizes = e.size()
br = sizes[1]
if len(sizes) == 3:
sent_states = e.view(sizes[0], beam_size, br // beam_size,
sizes[2])[:, :, idx]
else:
sent_states = e.view(sizes[0], beam_size,
br // beam_size,
sizes[2],
sizes[3])[:, :, idx]
sent_states.data.copy_(
sent_states.data.index_select(1, positions)) | [
"def",
"beam_update",
"(",
"self",
",",
"idx",
",",
"positions",
",",
"beam_size",
")",
":",
"for",
"e",
"in",
"self",
".",
"_all",
":",
"sizes",
"=",
"e",
".",
"size",
"(",
")",
"br",
"=",
"sizes",
"[",
"1",
"]",
"if",
"len",
"(",
"sizes",
")",
"==",
"3",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"else",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
",",
"sizes",
"[",
"3",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"sent_states",
".",
"data",
".",
"copy_",
"(",
"sent_states",
".",
"data",
".",
"index_select",
"(",
"1",
",",
"positions",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L524-L539 |
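beam_update's view/index_select dance is easier to see on a concrete tensor: a state of shape (len, beam_size*batch, dim) is reinterpreted as (len, beam_size, batch, dim), and only the beams of one batch example (idx) are reordered in place by the beam's back-pointers. A small sketch with illustrative sizes:

import torch

length, beam_size, batch, dim = 1, 3, 2, 4
e = torch.randn(length, beam_size * batch, dim)

idx = 0                              # which batch example to reorder
positions = torch.tensor([2, 0, 1])  # back-pointers chosen by beam search
sent_states = e.view(length, beam_size, batch, dim)[:, :, idx]
sent_states.data.copy_(sent_states.data.index_select(1, positions))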
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | RNNDecoderState.__init__ | (self, hidden_size, rnnstate) | Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim). | Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim). | [
"Args",
":",
"hidden_size",
"(",
"int",
")",
":",
"the",
"size",
"of",
"hidden",
"layer",
"of",
"the",
"decoder",
".",
"rnnstate",
":",
"final",
"hidden",
"state",
"from",
"the",
"encoder",
".",
"transformed",
"to",
"shape",
":",
"layers",
"x",
"batch",
"x",
"(",
"directions",
"*",
"dim",
")",
"."
] | def __init__(self, hidden_size, rnnstate):
"""
Args:
hidden_size (int): the size of hidden layer of the decoder.
rnnstate: final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim).
"""
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.coverage = None
# Init the input feed.
batch_size = self.hidden[0].size(1)
h_size = (batch_size, hidden_size)
self.input_feed = self.hidden[0].data.new(*h_size).zero_() \
.unsqueeze(0) | [
"def",
"__init__",
"(",
"self",
",",
"hidden_size",
",",
"rnnstate",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"coverage",
"=",
"None",
"# Init the input feed.",
"batch_size",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"size",
"(",
"1",
")",
"h_size",
"=",
"(",
"batch_size",
",",
"hidden_size",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"data",
".",
"new",
"(",
"*",
"h_size",
")",
".",
"zero_",
"(",
")",
".",
"unsqueeze",
"(",
"0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L548-L565 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | RNNDecoderState.update_state | (self, rnnstate, input_feed, coverage) | Update decoder state | Update decoder state | [
"Update",
"decoder",
"state"
] | def update_state(self, rnnstate, input_feed, coverage):
""" Update decoder state """
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.input_feed = input_feed
self.coverage = coverage | [
"def",
"update_state",
"(",
"self",
",",
"rnnstate",
",",
"input_feed",
",",
"coverage",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"input_feed",
"=",
"input_feed",
"self",
".",
"coverage",
"=",
"coverage"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L571-L578 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/decoder.py | python | RNNDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
vars = [e.data.repeat(1, beam_size, 1)
for e in self._all]
self.hidden = tuple(vars[:-1])
self.input_feed = vars[-1] | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"vars",
"=",
"[",
"e",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")",
"for",
"e",
"in",
"self",
".",
"_all",
"]",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"vars",
"[",
":",
"-",
"1",
"]",
")",
"self",
".",
"input_feed",
"=",
"vars",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/decoder.py#L580-L585 |
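repeat_beam_size_times tiles each state along the batch dimension so a single decoder pass serves all beam hypotheses; the tiling order (the whole batch repeated beam_size times) is exactly what beam_update's view(len, beam_size, batch, dim) later assumes. Sketch (sizes illustrative):

import torch

layers, batch, dim, beam_size = 2, 3, 8, 5
hidden = torch.randn(layers, batch, dim)
tiled = hidden.data.repeat(1, beam_size, 1)  # (layers, beam_size * batch, dim)
assert tiled.size(1) == beam_size * batch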
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | load_test_model | (opt, dummy_opt) | return shared_fields, ensemble_model, shared_model_opt | Read in multiple models for ensemble | Read in multiple models for ensemble | [
"Read",
"in",
"multiple",
"models",
"for",
"ensemble"
] | def load_test_model(opt, dummy_opt):
""" Read in multiple models for ensemble """
shared_fields = None
shared_model_opt = None
models = []
for model_path in opt.models:
fields, model, model_opt = \
onmt.model_builder.load_test_model(opt,
dummy_opt,
model_path=model_path)
if shared_fields is None:
shared_fields = fields
else:
for key, field in fields.items():
if field is not None and 'vocab' in field.__dict__:
assert field.vocab.stoi == shared_fields[key].vocab.stoi, \
'Ensemble models must use the same preprocessed data'
models.append(model)
if shared_model_opt is None:
shared_model_opt = model_opt
ensemble_model = EnsembleModel(models)
return shared_fields, ensemble_model, shared_model_opt | [
"def",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
")",
":",
"shared_fields",
"=",
"None",
"shared_model_opt",
"=",
"None",
"models",
"=",
"[",
"]",
"for",
"model_path",
"in",
"opt",
".",
"models",
":",
"fields",
",",
"model",
",",
"model_opt",
"=",
"onmt",
".",
"model_builder",
".",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
",",
"model_path",
"=",
"model_path",
")",
"import",
"pdb",
"pdb",
".",
"set_trace",
"(",
")",
"if",
"shared_fields",
"is",
"None",
":",
"shared_fields",
"=",
"fields",
"else",
":",
"for",
"key",
",",
"field",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"field",
"is",
"not",
"None",
"and",
"'vocab'",
"in",
"field",
".",
"__dict__",
":",
"assert",
"field",
".",
"vocab",
".",
"stoi",
"==",
"shared_fields",
"[",
"key",
"]",
".",
"vocab",
".",
"stoi",
",",
"'Ensemble models must use the same preprocessed data'",
"models",
".",
"append",
"(",
"model",
")",
"if",
"shared_model_opt",
"is",
"None",
":",
"shared_model_opt",
"=",
"model_opt",
"ensemble_model",
"=",
"EnsembleModel",
"(",
"models",
")",
"return",
"shared_fields",
",",
"ensemble_model",
",",
"shared_model_opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L135-L157 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | EnsembleDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
for model_state in self.model_decoder_states:
model_state.repeat_beam_size_times(beam_size) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"for",
"model_state",
"in",
"self",
".",
"model_decoder_states",
":",
"model_state",
".",
"repeat_beam_size_times",
"(",
"beam_size",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L27-L30 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | EnsembleDecoderOutput.squeeze | (self, dim=None) | return EnsembleDecoderOutput([
x.squeeze(dim) for x in self.model_outputs]) | Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()` | Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()` | [
"Delegate",
"squeeze",
"to",
"avoid",
"modifying",
":",
"obj",
":",
"Translator",
".",
"translate_batch",
"()"
] | def squeeze(self, dim=None):
"""
Delegate squeeze to avoid modifying
:obj:`Translator.translate_batch()`
"""
return EnsembleDecoderOutput([
x.squeeze(dim) for x in self.model_outputs]) | [
"def",
"squeeze",
"(",
"self",
",",
"dim",
"=",
"None",
")",
":",
"return",
"EnsembleDecoderOutput",
"(",
"[",
"x",
".",
"squeeze",
"(",
"dim",
")",
"for",
"x",
"in",
"self",
".",
"model_outputs",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L41-L47 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | EnsembleDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None) | return (EnsembleDecoderOutput(outputs),
EnsembleDecoderState(states),
mean_attns) | See :obj:`RNNDecoderBase.forward()` | See :obj:`RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None):
""" See :obj:`RNNDecoderBase.forward()` """
# Memory_lengths is a single tensor shared between all models.
# This assumption will not hold if Translator is modified
# to calculate memory_lengths as something other than the length
# of the input.
outputs, states, attns = zip(*[
model_decoder.forward(
tgt, memory_bank[i], state[i], memory_lengths, step=step)
for (i, model_decoder)
in enumerate(self.model_decoders)])
mean_attns = self.combine_attns(attns)
return (EnsembleDecoderOutput(outputs),
EnsembleDecoderState(states),
mean_attns) | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# Memory_lengths is a single tensor shared between all models.",
"# This assumption will not hold if Translator is modified",
"# to calculate memory_lengths as something other than the length",
"# of the input.",
"outputs",
",",
"states",
",",
"attns",
"=",
"zip",
"(",
"*",
"[",
"model_decoder",
".",
"forward",
"(",
"tgt",
",",
"memory_bank",
"[",
"i",
"]",
",",
"state",
"[",
"i",
"]",
",",
"memory_lengths",
",",
"step",
"=",
"step",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")",
"mean_attns",
"=",
"self",
".",
"combine_attns",
"(",
"attns",
")",
"return",
"(",
"EnsembleDecoderOutput",
"(",
"outputs",
")",
",",
"EnsembleDecoderState",
"(",
"states",
")",
",",
"mean_attns",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L72-L87 |
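The zip(*[...]) idiom above runs every member decoder and transposes the resulting list of (output, state, attn) triples into three parallel tuples, one per kind. A minimal demonstration of the pattern:

results = [("out_a", "state_a", "attn_a"),
           ("out_b", "state_b", "attn_b")]
outputs, states, attns = zip(*results)
assert outputs == ("out_a", "out_b")
assert states == ("state_a", "state_b")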
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | EnsembleDecoder.init_decoder_state | (self, src, memory_bank, enc_hidden) | return EnsembleDecoderState(
[model_decoder.init_decoder_state(src,
memory_bank[i],
enc_hidden[i])
for (i, model_decoder) in enumerate(self.model_decoders)]) | See :obj:`RNNDecoderBase.init_decoder_state()` | See :obj:`RNNDecoderBase.init_decoder_state()` | [
"See",
":",
"obj",
":",
"RNNDecoderBase",
".",
"init_decoder_state",
"()"
] | def init_decoder_state(self, src, memory_bank, enc_hidden):
""" See :obj:`RNNDecoderBase.init_decoder_state()` """
return EnsembleDecoderState(
[model_decoder.init_decoder_state(src,
memory_bank[i],
enc_hidden[i])
for (i, model_decoder) in enumerate(self.model_decoders)]) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"enc_hidden",
")",
":",
"return",
"EnsembleDecoderState",
"(",
"[",
"model_decoder",
".",
"init_decoder_state",
"(",
"src",
",",
"memory_bank",
"[",
"i",
"]",
",",
"enc_hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L95-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/ensemble.py | python | EnsembleGenerator.forward | (self, hidden) | return torch.stack(distributions).mean(0) | Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary. | Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary. | [
"Compute",
"a",
"distribution",
"over",
"the",
"target",
"dictionary",
"by",
"averaging",
"distributions",
"from",
"models",
"in",
"the",
"ensemble",
".",
"All",
"models",
"in",
"the",
"ensemble",
"must",
"share",
"a",
"target",
"vocabulary",
"."
] | def forward(self, hidden):
"""
Compute a distribution over the target dictionary
by averaging distributions from models in the ensemble.
All models in the ensemble must share a target vocabulary.
"""
distributions = [model_generator.forward(hidden[i])
for (i, model_generator)
in enumerate(self.model_generators)]
return torch.stack(distributions).mean(0) | [
"def",
"forward",
"(",
"self",
",",
"hidden",
")",
":",
"distributions",
"=",
"[",
"model_generator",
".",
"forward",
"(",
"hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_generator",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_generators",
")",
"]",
"return",
"torch",
".",
"stack",
"(",
"distributions",
")",
".",
"mean",
"(",
"0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/ensemble.py#L113-L122 |
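One subtlety of torch.stack(distributions).mean(0): it is an arithmetic mean of whatever the generators emit. If the generators end in log_softmax (common for NMT generators), averaging log-probabilities amounts to a geometric mean over probabilities; an arithmetic mean of probabilities would need logsumexp instead. A sketch of both combinations (illustrative tensors; which one is intended depends on the generator's final layer):

import torch

# three models' outputs over a 10-word vocab, batch of 2
log_probs = torch.log_softmax(torch.randn(3, 2, 10), dim=-1)

geo_mean_logs = log_probs.mean(0)  # what .mean(0) computes on log-probs
arith_mean_logs = torch.logsumexp(log_probs, dim=0) - torch.log(torch.tensor(3.0))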
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/audio_encoder.py | python | AudioEncoder.load_pretrained_vectors | (self, opt) | Pass in needed options only when modify function definition. | Pass in needed options only when modify function definition. | [
"Pass",
"in",
"needed",
"options",
"only",
"when",
"modify",
"function",
"definition",
"."
] | def load_pretrained_vectors(self, opt):
""" Pass in needed options only when modify function definition."""
pass | [
"def",
"load_pretrained_vectors",
"(",
"self",
",",
"opt",
")",
":",
"pass"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/audio_encoder.py#L45-L47 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/audio_encoder.py | python | AudioEncoder.forward | (self, src, lengths=None) | return hidden, output | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"encoders",
".",
"encoder",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
# (batch_size, 1, nfft, t)
# layer 1
src = self.batch_norm1(self.layer1(src[:, :, :, :]))
# (batch_size, 32, nfft/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
# (batch_size, 32, nfft/2/2, t/2)
# layer 2
src = self.batch_norm2(self.layer2(src))
# (batch_size, 32, nfft/2/2, t/2)
src = F.hardtanh(src, 0, 20, inplace=True)
batch_size = src.size(0)
length = src.size(3)
src = src.view(batch_size, -1, length)
src = src.transpose(0, 2).transpose(1, 2)
output, hidden = self.rnn(src)
return hidden, output | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"# (batch_size, 1, nfft, t)",
"# layer 1",
"src",
"=",
"self",
".",
"batch_norm1",
"(",
"self",
".",
"layer1",
"(",
"src",
"[",
":",
",",
":",
",",
":",
",",
":",
"]",
")",
")",
"# (batch_size, 32, nfft/2, t/2)",
"src",
"=",
"F",
".",
"hardtanh",
"(",
"src",
",",
"0",
",",
"20",
",",
"inplace",
"=",
"True",
")",
"# (batch_size, 32, nfft/2/2, t/2)",
"# layer 2",
"src",
"=",
"self",
".",
"batch_norm2",
"(",
"self",
".",
"layer2",
"(",
"src",
")",
")",
"# (batch_size, 32, nfft/2/2, t/2)",
"src",
"=",
"F",
".",
"hardtanh",
"(",
"src",
",",
"0",
",",
"20",
",",
"inplace",
"=",
"True",
")",
"batch_size",
"=",
"src",
".",
"size",
"(",
"0",
")",
"length",
"=",
"src",
".",
"size",
"(",
"3",
")",
"src",
"=",
"src",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"length",
")",
"src",
"=",
"src",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"output",
",",
"hidden",
"=",
"self",
".",
"rnn",
"(",
"src",
")",
"return",
"hidden",
",",
"output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/audio_encoder.py#L49-L72 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/mean_encoder.py | python | MeanEncoder.forward | (self, src, lengths=None) | return encoder_final, memory_bank | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`EncoderBase.forward()`"
self._check_args(src, lengths)
emb = self.embeddings(src)
_, batch, emb_dim = emb.size()
mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)
memory_bank = emb
encoder_final = (mean, mean)
return encoder_final, memory_bank | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"_",
",",
"batch",
",",
"emb_dim",
"=",
"emb",
".",
"size",
"(",
")",
"mean",
"=",
"emb",
".",
"mean",
"(",
"0",
")",
".",
"expand",
"(",
"self",
".",
"num_layers",
",",
"batch",
",",
"emb_dim",
")",
"memory_bank",
"=",
"emb",
"encoder_final",
"=",
"(",
"mean",
",",
"mean",
")",
"return",
"encoder_final",
",",
"memory_bank"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/mean_encoder.py#L20-L29 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder_hi.py | python | RNNEncoder.forward | (self, src, lengths=None) | return encoder_final, memory_bank | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`EncoderBase.forward()`"
self._check_args(src, lengths)
emb = self.embeddings(src)
s_len, batch, emb_dim = emb.size() # e.g. (185, 16, 128); s_len varies per batch.
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Tensor.
lengths = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths)
memory_bank, encoder_final = self.rnn(packed_emb) # output, (hidden, cell), unpack using pad_packed_sequence()
# memory_bank is the output
# self.rnn is an LSTM(128, 256, bidirectional=True) # input dim; hidden dim;
# print('Hidden..', encoder_final[0].size(), encoder_final[1].size()) # both torch.Size([2, 16, 256]), 2 directions.
if lengths is not None and not self.no_pack_padded_seq:
memory_bank = unpack(memory_bank)[0]
if self.use_bridge:
encoder_final = self._bridge(encoder_final)
return encoder_final, memory_bank | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"s_len",
",",
"batch",
",",
"emb_dim",
"=",
"emb",
".",
"size",
"(",
")",
"# (185 16 128), s_len is changeable.",
"packed_emb",
"=",
"emb",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"# Lengths data is wrapped inside a Tensor.",
"lengths",
"=",
"lengths",
".",
"view",
"(",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"packed_emb",
"=",
"pack",
"(",
"emb",
",",
"lengths",
")",
"memory_bank",
",",
"encoder_final",
"=",
"self",
".",
"rnn",
"(",
"packed_emb",
")",
"# output, (hidden, cell), unpack using pad_packed_sequence()",
"# memory_bank is the output",
"# self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;",
"# print('Hidden..', encoder_final[0].size(), encoder_final[1].size()) # both torch.Size([2, 16, 256]), 2 directions.",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"memory_bank",
"=",
"unpack",
"(",
"memory_bank",
")",
"[",
"0",
"]",
"if",
"self",
".",
"use_bridge",
":",
"encoder_final",
"=",
"self",
".",
"_bridge",
"(",
"encoder_final",
")",
"print",
"(",
"'Out..'",
",",
"memory_bank",
".",
"size",
"(",
")",
")",
"#Out.. torch.Size([16, 512]) torch.Size([16, 512]) : two dir?",
"return",
"encoder_final",
",",
"memory_bank"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder_hi.py#L53-L82 |
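The pack/unpack pair above is the standard recipe for running an RNN over padded variable-length batches: packing lets the RNN skip pad positions, unpacking restores the (src_len, batch, dim) layout. Minimal sketch (sizes illustrative; in this PyTorch version the batch must be sorted by decreasing length):

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

emb = torch.randn(7, 3, 16)  # (src_len, batch, emb_dim), padded
lengths = [7, 5, 2]          # sorted longest-first
rnn = nn.LSTM(16, 32, bidirectional=True)

packed = pack(emb, lengths)
memory_bank, (h_n, c_n) = rnn(packed)
memory_bank = unpack(memory_bank)[0]  # back to (src_len, batch, 2 * 32)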
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder_hi.py | python | RNNEncoder._bridge | (self, hidden) | return outs | Forward hidden state through bridge | Forward hidden state through bridge | [
"Forward",
"hidden",
"state",
"through",
"bridge"
] | def _bridge(self, hidden):
"""
Forward hidden state through bridge
"""
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
return F.relu(result).view(size)
if isinstance(hidden, tuple): # LSTM
outs = tuple([bottle_hidden(layer, hidden[ix])
for ix, layer in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], hidden)
return outs | [
"def",
"_bridge",
"(",
"self",
",",
"hidden",
")",
":",
"def",
"bottle_hidden",
"(",
"linear",
",",
"states",
")",
":",
"\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"",
"size",
"=",
"states",
".",
"size",
"(",
")",
"result",
"=",
"linear",
"(",
"states",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"total_hidden_dim",
")",
")",
"return",
"F",
".",
"relu",
"(",
"result",
")",
".",
"view",
"(",
"size",
")",
"if",
"isinstance",
"(",
"hidden",
",",
"tuple",
")",
":",
"# LSTM",
"outs",
"=",
"tuple",
"(",
"[",
"bottle_hidden",
"(",
"layer",
",",
"hidden",
"[",
"ix",
"]",
")",
"for",
"ix",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"bridge",
")",
"]",
")",
"else",
":",
"outs",
"=",
"bottle_hidden",
"(",
"self",
".",
"bridge",
"[",
"0",
"]",
",",
"hidden",
")",
"return",
"outs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder_hi.py#L99-L116 |
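bottle_hidden's reshape-apply-restore trick is a general way to push a 3-D state through a 2-D nn.Linear: flatten the leading dimensions, apply the layer, then restore the original size. Standalone sketch (sizes illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

states = torch.randn(2, 16, 256)  # (layers * directions, batch, dim)
linear = nn.Linear(256, 256)

size = states.size()
out = F.relu(linear(states.view(-1, 256))).view(size)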
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/transformer.py | python | TransformerEncoderLayer.forward | (self, inputs, mask) | return self.feed_forward(out) | Transformer Encoder Layer definition.
Args:
inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`
mask (`LongTensor`): `[batch_size x src_len x src_len]`
Returns:
(`FloatTensor`):
* outputs `[batch_size x src_len x model_dim]` | Transformer Encoder Layer definition. | [
"Transformer",
"Encoder",
"Layer",
"definition",
"."
] | def forward(self, inputs, mask):
"""
Transformer Encoder Layer definition.
Args:
inputs (`FloatTensor`): `[batch_size x src_len x model_dim]`
mask (`LongTensor`): `[batch_size x src_len x src_len]`
Returns:
(`FloatTensor`):
* outputs `[batch_size x src_len x model_dim]`
"""
input_norm = self.layer_norm(inputs)
context, _ = self.self_attn(input_norm, input_norm, input_norm,
mask=mask)
out = self.dropout(context) + inputs
return self.feed_forward(out) | [
"def",
"forward",
"(",
"self",
",",
"inputs",
",",
"mask",
")",
":",
"input_norm",
"=",
"self",
".",
"layer_norm",
"(",
"inputs",
")",
"context",
",",
"_",
"=",
"self",
".",
"self_attn",
"(",
"input_norm",
",",
"input_norm",
",",
"input_norm",
",",
"mask",
"=",
"mask",
")",
"out",
"=",
"self",
".",
"dropout",
"(",
"context",
")",
"+",
"inputs",
"return",
"self",
".",
"feed_forward",
"(",
"out",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/transformer.py#L35-L52 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/transformer.py | python | TransformerEncoder.forward | (self, src, lengths=None) | return emb, out.transpose(0, 1).contiguous() | See :obj:`EncoderBase.forward()` | See :obj:`EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
""" See :obj:`EncoderBase.forward()`"""
self._check_args(src, lengths)
emb = self.embeddings(src)
out = emb.transpose(0, 1).contiguous()
words = src[:, :, 0].transpose(0, 1)
w_batch, w_len = words.size()
padding_idx = self.embeddings.word_padding_idx
mask = words.data.eq(padding_idx).unsqueeze(1) \
.expand(w_batch, w_len, w_len)
# Run the forward pass of every layer of the transformer.
for i in range(self.num_layers):
out = self.transformer[i](out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous() | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"out",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"words",
"=",
"src",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"w_batch",
",",
"w_len",
"=",
"words",
".",
"size",
"(",
")",
"padding_idx",
"=",
"self",
".",
"embeddings",
".",
"word_padding_idx",
"mask",
"=",
"words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"w_batch",
",",
"w_len",
",",
"w_len",
")",
"# Run the forward pass of every layer of the tranformer.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_layers",
")",
":",
"out",
"=",
"self",
".",
"transformer",
"[",
"i",
"]",
"(",
"out",
",",
"mask",
")",
"out",
"=",
"self",
".",
"layer_norm",
"(",
"out",
")",
"return",
"emb",
",",
"out",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/transformer.py#L98-L115 |
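The self-attention mask above just marks pad positions: eq(padding_idx) yields a (batch, src_len) boolean map that is broadcast to (batch, src_len, src_len), so every query position ignores padded keys. Sketch with illustrative token ids (pad index 1):

import torch

words = torch.tensor([[4, 9, 7, 1, 1],
                      [5, 3, 1, 1, 1]])  # (batch, src_len), 1 = pad
padding_idx = 1
w_batch, w_len = words.size()
mask = words.eq(padding_idx).unsqueeze(1).expand(w_batch, w_len, w_len)
# mask[b, q, k] is True where key position k is padding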
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_decoder.py | python | CNNDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None, step=None) | return outputs, state, attns | See :obj:`onmt.modules.RNNDecoderBase.forward()` | See :obj:`onmt.modules.RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):
""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
# NOTE: memory_lengths is only here for compatibility reasons
# with onmt.modules.RNNDecoderBase.forward()
# CHECKS
assert isinstance(state, CNNDecoderState)
_, tgt_batch, _ = tgt.size()
_, contxt_batch, _ = memory_bank.size()
aeq(tgt_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
tgt = torch.cat([state.previous_input, tgt], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1)
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_memory_bank_t, src_memory_bank_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(tgt)
return outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# NOTE: memory_lengths is only here for compatibility reasons",
"# with onmt.modules.RNNDecoderBase.forward()",
"# CHECKS",
"assert",
"isinstance",
"(",
"state",
",",
"CNNDecoderState",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"contxt_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"contxt_batch",
")",
"# END CHECKS",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"tgt",
"=",
"torch",
".",
"cat",
"(",
"[",
"state",
".",
"previous_input",
",",
"tgt",
"]",
",",
"0",
")",
"# Initialize return variables.",
"outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"assert",
"not",
"self",
".",
"_copy",
",",
"\"Copy mechanism not yet tested in conv2conv\"",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"tgt_emb",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The output of CNNEncoder.",
"src_memory_bank_t",
"=",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The combination of output of CNNEncoder and source embeddings.",
"src_memory_bank_c",
"=",
"state",
".",
"init_src",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# Run the forward pass of the CNNDecoder.",
"emb_reshape",
"=",
"tgt_emb",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
"*",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"linear_out",
"=",
"self",
".",
"linear",
"(",
"emb_reshape",
")",
"x",
"=",
"linear_out",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
",",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"x",
"=",
"shape_transform",
"(",
"x",
")",
"pad",
"=",
"torch",
".",
"zeros",
"(",
"x",
".",
"size",
"(",
"0",
")",
",",
"x",
".",
"size",
"(",
"1",
")",
",",
"self",
".",
"cnn_kernel_width",
"-",
"1",
",",
"1",
")",
"pad",
"=",
"pad",
".",
"type_as",
"(",
"x",
")",
"base_target_emb",
"=",
"x",
"for",
"conv",
",",
"attention",
"in",
"zip",
"(",
"self",
".",
"conv_layers",
",",
"self",
".",
"attn_layers",
")",
":",
"new_target_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"pad",
",",
"x",
"]",
",",
"2",
")",
"out",
"=",
"conv",
"(",
"new_target_input",
")",
"c",
",",
"attn",
"=",
"attention",
"(",
"base_target_emb",
",",
"out",
",",
"src_memory_bank_t",
",",
"src_memory_bank_c",
")",
"x",
"=",
"(",
"x",
"+",
"(",
"c",
"+",
"out",
")",
"*",
"SCALE_WEIGHT",
")",
"*",
"SCALE_WEIGHT",
"output",
"=",
"x",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# Process the result and update the attentions.",
"outputs",
"=",
"output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"outputs",
"=",
"outputs",
"[",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
"attn",
"=",
"attn",
"[",
":",
",",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
".",
"squeeze",
"(",
")",
"attn",
"=",
"torch",
".",
"stack",
"(",
"[",
"attn",
"]",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"attn",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attn",
"# Update the state.",
"state",
".",
"update_state",
"(",
"tgt",
")",
"return",
"outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_decoder.py#L58-L122 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_decoder.py | python | CNNDecoder.init_decoder_state | (self, _, memory_bank, enc_hidden, with_cache=False) | return CNNDecoderState(memory_bank, enc_hidden) | Init decoder state. | Init decoder state. | [
"Init",
"decoder",
"state",
"."
] | def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):
"""
Init decoder state.
"""
return CNNDecoderState(memory_bank, enc_hidden) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"_",
",",
"memory_bank",
",",
"enc_hidden",
",",
"with_cache",
"=",
"False",
")",
":",
"return",
"CNNDecoderState",
"(",
"memory_bank",
",",
"enc_hidden",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_decoder.py#L124-L128 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_decoder.py | python | CNNDecoderState._all | (self) | return (self.previous_input,) | Contains attributes that need to be updated in self.beam_update(). | Contains attributes that need to be updated in self.beam_update(). | [
"Contains",
"attributes",
"that",
"need",
"to",
"be",
"updated",
"in",
"self",
".",
"beam_update",
"()",
"."
] | def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,) | [
"def",
"_all",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"previous_input",
",",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_decoder.py#L141-L145 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_decoder.py | python | CNNDecoderState.update_state | (self, new_input) | Called for every decoder forward pass. | Called for every decoder forward pass. | [
"Called",
"for",
"every",
"decoder",
"forward",
"pass",
"."
] | def update_state(self, new_input):
""" Called for every decoder forward pass. """
self.previous_input = new_input | [
"def",
"update_state",
"(",
"self",
",",
"new_input",
")",
":",
"self",
".",
"previous_input",
"=",
"new_input"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_decoder.py#L150-L152 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_decoder.py | python | CNNDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = self.init_src.data.repeat(1, beam_size, 1) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"self",
".",
"init_src",
"=",
"self",
".",
"init_src",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_decoder.py#L154-L156 |
||
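repeat_beam_size_times above leans on Tensor.repeat(1, beam_size, 1) to give every beam hypothesis its own copy of the encoder output. A minimal sketch of that tiling; the sizes and beam_size value are illustrative:

import torch

src_len, batch, dim = 5, 2, 8
init_src = torch.randn(src_len, batch, dim)

beam_size = 3
# repeat(1, beam_size, 1) concatenates beam_size copies along dim 1,
# turning [src_len, batch, dim] into [src_len, batch * beam_size, dim].
tiled = init_src.repeat(1, beam_size, 1)
print(tiled.shape)  # torch.Size([5, 6, 8])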
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/encoder.py | python | EncoderBase.forward | (self, src, lengths=None) | Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]`
Returns:
(tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
* final encoder state, used to initialize decoder
* memory bank for attention, `[src_len x batch x hidden]` | Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]` | [
"Args",
":",
"src",
"(",
":",
"obj",
":",
"LongTensor",
")",
":",
"padded",
"sequences",
"of",
"sparse",
"indices",
"[",
"src_len",
"x",
"batch",
"x",
"nfeat",
"]",
"lengths",
"(",
":",
"obj",
":",
"LongTensor",
")",
":",
"length",
"of",
"each",
"sequence",
"[",
"batch",
"]"
] | def forward(self, src, lengths=None):
"""
Args:
src (:obj:`LongTensor`):
padded sequences of sparse indices `[src_len x batch x nfeat]`
lengths (:obj:`LongTensor`): length of each sequence `[batch]`
Returns:
(tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):
* final encoder state, used to initialize decoder
* memory bank for attention, `[src_len x batch x hidden]`
"""
raise NotImplementedError | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/encoder.py#L41-L54 |
||
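The EncoderBase.forward record above only pins down an interface: a final state for the decoder plus a `[src_len x batch x hidden]` memory bank. A hypothetical toy subclass that satisfies those shapes; the embedding-only design is an assumption for illustration, not part of the library:

import torch
import torch.nn as nn

class IdentityEncoder(nn.Module):
    """Hypothetical EncoderBase-style encoder: embeds tokens and returns
    a zero final state plus the embeddings as the memory bank."""
    def __init__(self, vocab_size, hidden):
        super().__init__()
        self.emb = nn.Embedding(vocab_size, hidden)
        self.hidden = hidden

    def forward(self, src, lengths=None):
        # src: [src_len, batch, nfeat=1] -> memory bank [src_len, batch, hidden]
        memory_bank = self.emb(src.squeeze(-1))
        final = memory_bank.new_zeros(1, src.size(1), self.hidden)
        return final, memory_bank

enc = IdentityEncoder(vocab_size=100, hidden=16)
src = torch.randint(0, 100, (7, 2, 1))
final, bank = enc(src)
print(final.shape, bank.shape)  # torch.Size([1, 2, 16]) torch.Size([7, 2, 16])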
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder.py | python | RNNEncoder.build_sentence_layer | (self,memory_bank,src_sents) | return sent_output | In this method we define sentence level representation. (This is the old version)
:param memory_bank:
:param encoder_final:
:param src_sents:
:return: sentence embeddings | In this method we define sentence level representation. (This is the old version)
:param memory_bank:
:param encoder_final:
:param src_sents:
:return: sentence embeddings | [
"In",
"this",
"method",
"we",
"define",
"sentence",
"level",
"representation",
".",
"(",
"This",
"is",
"the",
"old",
"version",
")",
":",
"param",
"memory_bank",
":",
":",
"param",
"encoder_final",
":",
":",
"param",
"src_sents",
":",
":",
"return",
":",
"sentence",
"embeddings"
] | def build_sentence_layer(self,memory_bank,src_sents):
'''
In this method we define sentence level representation. (This is the old version)
:param memory_bank:
:param encoder_final:
:param src_sents:
:return: sentence embeddings
'''
# print('Memory..', memory_bank.size()) # torch.Size([200, 2, 512]) TODO: this is the output
# #
# print ('encoder_final..',encoder_final) #
if isinstance(memory_bank, torch.nn.utils.rnn.PackedSequence):
memory_bank = nn.utils.rnn.pad_packed_sequence(memory_bank)[0] # as after unpack it is a tuple
hidden = memory_bank.permute(1,0,2) # size: (2,200,512)
# print ('in func...', src_sents)
# in each case for the current batch, send the last hidden output as the input to the sent_lstm layer
batch_input_list = []
for output,sent_id in zip(hidden,src_sents): # so we have batch_size to be 1
common_len = len(sent_id)
output = output.unsqueeze(1)
sent_input_list = []
# first id
start_ind_sent_id = 0
start_ind = sent_id[start_ind_sent_id]
while (start_ind < output.size()[0]) and (start_ind_sent_id < sent_id.size()[0]):
# add
sent_input_list.append(output[start_ind])
# both ids move to the next
start_ind_sent_id += 1
if start_ind_sent_id < sent_id.size()[0]:
start_ind += sent_id[start_ind_sent_id]
else:
break
# FEB 10, len check
if len(sent_input_list) < common_len:
# pad with zero
pad_size = output[0].size()
zeros = torch.zeros(pad_size, dtype=torch.float32).cuda()
pad_list = [zeros]* (common_len-len(sent_input_list))
sent_input_list = sent_input_list + pad_list
sent_input = torch.cat(sent_input_list,0).unsqueeze(1) # (n_sent, batch_size=1,dim=512)
batch_input_list.append(sent_input)
# print ([x.size() for x in batch_input_list])
# [torch.Size([18, 1, 512]), torch.Size([15, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512])]
batch_input_list_concat = torch.cat(batch_input_list,1)
# get the id of sent length:
sent_output, (h_, c_) = self.sent_rnn(batch_input_list_concat)
# LSTM(512, 256, bidirectional=True), sent_output has the same shape with batch_input_list_concat
#sent_output: shape(number of sents or step, batch_size, dim) (9, 2, 512), number of sents or step can be different
# print ('Encoder Sentence_output...',sent_output.size())
return sent_output | [
"def",
"build_sentence_layer",
"(",
"self",
",",
"memory_bank",
",",
"src_sents",
")",
":",
"# print('Memory..', memory_bank.size()) # torch.Size([200, 2, 512]) TODO: this is the output",
"# #",
"# print ('encoder_final..',encoder_final) #",
"if",
"isinstance",
"(",
"memory_bank",
",",
"torch",
".",
"nn",
".",
"utils",
".",
"rnn",
".",
"PackedSequence",
")",
":",
"memory_bank",
"=",
"nn",
".",
"utils",
".",
"rnn",
".",
"pad_packed_sequence",
"(",
"memory_bank",
")",
"[",
"0",
"]",
"# as after unpack it is a tuple",
"hidden",
"=",
"memory_bank",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"# size: (2,200,512)",
"# print ('in func...', src_sents)",
"# in each case for the current batch, send the last hidden output as the input to the sent_lstm layer",
"batch_input_list",
"=",
"[",
"]",
"for",
"output",
",",
"sent_id",
"in",
"zip",
"(",
"hidden",
",",
"src_sents",
")",
":",
"# so we have batch_size to be 1",
"common_len",
"=",
"len",
"(",
"sent_id",
")",
"output",
"=",
"output",
".",
"unsqueeze",
"(",
"1",
")",
"sent_input_list",
"=",
"[",
"]",
"# firs id",
"start_ind_sent_id",
"=",
"0",
"start_ind",
"=",
"sent_id",
"[",
"start_ind_sent_id",
"]",
"while",
"(",
"start_ind",
"<",
"output",
".",
"size",
"(",
")",
"[",
"0",
"]",
")",
"and",
"(",
"start_ind_sent_id",
"<",
"sent_id",
".",
"size",
"(",
")",
"[",
"0",
"]",
")",
":",
"# add",
"sent_input_list",
".",
"append",
"(",
"output",
"[",
"start_ind",
"]",
")",
"# both ids move to the next",
"start_ind_sent_id",
"+=",
"1",
"if",
"start_ind_sent_id",
"<",
"sent_id",
".",
"size",
"(",
")",
"[",
"0",
"]",
":",
"start_ind",
"+=",
"sent_id",
"[",
"start_ind_sent_id",
"]",
"else",
":",
"break",
"# FEB 10, len check",
"if",
"len",
"(",
"sent_input_list",
")",
"<",
"common_len",
":",
"# pad with zero",
"pad_size",
"=",
"output",
"[",
"0",
"]",
".",
"size",
"(",
")",
"zeros",
"=",
"torch",
".",
"zeros",
"(",
"pad_size",
",",
"dtype",
"=",
"torch",
".",
"float32",
")",
".",
"cuda",
"(",
")",
"pad_list",
"=",
"[",
"zeros",
"]",
"*",
"(",
"common_len",
"-",
"len",
"(",
"sent_input_list",
")",
")",
"sent_input_list",
"=",
"sent_input_list",
"+",
"pad_list",
"sent_input",
"=",
"torch",
".",
"cat",
"(",
"sent_input_list",
",",
"0",
")",
".",
"unsqueeze",
"(",
"1",
")",
"# (n_sent, batch_size=1,dim=512)",
"batch_input_list",
".",
"append",
"(",
"sent_input",
")",
"# print ([x.size() for x in batch_input_list])",
"# [torch.Size([18, 1, 512]), torch.Size([15, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512]), torch.Size([18, 1, 512])]",
"batch_input_list_concat",
"=",
"torch",
".",
"cat",
"(",
"batch_input_list",
",",
"1",
")",
"# get the id of sent length:",
"sent_output",
",",
"(",
"h_",
",",
"c_",
")",
"=",
"self",
".",
"sent_rnn",
"(",
"batch_input_list_concat",
")",
"# LSTM(512, 256, bidirectional=True), sent_output has the same shape with batch_input_list_concat",
"#sent_output: shape(number of sents or step, batch_size, dim) (9, 2, 512), number of sents or step can be different",
"# print ('Encoder Sentence_output...',sent_output.size())",
"return",
"sent_output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder.py#L71-L150 |
|
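The start_ind bookkeeping loop in build_sentence_layer above chunks word-level outputs by the per-sentence lengths in src_sents; torch.split expresses the same chunking directly. A minimal sketch, assuming the lengths sum to the sequence length (all sizes illustrative):

import torch

seq_len, dim = 12, 4
output = torch.randn(seq_len, dim)   # word-level hidden states for one example
sent_lens = [5, 3, 4]                # illustrative sentence lengths, sum == seq_len

# torch.split with a list of sizes yields one chunk per sentence.
chunks = torch.split(output, sent_lens, dim=0)
# One vector per sentence, e.g. the last word's hidden state of each chunk.
sent_reps = torch.stack([c[-1] for c in chunks])
print(sent_reps.shape)  # torch.Size([3, 4])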
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder.py | python | RNNEncoder.sent_level_encoder | (self, memory_bank) | | return sent_output | This is the sentence level encoder: it takes a batch of sentence encodings,
then feeds them into another sentence level rnn
:param memory_bank: sentence encoding (a list of packed)
:return: output of the rnn layer | This is the sentence level encoder: it takes a batch of sentence encodings,
then feeds them into another sentence level rnn
:param memory_bank: sentence encoding (a list of packed)
:return: output of the rnn layer | [
"This",
"is",
"the",
"sentence",
"level",
"encoder",
"it",
"takes",
"a",
"bunch",
"of",
"sentence",
"encoding",
"then",
"feed",
"into",
"another",
"sentence",
"level",
"rnn",
":",
"param",
"memory_bank",
":",
"sentence",
"encoding",
"(",
"a",
"list",
"of",
"packed",
")",
":",
"return",
":",
"output",
"of",
"the",
"rnn",
"layer"
] | def sent_level_encoder(self, memory_bank):
'''
This is the sentence level encoder: it takes a batch of sentence encodings,
then feeds them into another sentence level rnn
:param memory_bank: sentence encoding (a list of packed)
:return: output of the rnn layer
'''
if isinstance(memory_bank, torch.nn.utils.rnn.PackedSequence):
memory_bank_unpacked = nn.utils.rnn.pad_packed_sequence(memory_bank)[0].permute(1,0,2)# as after unpack it is a tuple
# memory_bank_unpacked size: torch.Size([42, 9, 512]) # [seq_len,batch_size,512]
# take the last hidden state of each
last_hidden = [x[-1].unsqueeze(0) for x in memory_bank_unpacked]
last_hidden = torch.cat(last_hidden, 0).unsqueeze(0) # size is [1,9,512]
sent_output, (h_, c_) = self.sent_rnn(last_hidden)
return sent_output | [
"def",
"sent_level_encoder",
"(",
"self",
",",
"memory_bank",
")",
":",
"if",
"isinstance",
"(",
"memory_bank",
",",
"torch",
".",
"nn",
".",
"utils",
".",
"rnn",
".",
"PackedSequence",
")",
":",
"memory_bank_unpacked",
"=",
"nn",
".",
"utils",
".",
"rnn",
".",
"pad_packed_sequence",
"(",
"memory_bank",
")",
"[",
"0",
"]",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"# as after unpack it is a tuple",
"# memory_bank_unpacked size: torch.Size([42, 9, 512]) # [seq_len,batch_size,512]",
"# take the last hiddent state of each",
"last_hidden",
"=",
"[",
"x",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"for",
"x",
"in",
"memory_bank_unpacked",
"]",
"last_hidden",
"=",
"torch",
".",
"cat",
"(",
"last_hidden",
",",
"0",
")",
".",
"unsqueeze",
"(",
"0",
")",
"# size is [1,9,512]",
"sent_output",
",",
"(",
"h_",
",",
"c_",
")",
"=",
"self",
".",
"sent_rnn",
"(",
"last_hidden",
")",
"return",
"sent_output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder.py#L153-L173 |
|
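sent_level_encoder above summarizes each unpacked sequence with x[-1]; when sequences are padded, the last valid step instead depends on the true length, which gather can select. A minimal sketch with illustrative shapes:

import torch

seq_len, batch, dim = 6, 3, 4
outputs = torch.randn(seq_len, batch, dim)   # e.g. pad_packed_sequence output
lengths = torch.tensor([6, 4, 2])            # true lengths per batch element

# Index of the last valid step for each sequence, expanded to gather a
# [1, batch, dim] slice, then squeezed to [batch, dim].
idx = (lengths - 1).view(1, batch, 1).expand(1, batch, dim)
last = outputs.gather(0, idx).squeeze(0)
print(last.shape)  # torch.Size([3, 4])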
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder.py | python | RNNEncoder.forward_new | (self, src, src_sents=None, lengths=None) | return final_encoder_final, final_memory_bank, final_sent_output | New Forward | New Forward | [
"New",
"Forward"
] | def forward_new(self, src, src_sents=None, lengths=None):
"New Forward`"
self._check_args(src, lengths)
emb = self.embeddings(src)
s_len, batch, emb_dim = emb.size() # (185 16 128), s_len is sequence_len.
# 2333 TODO: change starts here
# Feb15: we break this into sentences
# iterate each batch..
input_embeddings=emb.permute(1,0,2)
final_memory_bank = []
final_encoder_final = []
final_sent_output = []
for batch_id in range(batch):
# this is the input to word-level lstm
current_sequence = input_embeddings[batch_id] # size id (sequence_len, emb_dim)
# break this into multiple sentences according to the sentence lengths, and input to the rnn
# sent len check, define len_sequence to be: tensor([26, 17, 21, 23, 19, 26, 10, 42], device='cuda:0')
if torch.sum(src_sents[batch_id]) >= s_len:
# if exceeds the total length, then there is a bug
len_sequence = src_sents[batch_id][:-1]
else:
len_sequence = src_sents[batch_id]
counter = 0
feeding_as_a_batch = []
lengths_as_a_batch = []
lengths = []
actual_len = 0
for idx in len_sequence:
if (counter < s_len ) and (idx != 0):
actual_len += 1
# from the current_sequence, add to the rnn
feeding_sequence = current_sequence[counter:counter+idx].unsqueeze(0)
feeding_as_a_batch.append(feeding_sequence.permute(1,0,2)) #feeding_sequence size = [1,26,128]
counter += idx
# feed into rnn
lengths_as_a_batch.append(idx)
feeding_as_a_batch_padded = torch.cat([x for x in pad_sequence(feeding_as_a_batch,batch_first=True)],1)
# feed into rnn size: torch.Size([42, 9, 128]) -> [max, batch_size, dim]
max_dim = feeding_as_a_batch_padded.size()[0]
lengths_as_a_batch = [max_dim for x in range(actual_len)]
# lengths_as_a_batch = [item for sublist in lengths_as_a_batch for item in sublist]
if lengths_as_a_batch is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Tensor.
packed_emb_rnn_input = pack(feeding_as_a_batch_padded, lengths_as_a_batch)
# feed into!
memory_bank, encoder_final = self.rnn(packed_emb_rnn_input)
# feed into sentence_level
sent_output = self.sent_level_encoder(memory_bank)
final_sent_output.append(sent_output.view(-1,4*emb_dim))
if lengths is not None and not self.no_pack_padded_seq:
memory_bank = unpack(memory_bank)[0]
# we need to get the original output, before padded
revised_memory_bank = memory_bank.permute(1, 0, 2)
memory_bank_unpadded_list = []
for idx in range(actual_len):
memory_bank_unpadded_list.append(revised_memory_bank[idx][:len_sequence[idx]])
unpadded_memory_bank = torch.cat(memory_bank_unpadded_list,0) # size is [sequence_len,512] # need to pad or truncate
actual_size = unpadded_memory_bank.size()[0]
if actual_size >= s_len:
padded_memory_bank = unpadded_memory_bank[:s_len]
# print ('Size is okk..', padded_memory_bank.size())
else:
# pad with zero
pad_size = s_len - actual_size
padded_memory_bank = F.pad(unpadded_memory_bank, (0,0,0,pad_size), 'constant',0.0)
# print ('Padded...',unpadded_memory_bank.size(),pad_size,padded_memory_bank.size())
# print (actual_size,s_len,padded_memory_bank.size())
final_memory_bank.append(padded_memory_bank.unsqueeze(1))
# finish processing on memory bank
if self.use_bridge:
encoder_final = self._bridge(encoder_final)
final_encoder_final.append(tuple([x[:,-1,:].unsqueeze(1) for x in encoder_final]))
# add unpacked from final_memory_bank
final_memory_bank = torch.cat(final_memory_bank,1) # [200, 2, 512], ready to return
# join the encoder_final
hs = []
cs = []
for (h,c) in final_encoder_final:
hs.append(h)
cs.append(c)
hs = torch.cat(hs,1)
cs = torch.cat(cs,1)
# encoder_final
final_encoder_final = tuple([hs,cs]) # ready to return
# sent output
final_sent_output = pad_sequence(final_sent_output) # size [9,2,512], ready to return
# 2333 TODO: change finish here
# import pdb;pdb.set_trace()
return final_encoder_final, final_memory_bank, final_sent_output | [
"def",
"forward_new",
"(",
"self",
",",
"src",
",",
"src_sents",
"=",
"None",
",",
"lengths",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"s_len",
",",
"batch",
",",
"emb_dim",
"=",
"emb",
".",
"size",
"(",
")",
"# (185 16 128), s_len is sequence_len.",
"# 2333 TODO: change starts here",
"# Feb15: we break this into sentences",
"# iterate each batch..",
"input_embeddings",
"=",
"emb",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"final_memory_bank",
"=",
"[",
"]",
"final_encoder_final",
"=",
"[",
"]",
"final_sent_output",
"=",
"[",
"]",
"for",
"batch_id",
"in",
"range",
"(",
"batch",
")",
":",
"# this is the input to word-level lstm",
"current_sequence",
"=",
"input_embeddings",
"[",
"batch_id",
"]",
"# size id (sequence_len, emb_dim)",
"# break this into multiple sentences according to the sentence lengths, and input to the rnn",
"# sent len check, define len_sequence to be: tensor([26, 17, 21, 23, 19, 26, 10, 42], device='cuda:0')",
"if",
"torch",
".",
"sum",
"(",
"src_sents",
"[",
"batch_id",
"]",
")",
">=",
"s_len",
":",
"# if exceeds the total length, then their is a bug",
"len_sequence",
"=",
"src_sents",
"[",
"batch_id",
"]",
"[",
":",
"-",
"1",
"]",
"else",
":",
"len_sequence",
"=",
"src_sents",
"[",
"batch_id",
"]",
"counter",
"=",
"0",
"feeding_as_a_batch",
"=",
"[",
"]",
"lengths_as_a_batch",
"=",
"[",
"]",
"lengths",
"=",
"[",
"]",
"actual_len",
"=",
"0",
"for",
"idx",
"in",
"len_sequence",
":",
"if",
"(",
"counter",
"<",
"s_len",
")",
"and",
"(",
"idx",
"!=",
"0",
")",
":",
"actual_len",
"+=",
"1",
"# from the current_sequence, add to the rnn",
"feeding_sequence",
"=",
"current_sequence",
"[",
"counter",
":",
"counter",
"+",
"idx",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"feeding_as_a_batch",
".",
"append",
"(",
"feeding_sequence",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
")",
"#feeding_sequence size = [1,26,128]",
"counter",
"+=",
"idx",
"# feed into rnn",
"lengths_as_a_batch",
".",
"append",
"(",
"idx",
")",
"feeding_as_a_batch_padded",
"=",
"torch",
".",
"cat",
"(",
"[",
"x",
"for",
"x",
"in",
"pad_sequence",
"(",
"feeding_as_a_batch",
",",
"batch_first",
"=",
"True",
")",
"]",
",",
"1",
")",
"# feed into rnn size: torch.Size([42, 9, 128]) -> [max, batch_size, dim]",
"max_dim",
"=",
"feeding_as_a_batch_padded",
".",
"size",
"(",
")",
"[",
"0",
"]",
"lengths_as_a_batch",
"=",
"[",
"max_dim",
"for",
"x",
"in",
"range",
"(",
"actual_len",
")",
"]",
"# lengths_as_a_batch = [item for sublist in lengths_as_a_batch for item in sublist]",
"if",
"lengths_as_a_batch",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"# Lengths data is wrapped inside a Tensor.",
"packed_emb_rnn_input",
"=",
"pack",
"(",
"feeding_as_a_batch_padded",
",",
"lengths_as_a_batch",
")",
"# feed into!",
"memory_bank",
",",
"encoder_final",
"=",
"self",
".",
"rnn",
"(",
"packed_emb_rnn_input",
")",
"# feed into sentence_level",
"sent_output",
"=",
"self",
".",
"sent_level_encoder",
"(",
"memory_bank",
")",
"final_sent_output",
".",
"append",
"(",
"sent_output",
".",
"view",
"(",
"-",
"1",
",",
"4",
"*",
"emb_dim",
")",
")",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"memory_bank",
"=",
"unpack",
"(",
"memory_bank",
")",
"[",
"0",
"]",
"# we need to get the original output, before padded",
"revised_memory_bank",
"=",
"memory_bank",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"memory_bank_unpadded_list",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"actual_len",
")",
":",
"memory_bank_unpadded_list",
".",
"append",
"(",
"revised_memory_bank",
"[",
"idx",
"]",
"[",
":",
"len_sequence",
"[",
"idx",
"]",
"]",
")",
"unpadded_memory_bank",
"=",
"torch",
".",
"cat",
"(",
"memory_bank_unpadded_list",
",",
"0",
")",
"# size is [sequence_len,512] # need to pad or truncate",
"actual_size",
"=",
"unpadded_memory_bank",
".",
"size",
"(",
")",
"[",
"0",
"]",
"if",
"actual_size",
">=",
"s_len",
":",
"padded_memory_bank",
"=",
"unpadded_memory_bank",
"[",
":",
"s_len",
"]",
"# print ('Size is okk..', padded_memory_bank.size())",
"else",
":",
"# pad with zero",
"pad_size",
"=",
"s_len",
"-",
"actual_size",
"padded_memory_bank",
"=",
"F",
".",
"pad",
"(",
"unpadded_memory_bank",
",",
"(",
"0",
",",
"0",
",",
"0",
",",
"pad_size",
")",
",",
"'constant'",
",",
"0.0",
")",
"# print ('Padded...',unpadded_memory_bank.size(),pad_size,padded_memory_bank.size())",
"# print (actual_size,s_len,padded_memory_bank.size())",
"final_memory_bank",
".",
"append",
"(",
"padded_memory_bank",
".",
"unsqueeze",
"(",
"1",
")",
")",
"# finish processing on memory bank",
"if",
"self",
".",
"use_bridge",
":",
"encoder_final",
"=",
"self",
".",
"_bridge",
"(",
"encoder_final",
")",
"final_encoder_final",
".",
"append",
"(",
"tuple",
"(",
"[",
"x",
"[",
":",
",",
"-",
"1",
",",
":",
"]",
".",
"unsqueeze",
"(",
"1",
")",
"for",
"x",
"in",
"encoder_final",
"]",
")",
")",
"# add unpacked from final_memory_bank",
"final_memory_bank",
"=",
"torch",
".",
"cat",
"(",
"final_memory_bank",
",",
"1",
")",
"# [200, 2, 512], ready to return",
"# join the encoder_final",
"hs",
"=",
"[",
"]",
"cs",
"=",
"[",
"]",
"for",
"(",
"h",
",",
"c",
")",
"in",
"final_encoder_final",
":",
"hs",
".",
"append",
"(",
"h",
")",
"cs",
".",
"append",
"(",
"c",
")",
"hs",
"=",
"torch",
".",
"cat",
"(",
"hs",
",",
"1",
")",
"cs",
"=",
"torch",
".",
"cat",
"(",
"cs",
",",
"1",
")",
"# encoder_final",
"final_encoder_final",
"=",
"tuple",
"(",
"[",
"hs",
",",
"cs",
"]",
")",
"# ready to return",
"# sent output",
"final_sent_output",
"=",
"pad_sequence",
"(",
"final_sent_output",
")",
"# size [9,2,512], ready to return",
"# 2333 TODO: change finish here",
"# import pdb;pdb.set_trace()",
"return",
"final_encoder_final",
",",
"final_memory_bank",
",",
"final_sent_output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder.py#L175-L298 |
|
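forward_new above forces every example's memory bank back to the batch sequence length by slicing when too long and F.pad-ing with zeros when too short. The same pad-or-truncate step in isolation (sizes illustrative):

import torch
import torch.nn.functional as F

def pad_or_truncate(x, s_len):
    # x: [actual_len, dim] -> [s_len, dim], zero-padded at the bottom if short.
    if x.size(0) >= s_len:
        return x[:s_len]
    # F.pad's pair format for 2-D input: (left, right, top, bottom).
    return F.pad(x, (0, 0, 0, s_len - x.size(0)), 'constant', 0.0)

print(pad_or_truncate(torch.randn(7, 5), 10).shape)   # torch.Size([10, 5])
print(pad_or_truncate(torch.randn(12, 5), 10).shape)  # torch.Size([10, 5])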
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder.py | python | RNNEncoder.forward | (self, src, src_sents=None, lengths=None) | return encoder_final, memory_bank, sent_output | forward_original | forward_original | [
"forward_original"
] | def forward(self, src, src_sents=None, lengths=None):
"forward_original"
#print ('Original!')
self._check_args(src, lengths)
emb = self.embeddings(src)
s_len, batch, emb_dim = emb.size() # (185 16 128), s_len is changeable.
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Tensor.
lengths = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths)
memory_bank, encoder_final = self.rnn(packed_emb) # output, (hidden, cell), unpack using pad_packed_sequence(), encoder_final is the last state, a list (contains the batch)
# encoder_final size: a list, len is the batch size; for each item, size [2, 2, 256]
# memory_bank is the output
# output, (hidden, cell), unpack using pad_packed_sequence()
# self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;
# print ('forwarding... src_sents',src_sents)
# get sentence embedding
sent_output = self.build_sentence_layer(memory_bank,src_sents)
# sent_output size: torch.Size([9, 2, 512])
# print ('We need...!!!',src_sents.size(),src_sents)
if lengths is not None and not self.no_pack_padded_seq:
memory_bank = unpack(memory_bank)[0]
# memory_bank size torch.Size([200, 2, 512])
# encoder_final: a tuple of 2 (batch size)
# each of it has the size of torch.Size([2, 2, 256])
if self.use_bridge:
encoder_final = self._bridge(encoder_final)
# encoder_final same shape as before
return encoder_final, memory_bank, sent_output | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"src_sents",
"=",
"None",
",",
"lengths",
"=",
"None",
")",
":",
"#print ('Original!')",
"self",
".",
"_check_args",
"(",
"src",
",",
"lengths",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"src",
")",
"s_len",
",",
"batch",
",",
"emb_dim",
"=",
"emb",
".",
"size",
"(",
")",
"# (185 16 128), s_len is changeable.",
"packed_emb",
"=",
"emb",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"# Lengths data is wrapped inside a Tensor.",
"lengths",
"=",
"lengths",
".",
"view",
"(",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"packed_emb",
"=",
"pack",
"(",
"emb",
",",
"lengths",
")",
"memory_bank",
",",
"encoder_final",
"=",
"self",
".",
"rnn",
"(",
"packed_emb",
")",
"# output, (hidden, cell), unpack using pad_packed_sequence(), encoder_final is the last state, a list (contains the batch)",
"# encoder_final size: a list, len is the batch size; for each item, size [2, 2, 256]",
"# memory_bank is the output",
"# output, (hidden, cell), unpack using pad_packed_sequence()",
"# self.rnn is a LSTM(128, 256, bidirectional=True) # input dim; output dim;",
"# print ('forwarding... src_sents',src_sents)",
"# get sentence embedding",
"sent_output",
"=",
"self",
".",
"build_sentence_layer",
"(",
"memory_bank",
",",
"src_sents",
")",
"# sent_output size: torch.Size([9, 2, 512])",
"# print ('We need...!!!',src_sents.size(),src_sents)",
"if",
"lengths",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"no_pack_padded_seq",
":",
"memory_bank",
"=",
"unpack",
"(",
"memory_bank",
")",
"[",
"0",
"]",
"# memory_bank size torch.Size([200, 2, 512])",
"# encoder_final: a tuple of 2 (batch size)",
"# each of it has the size of torch.Size([2, 2, 256])",
"if",
"self",
".",
"use_bridge",
":",
"encoder_final",
"=",
"self",
".",
"_bridge",
"(",
"encoder_final",
")",
"# encoder_final same shape as before",
"return",
"encoder_final",
",",
"memory_bank",
",",
"sent_output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder.py#L300-L348 |
|
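RNNEncoder.forward above is the standard pack/run/unpack pipeline, so the LSTM never computes over padding. A minimal sketch with illustrative sizes; it assumes sequences are sorted longest-first, as pack_padded_sequence requires by default:

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

seq_len, batch, emb_dim, hid = 7, 3, 8, 6
emb = torch.randn(seq_len, batch, emb_dim)
lengths = [7, 5, 3]                      # sorted, longest first

rnn = nn.LSTM(emb_dim, hid, bidirectional=True)
packed = pack(emb, lengths)
memory_bank, (h, c) = rnn(packed)        # padded steps are never computed
memory_bank, _ = unpack(memory_bank)
print(memory_bank.shape)                 # torch.Size([7, 3, 12]) (2 directions * hid)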
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/rnn_encoder.py | python | RNNEncoder._bridge | (self, hidden) | return outs | Forward hidden state through bridge | Forward hidden state through bridge | [
"Forward",
"hidden",
"state",
"through",
"bridge"
] | def _bridge(self, hidden):
"""
Forward hidden state through bridge
"""
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
return F.relu(result).view(size)
if isinstance(hidden, tuple): # LSTM
outs = tuple([bottle_hidden(layer, hidden[ix])
for ix, layer in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], hidden)
return outs | [
"def",
"_bridge",
"(",
"self",
",",
"hidden",
")",
":",
"def",
"bottle_hidden",
"(",
"linear",
",",
"states",
")",
":",
"\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"",
"size",
"=",
"states",
".",
"size",
"(",
")",
"result",
"=",
"linear",
"(",
"states",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"total_hidden_dim",
")",
")",
"return",
"F",
".",
"relu",
"(",
"result",
")",
".",
"view",
"(",
"size",
")",
"if",
"isinstance",
"(",
"hidden",
",",
"tuple",
")",
":",
"# LSTM",
"outs",
"=",
"tuple",
"(",
"[",
"bottle_hidden",
"(",
"layer",
",",
"hidden",
"[",
"ix",
"]",
")",
"for",
"ix",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"bridge",
")",
"]",
")",
"else",
":",
"outs",
"=",
"bottle_hidden",
"(",
"self",
".",
"bridge",
"[",
"0",
"]",
",",
"hidden",
")",
"return",
"outs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/rnn_encoder.py#L365-L382 |
|
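_bridge above flattens each hidden state to 2-D, applies a Linear followed by ReLU, and restores the original shape via bottle_hidden. A standalone version of that trick (dimensions illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

layers, batch, hid = 2, 3, 8
total_hidden_dim = hid                    # per-state feature size fed to the bridge
bridge = nn.Linear(total_hidden_dim, total_hidden_dim)

def bottle_hidden(linear, states):
    # [layers, batch, hid] -> [layers*batch, hid] -> linear+ReLU -> restore shape
    size = states.size()
    return F.relu(linear(states.view(-1, total_hidden_dim))).view(size)

h = torch.randn(layers, batch, hid)
print(bottle_hidden(bridge, h).shape)     # torch.Size([2, 3, 8])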
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/image_encoder.py | python | ImageEncoder.load_pretrained_vectors | (self, opt) | | | Pass in needed options only when modifying the function definition. | Pass in needed options only when modifying the function definition. | [
"Pass",
"in",
"needed",
"options",
"only",
"when",
"modify",
"function",
"definition",
"."
] | def load_pretrained_vectors(self, opt):
""" Pass in needed options only when modify function definition."""
pass | [
"def",
"load_pretrained_vectors",
"(",
"self",
",",
"opt",
")",
":",
"pass"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/image_encoder.py#L50-L52 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/image_encoder.py | python | ImageEncoder.forward | (self, src, lengths=None) | return hidden_t, out | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"encoders",
".",
"encoder",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
batch_size = src.size(0)
# (batch_size, 64, imgH, imgW)
# layer 1
src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)
# (batch_size, 64, imgH/2, imgW/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 128, imgH/2, imgW/2)
# layer 2
src = F.relu(self.layer2(src), True)
# (batch_size, 128, imgH/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer 3
# batch norm 1
src = F.relu(self.batch_norm1(self.layer3(src)), True)
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer4
src = F.relu(self.layer4(src), True)
# (batch_size, 256, imgH/2/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))
# (batch_size, 512, imgH/2/2/2, imgW/2/2)
# layer 5
# batch norm 2
src = F.relu(self.batch_norm2(self.layer5(src)), True)
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.max_pool2d(src, kernel_size=(2, 1), stride=(2, 1))
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.relu(self.batch_norm3(self.layer6(src)), True)
# # (batch_size, 512, H, W)
all_outputs = []
for row in range(src.size(2)):
inp = src[:, :, row, :].transpose(0, 2) \
.transpose(1, 2)
row_vec = torch.Tensor(batch_size).type_as(inp.data) \
.long().fill_(row)
pos_emb = self.pos_lut(row_vec)
with_pos = torch.cat(
(pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
outputs, hidden_t = self.rnn(with_pos)
all_outputs.append(outputs)
out = torch.cat(all_outputs, 0)
return hidden_t, out | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"batch_size",
"=",
"src",
".",
"size",
"(",
"0",
")",
"# (batch_size, 64, imgH, imgW)",
"# layer 1",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer1",
"(",
"src",
"[",
":",
",",
":",
",",
":",
",",
":",
"]",
"-",
"0.5",
")",
",",
"True",
")",
"# (batch_size, 64, imgH/2, imgW/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
")",
"# (batch_size, 128, imgH/2, imgW/2)",
"# layer 2",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer2",
"(",
"src",
")",
",",
"True",
")",
"# (batch_size, 128, imgH/2/2, imgW/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
")",
"# (batch_size, 256, imgH/2/2, imgW/2/2)",
"# layer 3",
"# batch norm 1",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm1",
"(",
"self",
".",
"layer3",
"(",
"src",
")",
")",
",",
"True",
")",
"# (batch_size, 256, imgH/2/2, imgW/2/2)",
"# layer4",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer4",
"(",
"src",
")",
",",
"True",
")",
"# (batch_size, 256, imgH/2/2/2, imgW/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"1",
",",
"2",
")",
",",
"stride",
"=",
"(",
"1",
",",
"2",
")",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2)",
"# layer 5",
"# batch norm 2",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm2",
"(",
"self",
".",
"layer5",
"(",
"src",
")",
")",
",",
"True",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"1",
")",
",",
"stride",
"=",
"(",
"2",
",",
"1",
")",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm3",
"(",
"self",
".",
"layer6",
"(",
"src",
")",
")",
",",
"True",
")",
"# # (batch_size, 512, H, W)",
"all_outputs",
"=",
"[",
"]",
"for",
"row",
"in",
"range",
"(",
"src",
".",
"size",
"(",
"2",
")",
")",
":",
"inp",
"=",
"src",
"[",
":",
",",
":",
",",
"row",
",",
":",
"]",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"row_vec",
"=",
"torch",
".",
"Tensor",
"(",
"batch_size",
")",
".",
"type_as",
"(",
"inp",
".",
"data",
")",
".",
"long",
"(",
")",
".",
"fill_",
"(",
"row",
")",
"pos_emb",
"=",
"self",
".",
"pos_lut",
"(",
"row_vec",
")",
"with_pos",
"=",
"torch",
".",
"cat",
"(",
"(",
"pos_emb",
".",
"view",
"(",
"1",
",",
"pos_emb",
".",
"size",
"(",
"0",
")",
",",
"pos_emb",
".",
"size",
"(",
"1",
")",
")",
",",
"inp",
")",
",",
"0",
")",
"outputs",
",",
"hidden_t",
"=",
"self",
".",
"rnn",
"(",
"with_pos",
")",
"all_outputs",
".",
"append",
"(",
"outputs",
")",
"out",
"=",
"torch",
".",
"cat",
"(",
"all_outputs",
",",
"0",
")",
"return",
"hidden_t",
",",
"out"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/image_encoder.py#L54-L109 |
|
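ImageEncoder.forward above shrinks the feature grid with asymmetric max-pools, halving width and height at different layers. A minimal trace of those shape effects (sizes illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(1, 256, 8, 20)                              # [batch, C, H, W]
y = F.max_pool2d(x, kernel_size=(1, 2), stride=(1, 2))      # halves width only
z = F.max_pool2d(y, kernel_size=(2, 1), stride=(2, 1))      # halves height only
print(y.shape, z.shape)  # torch.Size([1, 256, 8, 10]) torch.Size([1, 256, 4, 10])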
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/encoders/cnn_encoder.py | python | CNNEncoder.forward | (self, input, lengths=None, hidden=None) | return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \
out.squeeze(3).transpose(0, 1).contiguous() | See :obj:`onmt.modules.EncoderBase.forward()` | See :obj:`onmt.modules.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, input, lengths=None, hidden=None):
""" See :obj:`onmt.modules.EncoderBase.forward()`"""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
# s_len, batch, emb_dim = emb.size()
emb = emb.transpose(0, 1).contiguous()
emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)
emb_remap = self.linear(emb_reshape)
emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)
emb_remap = shape_transform(emb_remap)
out = self.cnn(emb_remap)
return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \
out.squeeze(3).transpose(0, 1).contiguous() | [
"def",
"forward",
"(",
"self",
",",
"input",
",",
"lengths",
"=",
"None",
",",
"hidden",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"input",
",",
"lengths",
",",
"hidden",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"input",
")",
"# s_len, batch, emb_dim = emb.size()",
"emb",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"emb_reshape",
"=",
"emb",
".",
"view",
"(",
"emb",
".",
"size",
"(",
"0",
")",
"*",
"emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"emb_remap",
"=",
"self",
".",
"linear",
"(",
"emb_reshape",
")",
"emb_remap",
"=",
"emb_remap",
".",
"view",
"(",
"emb",
".",
"size",
"(",
"0",
")",
",",
"emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"emb_remap",
"=",
"shape_transform",
"(",
"emb_remap",
")",
"out",
"=",
"self",
".",
"cnn",
"(",
"emb_remap",
")",
"return",
"emb_remap",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
",",
"out",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/encoders/cnn_encoder.py#L28-L43 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/models/sru.py | python | check_sru_requirement | (abort=False) | | return True | Return True if the check passes; if the check fails and abort is True,
raise an Exception, otherwise return False. | Return True if the check passes; if the check fails and abort is True,
raise an Exception, otherwise return False. | [
"Return",
"True",
"if",
"check",
"pass",
";",
"if",
"check",
"fails",
"and",
"abort",
"is",
"True",
"raise",
"an",
"Exception",
"othereise",
"return",
"False",
"."
] | def check_sru_requirement(abort=False):
"""
Return True if the check passes; if the check fails and abort is True,
raise an Exception, otherwise return False.
"""
# Check 1.
try:
if platform.system() == 'Windows':
subprocess.check_output('pip freeze | findstr cupy', shell=True)
subprocess.check_output('pip freeze | findstr pynvrtc',
shell=True)
else: # Unix-like systems
subprocess.check_output('pip freeze | grep -w cupy', shell=True)
subprocess.check_output('pip freeze | grep -w pynvrtc',
shell=True)
except subprocess.CalledProcessError:
if not abort:
return False
raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
"python packages installed.")
# Check 2.
if torch.cuda.is_available() is False:
if not abort:
return False
raise AssertionError("Using SRU requires pytorch built with cuda.")
# Check 3.
pattern = re.compile(".*cuda/lib.*")
ld_path = os.getenv('LD_LIBRARY_PATH', "")
if re.match(pattern, ld_path) is None:
if not abort:
return False
raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
return True | [
"def",
"check_sru_requirement",
"(",
"abort",
"=",
"False",
")",
":",
"# Check 1.",
"try",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | findstr cupy'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | findstr pynvrtc'",
",",
"shell",
"=",
"True",
")",
"else",
":",
"# Unix-like systems",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | grep -w cupy'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | grep -w pynvrtc'",
",",
"shell",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires 'cupy' and 'pynvrtc' \"",
"\"python packages installed.\"",
")",
"# Check 2.",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
"is",
"False",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires pytorch built with cuda.\"",
")",
"# Check 3.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"\".*cuda/lib.*\"",
")",
"ld_path",
"=",
"os",
".",
"getenv",
"(",
"'LD_LIBRARY_PATH'",
",",
"\"\"",
")",
"if",
"re",
".",
"match",
"(",
"pattern",
",",
"ld_path",
")",
"is",
"None",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires setting cuda lib path, e.g. \"",
"\"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.\"",
")",
"return",
"True"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/sru.py#L32-L69 |
|
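check_sru_requirement above shells out to `pip freeze`; an alternative probe, shown here purely as an illustrative sketch rather than the dataset's function, checks importability and the same LD_LIBRARY_PATH pattern directly:

import importlib.util
import os
import re

def sru_env_ok():
    # Package checks via importlib instead of `pip freeze | grep`.
    for pkg in ('cupy', 'pynvrtc'):
        if importlib.util.find_spec(pkg) is None:
            return False
    # Same LD_LIBRARY_PATH pattern as the original check.
    return re.match(r'.*cuda/lib.*', os.getenv('LD_LIBRARY_PATH', '')) is not None

print(sru_env_ok())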
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/models/model.py | python | NMTModel.forward | (self, src, tgt, src_sents, lengths, dec_state=None) | return decoder_outputs, attns, dec_state | Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state | Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state. | [
"Forward",
"propagate",
"a",
"src",
"and",
"tgt",
"pair",
"for",
"training",
".",
"Possible",
"initialized",
"with",
"a",
"beginning",
"decoder",
"state",
"."
] | def forward(self, src, tgt, src_sents, lengths, dec_state=None):
"""Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state
"""
tgt = tgt[:-1] # exclude last target from inputs: the decoder consumes tgt[:-1] and is trained to predict tgt[1:]
# import pdb;pdb.set_trace()
old_src_sents = src_sents.clone()
enc_final, memory_bank, sent_encoder = self.encoder(src,src_sents,lengths)
enc_state =self.decoder.init_decoder_state(src, memory_bank, enc_final)
decoder_outputs, dec_state, attns = \
self.decoder(tgt, memory_bank,
enc_state if dec_state is None
else dec_state,sent_encoder=sent_encoder,src_sents=old_src_sents,
memory_lengths=lengths)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return decoder_outputs, attns, dec_state | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"tgt",
",",
"src_sents",
",",
"lengths",
",",
"dec_state",
"=",
"None",
")",
":",
"tgt",
"=",
"tgt",
"[",
":",
"-",
"1",
"]",
"# exclude last target from inputs ?? why",
"# import pdb;pdb.set_trace()",
"old_src_sents",
"=",
"src_sents",
".",
"clone",
"(",
")",
"enc_final",
",",
"memory_bank",
",",
"sent_encoder",
"=",
"self",
".",
"encoder",
"(",
"src",
",",
"src_sents",
",",
"lengths",
")",
"enc_state",
"=",
"self",
".",
"decoder",
".",
"init_decoder_state",
"(",
"src",
",",
"memory_bank",
",",
"enc_final",
")",
"decoder_outputs",
",",
"dec_state",
",",
"attns",
"=",
"self",
".",
"decoder",
"(",
"tgt",
",",
"memory_bank",
",",
"enc_state",
"if",
"dec_state",
"is",
"None",
"else",
"dec_state",
",",
"sent_encoder",
"=",
"sent_encoder",
",",
"src_sents",
"=",
"old_src_sents",
",",
"memory_lengths",
"=",
"lengths",
")",
"if",
"self",
".",
"multigpu",
":",
"# Not yet supported on multi-gpu",
"dec_state",
"=",
"None",
"attns",
"=",
"None",
"return",
"decoder_outputs",
",",
"attns",
",",
"dec_state"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/model.py#L22-L65 |
|
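The tgt = tgt[:-1] slice in NMTModel.forward above is standard teacher forcing: the decoder consumes the target shifted right and is scored against the target shifted left. A minimal sketch with illustrative token ids:

import torch

# [tgt_len, batch]: <s> a b c </s> for a batch of one
tgt = torch.tensor([[2], [10], [11], [12], [3]])

decoder_input = tgt[:-1]   # <s> a b c   -> fed to the decoder
gold_output = tgt[1:]      # a b c </s>  -> compared against predictions
print(decoder_input.squeeze(1).tolist(), gold_output.squeeze(1).tolist())
# [2, 10, 11, 12] [10, 11, 12, 3]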
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/models/model_saver.py | python | ModelSaverBase.maybe_save | (self, step) | Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic | Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic | [
"Main",
"entry",
"point",
"for",
"model",
"saver",
"It",
"wraps",
"the",
"_save",
"method",
"with",
"checks",
"and",
"apply",
"keep_checkpoint",
"related",
"logic"
] | def maybe_save(self, step):
"""
Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic
"""
if self.keep_checkpoint == 0:
return
if step % self.save_checkpoint_steps != 0:
return
chkpt, chkpt_name = self._save(step)
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name) | [
"def",
"maybe_save",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"keep_checkpoint",
"==",
"0",
":",
"return",
"if",
"step",
"%",
"self",
".",
"save_checkpoint_steps",
"!=",
"0",
":",
"return",
"chkpt",
",",
"chkpt_name",
"=",
"self",
".",
"_save",
"(",
"step",
")",
"if",
"self",
".",
"keep_checkpoint",
">",
"0",
":",
"if",
"len",
"(",
"self",
".",
"checkpoint_queue",
")",
"==",
"self",
".",
"checkpoint_queue",
".",
"maxlen",
":",
"todel",
"=",
"self",
".",
"checkpoint_queue",
".",
"popleft",
"(",
")",
"self",
".",
"_rm_checkpoint",
"(",
"todel",
")",
"self",
".",
"checkpoint_queue",
".",
"append",
"(",
"chkpt_name",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/model_saver.py#L43-L61 |
||
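maybe_save above bounds the number of kept checkpoints by pairing a deque(maxlen=keep_checkpoint) with an explicit popleft plus delete. A dry run of that rotation; the names are illustrative and nothing touches disk:

from collections import deque

keep_checkpoint = 2
queue = deque(maxlen=keep_checkpoint)
removed = []

for step in (1000, 2000, 3000, 4000):
    name = 'model_step_%d.pt' % step
    if len(queue) == queue.maxlen:
        removed.append(queue.popleft())   # stand-in for _rm_checkpoint
    queue.append(name)

print(list(queue))   # ['model_step_3000.pt', 'model_step_4000.pt']
print(removed)       # ['model_step_1000.pt', 'model_step_2000.pt']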
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/models/model_saver.py | python | ModelSaverBase._save | (self, step) | Save a resumable checkpoint.
Args:
step (int): step number
Returns:
checkpoint: the saved object
checkpoint_name: name (or path) of the saved checkpoint | Save a resumable checkpoint. | [
"Save",
"a",
"resumable",
"checkpoint",
"."
] | def _save(self, step):
""" Save a resumable checkpoint.
Args:
step (int): step number
Returns:
checkpoint: the saved object
checkpoint_name: name (or path) of the saved checkpoint
"""
raise NotImplementedError() | [
"def",
"_save",
"(",
"self",
",",
"step",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/model_saver.py#L63-L73 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/models/model_saver.py | python | ModelSaverBase._rm_checkpoint | (self, name) | Remove a checkpoint
Args:
name(str): name that identifies the checkpoint
(it may be a filepath) | Remove a checkpoint | [
"Remove",
"a",
"checkpoint"
] | def _rm_checkpoint(self, name):
"""
Remove a checkpoint
Args:
name(str): name that identifies the checkpoint
(it may be a filepath)
"""
raise NotImplementedError() | [
"def",
"_rm_checkpoint",
"(",
"self",
",",
"name",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/models/model_saver.py#L75-L83 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/cnn_factory.py | python | shape_transform | (x) | | return torch.unsqueeze(torch.transpose(x, 1, 2), 3) | Transform the size of the tensors to fit for conv input. | Transform the size of the tensors to fit for conv input. | [
"Tranform",
"the",
"size",
"of",
"the",
"tensors",
"to",
"fit",
"for",
"conv",
"input",
"."
] | def shape_transform(x):
""" Tranform the size of the tensors to fit for conv input. """
return torch.unsqueeze(torch.transpose(x, 1, 2), 3) | [
"def",
"shape_transform",
"(",
"x",
")",
":",
"return",
"torch",
".",
"unsqueeze",
"(",
"torch",
".",
"transpose",
"(",
"x",
",",
"1",
",",
"2",
")",
",",
"3",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/cnn_factory.py#L14-L16 |
|
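A quick shape trace of shape_transform above, which moves [batch, len, dim] activations into the [batch, dim, len, 1] layout expected by 2-D convolutions (sizes illustrative):

import torch

x = torch.randn(2, 10, 64)                         # [batch, len, dim]
y = torch.unsqueeze(torch.transpose(x, 1, 2), 3)   # same ops as shape_transform
print(y.shape)  # torch.Size([2, 64, 10, 1])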
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | build_optim | (model, opt, checkpoint) | return optim | Build optimizer | Build optimizer | [
"Build",
"optimizer"
] | def build_optim(model, opt, checkpoint):
""" Build optimizer """
saved_optimizer_state_dict = None
if opt.train_from:
optim = checkpoint['optim']
# We need to save a copy of optim.optimizer.state_dict() for setting
# the optimizer state later on in Stage 2 in this method, since
# the method optim.set_parameters(model.parameters()) will overwrite
# optim.optimizer, and with it the values stored in
# optim.optimizer.state_dict()
saved_optimizer_state_dict = optim.optimizer.state_dict()
else:
optim = Optimizer(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_steps=opt.start_decay_steps,
decay_steps=opt.decay_steps,
beta1=opt.adam_beta1,
beta2=opt.adam_beta2,
adagrad_accum=opt.adagrad_accumulator_init,
decay_method=opt.decay_method,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size)
# Stage 1:
# Essentially optim.set_parameters (re-)creates an optimizer using
# model.parameters() as parameters that will be stored in the
# optim.optimizer.param_groups field of the torch optimizer class.
# Importantly, this method does not yet load the optimizer state, as
# essentially it builds a new optimizer with empty optimizer state and
# parameters from the model.
optim.set_parameters(model.named_parameters())
if opt.train_from:
# Stage 2: In this stage, which is only performed when loading an
# optimizer from a checkpoint, we load the saved_optimizer_state_dict
# into the re-created optimizer, to set the optim.optimizer.state
# field, which was previously empty. For this, we use the optimizer
# state saved in the "saved_optimizer_state_dict" variable for
# this purpose.
# See also: https://github.com/pytorch/pytorch/issues/2830
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
# Convert back the state values to cuda type if applicable
if use_gpu(opt):
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
# We want to make sure that indeed we have a non-empty optimizer state
# when we loaded an existing model. This should be at least the case
# for Adam, which saves "exp_avg" and "exp_avg_sq" state
# (Exponential moving average of gradient and squared gradient values)
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
return optim | [
"def",
"build_optim",
"(",
"model",
",",
"opt",
",",
"checkpoint",
")",
":",
"saved_optimizer_state_dict",
"=",
"None",
"if",
"opt",
".",
"train_from",
":",
"optim",
"=",
"checkpoint",
"[",
"'optim'",
"]",
"# We need to save a copy of optim.optimizer.state_dict() for setting",
"# the, optimizer state later on in Stage 2 in this method, since",
"# the method optim.set_parameters(model.parameters()) will overwrite",
"# optim.optimizer, and with ith the values stored in",
"# optim.optimizer.state_dict()",
"saved_optimizer_state_dict",
"=",
"optim",
".",
"optimizer",
".",
"state_dict",
"(",
")",
"else",
":",
"optim",
"=",
"Optimizer",
"(",
"opt",
".",
"optim",
",",
"opt",
".",
"learning_rate",
",",
"opt",
".",
"max_grad_norm",
",",
"lr_decay",
"=",
"opt",
".",
"learning_rate_decay",
",",
"start_decay_steps",
"=",
"opt",
".",
"start_decay_steps",
",",
"decay_steps",
"=",
"opt",
".",
"decay_steps",
",",
"beta1",
"=",
"opt",
".",
"adam_beta1",
",",
"beta2",
"=",
"opt",
".",
"adam_beta2",
",",
"adagrad_accum",
"=",
"opt",
".",
"adagrad_accumulator_init",
",",
"decay_method",
"=",
"opt",
".",
"decay_method",
",",
"warmup_steps",
"=",
"opt",
".",
"warmup_steps",
",",
"model_size",
"=",
"opt",
".",
"rnn_size",
")",
"# Stage 1:",
"# Essentially optim.set_parameters (re-)creates and optimizer using",
"# model.paramters() as parameters that will be stored in the",
"# optim.optimizer.param_groups field of the torch optimizer class.",
"# Importantly, this method does not yet load the optimizer state, as",
"# essentially it builds a new optimizer with empty optimizer state and",
"# parameters from the model.",
"optim",
".",
"set_parameters",
"(",
"model",
".",
"named_parameters",
"(",
")",
")",
"if",
"opt",
".",
"train_from",
":",
"# Stage 2: In this stage, which is only performed when loading an",
"# optimizer from a checkpoint, we load the saved_optimizer_state_dict",
"# into the re-created optimizer, to set the optim.optimizer.state",
"# field, which was previously empty. For this, we use the optimizer",
"# state saved in the \"saved_optimizer_state_dict\" variable for",
"# this purpose.",
"# See also: https://github.com/pytorch/pytorch/issues/2830",
"optim",
".",
"optimizer",
".",
"load_state_dict",
"(",
"saved_optimizer_state_dict",
")",
"# Convert back the state values to cuda type if applicable",
"if",
"use_gpu",
"(",
"opt",
")",
":",
"for",
"state",
"in",
"optim",
".",
"optimizer",
".",
"state",
".",
"values",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"state",
".",
"items",
"(",
")",
":",
"if",
"torch",
".",
"is_tensor",
"(",
"v",
")",
":",
"state",
"[",
"k",
"]",
"=",
"v",
".",
"cuda",
"(",
")",
"# We want to make sure that indeed we have a non-empty optimizer state",
"# when we loaded an existing model. This should be at least the case",
"# for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state",
"# (Exponential moving average of gradient and squared gradient values)",
"if",
"(",
"optim",
".",
"method",
"==",
"'adam'",
")",
"and",
"(",
"len",
"(",
"optim",
".",
"optimizer",
".",
"state",
")",
"<",
"1",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Error: loaded Adam optimizer from existing model\"",
"+",
"\" but optimizer state is empty\"",
")",
"return",
"optim"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L9-L68 |
|
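The two-stage logic in build_optim above (rebuild the optimizer over the model's parameters, then load the saved state into it) can be exercised on its own. A minimal CPU-only sketch; the model and hyperparameters are illustrative:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
opt1 = optim.Adam(model.parameters(), lr=1e-3)
model(torch.randn(3, 4)).sum().backward()
opt1.step()                                  # populates exp_avg / exp_avg_sq state

saved = opt1.state_dict()                    # what a checkpoint would hold

# Stage 1: fresh optimizer over the same parameters (state is empty).
opt2 = optim.Adam(model.parameters(), lr=1e-3)
# Stage 2: restore the saved state into the rebuilt optimizer.
opt2.load_state_dict(saved)
assert len(opt2.state) >= 1                  # the non-empty-state check from above
print('restored', len(opt2.state), 'state entries')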
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.__init__ | (self, op) | ? | ? | [
"?"
] | def __init__(self, op):
""" ? """
self.optimizers = op | [
"def",
"__init__",
"(",
"self",
",",
"op",
")",
":",
"self",
".",
"optimizers",
"=",
"op"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L74-L76 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.zero_grad | (self) | ? | ? | [
"?"
] | def zero_grad(self):
""" ? """
for op in self.optimizers:
op.zero_grad() | [
"def",
"zero_grad",
"(",
"self",
")",
":",
"for",
"op",
"in",
"self",
".",
"optimizers",
":",
"op",
".",
"zero_grad",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L78-L81 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.step | (self) | ? | ? | [
"?"
] | def step(self):
""" ? """
for op in self.optimizers:
op.step() | [
"def",
"step",
"(",
"self",
")",
":",
"for",
"op",
"in",
"self",
".",
"optimizers",
":",
"op",
".",
"step",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L83-L86 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.state | (self) | return {k: v for op in self.optimizers for k, v in op.state.items()} | ? | ? | [
"?"
] | def state(self):
""" ? """
return {k: v for op in self.optimizers for k, v in op.state.items()} | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"op",
"in",
"self",
".",
"optimizers",
"for",
"k",
",",
"v",
"in",
"op",
".",
"state",
".",
"items",
"(",
")",
"}"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L89-L91 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.state_dict | (self) | return [op.state_dict() for op in self.optimizers] | ? | ? | [
"?"
] | def state_dict(self):
""" ? """
return [op.state_dict() for op in self.optimizers] | [
"def",
"state_dict",
"(",
"self",
")",
":",
"return",
"[",
"op",
".",
"state_dict",
"(",
")",
"for",
"op",
"in",
"self",
".",
"optimizers",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L93-L95 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | MultipleOptimizer.load_state_dict | (self, state_dicts) | ? | ? | [
"?"
] | def load_state_dict(self, state_dicts):
""" ? """
assert len(state_dicts) == len(self.optimizers)
for i in range(len(state_dicts)):
self.optimizers[i].load_state_dict(state_dicts[i]) | [
"def",
"load_state_dict",
"(",
"self",
",",
"state_dicts",
")",
":",
"assert",
"len",
"(",
"state_dicts",
")",
"==",
"len",
"(",
"self",
".",
"optimizers",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"state_dicts",
")",
")",
":",
"self",
".",
"optimizers",
"[",
"i",
"]",
".",
"load_state_dict",
"(",
"state_dicts",
"[",
"i",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L97-L101 |
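
Taken together, the MultipleOptimizer rows above describe a wrapper that simply fans every call out to its wrapped optimizers. A small usage sketch under assumed parameter groups and learning rates (none of these values come from the repository):

import torch
import torch.optim as optim

dense = torch.nn.Parameter(torch.randn(3))
embed = torch.nn.Parameter(torch.randn(5, 2))

# one optimizer per disjoint parameter group, as in the 'sparseadam' path
optimizers = [optim.Adam([dense], lr=1e-3),
              optim.SGD([embed], lr=0.1)]

loss = dense.sum() + embed.sum()
for op in optimizers:        # zero_grad() fans out
    op.zero_grad()
loss.backward()
for op in optimizers:        # step() fans out
    op.step()

state_dicts = [op.state_dict() for op in optimizers]   # state_dict()
for op, sd in zip(optimizers, state_dicts):            # load_state_dict()
    op.load_state_dict(sd)
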
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | Optimizer.set_parameters | (self, params) | ? | ? | [
"?"
] | def set_parameters(self, params):
""" ? """
self.params = []
self.sparse_params = []
for k, p in params:
if p.requires_grad:
if self.method != 'sparseadam' or "embed" not in k:
self.params.append(p)
else:
self.sparse_params.append(p)
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
for group in self.optimizer.param_groups:
for p in group['params']:
self.optimizer.state[p]['sum'] = self.optimizer\
.state[p]['sum'].fill_(self.adagrad_accum)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.learning_rate,
betas=self.betas, eps=1e-9)
elif self.method == 'sparseadam':
self.optimizer = MultipleOptimizer(
[optim.Adam(self.params, lr=self.learning_rate,
betas=self.betas, eps=1e-8),
optim.SparseAdam(self.sparse_params, lr=self.learning_rate,
betas=self.betas, eps=1e-8)])
else:
raise RuntimeError("Invalid optim method: " + self.method) | [
"def",
"set_parameters",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"params",
"=",
"[",
"]",
"self",
".",
"sparse_params",
"=",
"[",
"]",
"for",
"k",
",",
"p",
"in",
"params",
":",
"if",
"p",
".",
"requires_grad",
":",
"if",
"self",
".",
"method",
"!=",
"'sparseadam'",
"or",
"\"embed\"",
"not",
"in",
"k",
":",
"self",
".",
"params",
".",
"append",
"(",
"p",
")",
"else",
":",
"self",
".",
"sparse_params",
".",
"append",
"(",
"p",
")",
"if",
"self",
".",
"method",
"==",
"'sgd'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"SGD",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"elif",
"self",
".",
"method",
"==",
"'adagrad'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adagrad",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"for",
"group",
"in",
"self",
".",
"optimizer",
".",
"param_groups",
":",
"for",
"p",
"in",
"group",
"[",
"'params'",
"]",
":",
"self",
".",
"optimizer",
".",
"state",
"[",
"p",
"]",
"[",
"'sum'",
"]",
"=",
"self",
".",
"optimizer",
".",
"state",
"[",
"p",
"]",
"[",
"'sum'",
"]",
".",
"fill_",
"(",
"self",
".",
"adagrad_accum",
")",
"elif",
"self",
".",
"method",
"==",
"'adadelta'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adadelta",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"elif",
"self",
".",
"method",
"==",
"'adam'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adam",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-9",
")",
"elif",
"self",
".",
"method",
"==",
"'sparseadam'",
":",
"self",
".",
"optimizer",
"=",
"MultipleOptimizer",
"(",
"[",
"optim",
".",
"Adam",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-8",
")",
",",
"optim",
".",
"SparseAdam",
"(",
"self",
".",
"sparse_params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-8",
")",
"]",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid optim method: \"",
"+",
"self",
".",
"method",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L158-L188 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/optimizers.py | python | Optimizer.step | (self) | Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate. | Update the model parameters based on current gradients. | [
"Update",
"the",
"model",
"parameters",
"based",
"on",
"current",
"gradients",
"."
] | def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
self._step += 1
# Decay method used in tensor2tensor.
if self.decay_method == "noam":
self._set_rate(
self.original_lr *
(self.model_size ** (-0.5) *
min(self._step ** (-0.5),
self._step * self.warmup_steps**(-1.5))))
# Decay based on start_decay_steps every decay_steps
else:
if ((self.start_decay_steps is not None) and (
self._step >= self.start_decay_steps)):
self.start_decay = True
if self.start_decay:
if ((self._step - self.start_decay_steps)
% self.decay_steps == 0):
self.learning_rate = self.learning_rate * self.lr_decay
if self.method != 'sparseadam':
self.optimizer.param_groups[0]['lr'] = self.learning_rate
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
self.optimizer.step() | [
"def",
"step",
"(",
"self",
")",
":",
"self",
".",
"_step",
"+=",
"1",
"# Decay method used in tensor2tensor.",
"if",
"self",
".",
"decay_method",
"==",
"\"noam\"",
":",
"self",
".",
"_set_rate",
"(",
"self",
".",
"original_lr",
"*",
"(",
"self",
".",
"model_size",
"**",
"(",
"-",
"0.5",
")",
"*",
"min",
"(",
"self",
".",
"_step",
"**",
"(",
"-",
"0.5",
")",
",",
"self",
".",
"_step",
"*",
"self",
".",
"warmup_steps",
"**",
"(",
"-",
"1.5",
")",
")",
")",
")",
"# Decay based on start_decay_steps every decay_steps",
"else",
":",
"if",
"(",
"(",
"self",
".",
"start_decay_steps",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"_step",
">=",
"self",
".",
"start_decay_steps",
")",
")",
":",
"self",
".",
"start_decay",
"=",
"True",
"if",
"self",
".",
"start_decay",
":",
"if",
"(",
"(",
"self",
".",
"_step",
"-",
"self",
".",
"start_decay_steps",
")",
"%",
"self",
".",
"decay_steps",
"==",
"0",
")",
":",
"self",
".",
"learning_rate",
"=",
"self",
".",
"learning_rate",
"*",
"self",
".",
"lr_decay",
"if",
"self",
".",
"method",
"!=",
"'sparseadam'",
":",
"self",
".",
"optimizer",
".",
"param_groups",
"[",
"0",
"]",
"[",
"'lr'",
"]",
"=",
"self",
".",
"learning_rate",
"if",
"self",
".",
"max_grad_norm",
":",
"clip_grad_norm_",
"(",
"self",
".",
"params",
",",
"self",
".",
"max_grad_norm",
")",
"self",
".",
"optimizer",
".",
"step",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/optimizers.py#L198-L228 |
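
The "noam" branch of step() is easiest to see numerically. A sketch with assumed hyper-parameters (these are placeholders, not the repository's defaults):

original_lr, model_size, warmup_steps = 2.0, 512, 4000

def noam_lr(step):
    # rate rises roughly linearly during warmup, then decays as step ** -0.5
    return original_lr * (model_size ** (-0.5) *
                          min(step ** (-0.5), step * warmup_steps ** (-1.5)))

for s in (100, 4000, 16000):
    print(s, noam_lr(s))  # peaks near step == warmup_steps, decays after
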
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.all_gather_stats | (stat, max_size=4096) | return stats[0] | Gather a `Statistics` object across multiple process/nodes
Args:
stat(:obj:Statistics): the statistics object to gather
across all processes/nodes
max_size(int): max buffer size to use
Returns:
`Statistics`, the updated stats object | Gather a `Statistics` object across multiple process/nodes | [
"Gather",
"a",
"Statistics",
"object",
"across",
"multiple",
"process",
"/",
"nodes"
] | def all_gather_stats(stat, max_size=4096):
"""
Gather a `Statistics` object across multiple process/nodes
Args:
stat(:obj:Statistics): the statistics object to gather
across all processes/nodes
max_size(int): max buffer size to use
Returns:
`Statistics`, the updated stats object
"""
stats = Statistics.all_gather_stats_list([stat], max_size=max_size)
return stats[0] | [
"def",
"all_gather_stats",
"(",
"stat",
",",
"max_size",
"=",
"4096",
")",
":",
"stats",
"=",
"Statistics",
".",
"all_gather_stats_list",
"(",
"[",
"stat",
"]",
",",
"max_size",
"=",
"max_size",
")",
"return",
"stats",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L30-L43 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.all_gather_stats_list | (stat_list, max_size=4096) | return our_stats | Gather a `Statistics` list across all processes/nodes
Args:
stat_list(list([`Statistics`])): list of statistics objects to
gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats | Gather a `Statistics` list across all processes/nodes | [
"Gather",
"a",
"Statistics",
"list",
"across",
"all",
"processes",
"/",
"nodes"
] | def all_gather_stats_list(stat_list, max_size=4096):
"""
Gather a `Statistics` list across all processes/nodes
Args:
stat_list(list([`Statistics`])): list of statistics objects to
gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats
"""
# Get a list of world_size lists with len(stat_list) Statistics objects
all_stats = all_gather_list(stat_list, max_size=max_size)
our_rank = get_rank()
our_stats = all_stats[our_rank]
for other_rank, stats in enumerate(all_stats):
if other_rank == our_rank:
continue
for i, stat in enumerate(stats):
our_stats[i].update(stat, update_n_src_words=True)
return our_stats | [
"def",
"all_gather_stats_list",
"(",
"stat_list",
",",
"max_size",
"=",
"4096",
")",
":",
"# Get a list of world_size lists with len(stat_list) Statistics objects",
"all_stats",
"=",
"all_gather_list",
"(",
"stat_list",
",",
"max_size",
"=",
"max_size",
")",
"our_rank",
"=",
"get_rank",
"(",
")",
"our_stats",
"=",
"all_stats",
"[",
"our_rank",
"]",
"for",
"other_rank",
",",
"stats",
"in",
"enumerate",
"(",
"all_stats",
")",
":",
"if",
"other_rank",
"==",
"our_rank",
":",
"continue",
"for",
"i",
",",
"stat",
"in",
"enumerate",
"(",
"stats",
")",
":",
"our_stats",
"[",
"i",
"]",
".",
"update",
"(",
"stat",
",",
"update_n_src_words",
"=",
"True",
")",
"return",
"our_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L46-L68 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.update | (self, stat, update_n_src_words=False) | | Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not | Update statistics by summing values with another `Statistics` object | [
"Update",
"statistics",
"by",
"summing",
"values",
"with",
"another",
"Statistics",
"object"
] | def update(self, stat, update_n_src_words=False):
"""
Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not
"""
self.loss += stat.loss
self.n_words += stat.n_words
self.n_correct += stat.n_correct
if update_n_src_words:
self.n_src_words += stat.n_src_words | [
"def",
"update",
"(",
"self",
",",
"stat",
",",
"update_n_src_words",
"=",
"False",
")",
":",
"self",
".",
"loss",
"+=",
"stat",
".",
"loss",
"self",
".",
"n_words",
"+=",
"stat",
".",
"n_words",
"self",
".",
"n_correct",
"+=",
"stat",
".",
"n_correct",
"if",
"update_n_src_words",
":",
"self",
".",
"n_src_words",
"+=",
"stat",
".",
"n_src_words"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L70-L85 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.accuracy | (self) | return 100 * (self.n_correct / self.n_words) | compute accuracy | compute accuracy | [
"compute",
"accuracy"
] | def accuracy(self):
""" compute accuracy """
return 100 * (self.n_correct / self.n_words) | [
"def",
"accuracy",
"(",
"self",
")",
":",
"return",
"100",
"*",
"(",
"self",
".",
"n_correct",
"/",
"self",
".",
"n_words",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L87-L89 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.xent | (self) | return self.loss / self.n_words | compute cross entropy | compute cross entropy | [
"compute",
"cross",
"entropy"
] | def xent(self):
""" compute cross entropy """
return self.loss / self.n_words | [
"def",
"xent",
"(",
"self",
")",
":",
"return",
"self",
".",
"loss",
"/",
"self",
".",
"n_words"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L91-L93 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.ppl | (self) | return math.exp(min(self.loss / self.n_words, 100)) | compute perplexity | compute perplexity | [
"compute",
"perplexity"
] | def ppl(self):
""" compute perplexity """
return math.exp(min(self.loss / self.n_words, 100)) | [
"def",
"ppl",
"(",
"self",
")",
":",
"return",
"math",
".",
"exp",
"(",
"min",
"(",
"self",
".",
"loss",
"/",
"self",
".",
"n_words",
",",
"100",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L95-L97 |
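
A worked numeric example tying accuracy(), xent() and ppl() together (the counts are invented):

import math

loss, n_words, n_correct = 690.0, 300, 210
accuracy = 100 * (n_correct / n_words)    # 70.0
xent = loss / n_words                     # 2.3 nats per word
ppl = math.exp(min(loss / n_words, 100))  # ~9.97; the min() caps overflow
print(accuracy, xent, ppl)
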
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.elapsed_time | (self) | return time.time() - self.start_time | compute elapsed time | compute elapsed time | [
"compute",
"elapsed",
"time"
] | def elapsed_time(self):
""" compute elapsed time """
return time.time() - self.start_time | [
"def",
"elapsed_time",
"(",
"self",
")",
":",
"return",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L99-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.output | (self, step, num_steps, learning_rate, start) | Write out statistics to stdout.
Args:
step (int): current step
num_steps (int): total steps
start (int): start time of step. | Write out statistics to stdout. | [
"Write",
"out",
"statistics",
"to",
"stdout",
"."
] | def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
step (int): current step
num_steps (int): total steps
start (int): start time of step.
"""
t = self.elapsed_time()
logger.info(
("Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; " +
"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec")
% (step, num_steps,
self.accuracy(),
self.ppl(),
self.xent(),
learning_rate,
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush() | [
"def",
"output",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"start",
")",
":",
"t",
"=",
"self",
".",
"elapsed_time",
"(",
")",
"logger",
".",
"info",
"(",
"(",
"\"Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \"",
"+",
"\"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec\"",
")",
"%",
"(",
"step",
",",
"num_steps",
",",
"self",
".",
"accuracy",
"(",
")",
",",
"self",
".",
"ppl",
"(",
")",
",",
"self",
".",
"xent",
"(",
")",
",",
"learning_rate",
",",
"self",
".",
"n_src_words",
"/",
"(",
"t",
"+",
"1e-5",
")",
",",
"self",
".",
"n_words",
"/",
"(",
"t",
"+",
"1e-5",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L103-L123 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/statistics.py | python | Statistics.log_tensorboard | (self, prefix, writer, learning_rate, step) | display statistics to tensorboard | display statistics to tensorboard | [
"display",
"statistics",
"to",
"tensorboard"
] | def log_tensorboard(self, prefix, writer, learning_rate, step):
""" display statistics to tensorboard """
t = self.elapsed_time()
writer.add_scalar(prefix + "/xent", self.xent(), step)
writer.add_scalar(prefix + "/ppl", self.ppl(), step)
writer.add_scalar(prefix + "/accuracy", self.accuracy(), step)
writer.add_scalar(prefix + "/tgtper", self.n_words / t, step)
writer.add_scalar(prefix + "/lr", learning_rate, step) | [
"def",
"log_tensorboard",
"(",
"self",
",",
"prefix",
",",
"writer",
",",
"learning_rate",
",",
"step",
")",
":",
"t",
"=",
"self",
".",
"elapsed_time",
"(",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/xent\"",
",",
"self",
".",
"xent",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/ppl\"",
",",
"self",
".",
"ppl",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/accuracy\"",
",",
"self",
".",
"accuracy",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/tgtper\"",
",",
"self",
".",
"n_words",
"/",
"t",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/lr\"",
",",
"learning_rate",
",",
"step",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/statistics.py#L125-L132 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/rnn_factory.py | python | rnn_factory | (rnn_type, **kwargs) | return rnn, no_pack_padded_seq | rnn factory, Use pytorch version when available. | rnn factory, Use pytorch version when available. | [
"rnn",
"factory",
"Use",
"pytorch",
"version",
"when",
"available",
"."
] | def rnn_factory(rnn_type, **kwargs):
""" rnn factory, Use pytorch version when available. """
no_pack_padded_seq = False
if rnn_type == "SRU":
# SRU doesn't support PackedSequence.
no_pack_padded_seq = True
rnn = onmt.models.sru.SRU(**kwargs)
else:
rnn = getattr(nn, rnn_type)(**kwargs)
return rnn, no_pack_padded_seq | [
"def",
"rnn_factory",
"(",
"rnn_type",
",",
"*",
"*",
"kwargs",
")",
":",
"no_pack_padded_seq",
"=",
"False",
"if",
"rnn_type",
"==",
"\"SRU\"",
":",
"# SRU doesn't support PackedSequence.",
"no_pack_padded_seq",
"=",
"True",
"rnn",
"=",
"onmt",
".",
"models",
".",
"sru",
".",
"SRU",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"rnn",
"=",
"getattr",
"(",
"nn",
",",
"rnn_type",
")",
"(",
"*",
"*",
"kwargs",
")",
"return",
"rnn",
",",
"no_pack_padded_seq"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/rnn_factory.py#L10-L19 |
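
For the stock cell types, rnn_factory reduces to a getattr dispatch on torch.nn. A sketch with assumed sizes:

import torch.nn as nn

rnn = getattr(nn, "LSTM")(input_size=8, hidden_size=16, num_layers=2)
print(type(rnn).__name__)  # LSTM; "GRU" and "RNN" resolve the same way
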
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/distributed.py | python | all_reduce_and_rescale_tensors | (tensors, rescale_denom,
buffer_size=10485760) | All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes | All-reduce and rescale tensors in chunks of the specified size. | [
"All",
"-",
"reduce",
"and",
"rescale",
"tensors",
"in",
"chunks",
"of",
"the",
"specified",
"size",
"."
] | def all_reduce_and_rescale_tensors(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
torch.distributed.all_reduce(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
torch.distributed.all_reduce(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer() | [
"def",
"all_reduce_and_rescale_tensors",
"(",
"tensors",
",",
"rescale_denom",
",",
"buffer_size",
"=",
"10485760",
")",
":",
"# buffer size in bytes, determine equiv. # of elements based on data type",
"buffer_t",
"=",
"tensors",
"[",
"0",
"]",
".",
"new",
"(",
"math",
".",
"ceil",
"(",
"buffer_size",
"/",
"tensors",
"[",
"0",
"]",
".",
"element_size",
"(",
")",
")",
")",
".",
"zero_",
"(",
")",
"buffer",
"=",
"[",
"]",
"def",
"all_reduce_buffer",
"(",
")",
":",
"# copy tensors into buffer_t",
"offset",
"=",
"0",
"for",
"t",
"in",
"buffer",
":",
"numel",
"=",
"t",
".",
"numel",
"(",
")",
"buffer_t",
"[",
"offset",
":",
"offset",
"+",
"numel",
"]",
".",
"copy_",
"(",
"t",
".",
"view",
"(",
"-",
"1",
")",
")",
"offset",
"+=",
"numel",
"# all-reduce and rescale",
"torch",
".",
"distributed",
".",
"all_reduce",
"(",
"buffer_t",
"[",
":",
"offset",
"]",
")",
"buffer_t",
".",
"div_",
"(",
"rescale_denom",
")",
"# copy all-reduced buffer back into tensors",
"offset",
"=",
"0",
"for",
"t",
"in",
"buffer",
":",
"numel",
"=",
"t",
".",
"numel",
"(",
")",
"t",
".",
"view",
"(",
"-",
"1",
")",
".",
"copy_",
"(",
"buffer_t",
"[",
"offset",
":",
"offset",
"+",
"numel",
"]",
")",
"offset",
"+=",
"numel",
"filled",
"=",
"0",
"for",
"t",
"in",
"tensors",
":",
"sz",
"=",
"t",
".",
"numel",
"(",
")",
"*",
"t",
".",
"element_size",
"(",
")",
"if",
"sz",
">",
"buffer_size",
":",
"# tensor is bigger than buffer, all-reduce and rescale directly",
"torch",
".",
"distributed",
".",
"all_reduce",
"(",
"t",
")",
"t",
".",
"div_",
"(",
"rescale_denom",
")",
"elif",
"filled",
"+",
"sz",
">",
"buffer_size",
":",
"# buffer is full, all-reduce and replace buffer with grad",
"all_reduce_buffer",
"(",
")",
"buffer",
"=",
"[",
"t",
"]",
"filled",
"=",
"sz",
"else",
":",
"# add tensor to buffer",
"buffer",
".",
"append",
"(",
"t",
")",
"filled",
"+=",
"sz",
"if",
"len",
"(",
"buffer",
")",
">",
"0",
":",
"all_reduce_buffer",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/distributed.py#L35-L86 |
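
The bucketing control flow above can be exercised without a process group by swapping torch.distributed.all_reduce for a no-op stand-in; everything else below mirrors the function's logic, with buffer_size shrunk so a single run hits all three branches:

import torch

def fake_all_reduce(t):  # stand-in for torch.distributed.all_reduce
    pass

def bucketed_reduce(tensors, rescale_denom, buffer_size=64):
    elem = tensors[0].element_size()
    buffer_t = tensors[0].new_zeros((buffer_size + elem - 1) // elem)
    buffer, filled = [], 0

    def flush():
        offset = 0
        for t in buffer:                    # pack chunks into buffer_t
            n = t.numel()
            buffer_t[offset:offset + n].copy_(t.view(-1))
            offset += n
        fake_all_reduce(buffer_t[:offset])  # one reduce for the whole bucket
        buffer_t.div_(rescale_denom)
        offset = 0
        for t in buffer:                    # unpack rescaled values
            n = t.numel()
            t.view(-1).copy_(buffer_t[offset:offset + n])
            offset += n

    for t in tensors:
        sz = t.numel() * t.element_size()
        if sz > buffer_size:                # too big for the bucket: direct
            fake_all_reduce(t)
            t.div_(rescale_denom)
        elif filled + sz > buffer_size:     # bucket would overflow: flush
            flush()
            buffer, filled = [t], sz
        else:                               # accumulate into the bucket
            buffer.append(t)
            filled += sz
    if buffer:
        flush()

bucketed_reduce([torch.ones(4), torch.ones(10), torch.ones(50)], 2.0)
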
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/distributed.py | python | all_gather_list | (data, max_size=4096) | return results | Gathers arbitrary data from all nodes into a list. | Gathers arbitrary data from all nodes into a list. | [
"Gathers",
"arbitrary",
"data",
"from",
"all",
"nodes",
"into",
"a",
"list",
"."
] | def all_gather_list(data, max_size=4096):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != all_gather_list._in_buffer.size():
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size)
for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError(
'encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
in_buffer[1] = enc_size % 255
in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
results = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * out_buffer[0].item()) + out_buffer[1].item()
bytes_list = bytes(out_buffer[2:size+2].tolist())
result = pickle.loads(bytes_list)
results.append(result)
return results | [
"def",
"all_gather_list",
"(",
"data",
",",
"max_size",
"=",
"4096",
")",
":",
"world_size",
"=",
"torch",
".",
"distributed",
".",
"get_world_size",
"(",
")",
"if",
"not",
"hasattr",
"(",
"all_gather_list",
",",
"'_in_buffer'",
")",
"or",
"max_size",
"!=",
"all_gather_list",
".",
"_in_buffer",
".",
"size",
"(",
")",
":",
"all_gather_list",
".",
"_in_buffer",
"=",
"torch",
".",
"cuda",
".",
"ByteTensor",
"(",
"max_size",
")",
"all_gather_list",
".",
"_out_buffers",
"=",
"[",
"torch",
".",
"cuda",
".",
"ByteTensor",
"(",
"max_size",
")",
"for",
"i",
"in",
"range",
"(",
"world_size",
")",
"]",
"in_buffer",
"=",
"all_gather_list",
".",
"_in_buffer",
"out_buffers",
"=",
"all_gather_list",
".",
"_out_buffers",
"enc",
"=",
"pickle",
".",
"dumps",
"(",
"data",
")",
"enc_size",
"=",
"len",
"(",
"enc",
")",
"if",
"enc_size",
"+",
"2",
">",
"max_size",
":",
"raise",
"ValueError",
"(",
"'encoded data exceeds max_size: {}'",
".",
"format",
"(",
"enc_size",
"+",
"2",
")",
")",
"assert",
"max_size",
"<",
"255",
"*",
"256",
"in_buffer",
"[",
"0",
"]",
"=",
"enc_size",
"//",
"255",
"# this encoding works for max_size < 65k",
"in_buffer",
"[",
"1",
"]",
"=",
"enc_size",
"%",
"255",
"in_buffer",
"[",
"2",
":",
"enc_size",
"+",
"2",
"]",
"=",
"torch",
".",
"ByteTensor",
"(",
"list",
"(",
"enc",
")",
")",
"torch",
".",
"distributed",
".",
"all_gather",
"(",
"out_buffers",
",",
"in_buffer",
".",
"cuda",
"(",
")",
")",
"results",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"world_size",
")",
":",
"out_buffer",
"=",
"out_buffers",
"[",
"i",
"]",
"size",
"=",
"(",
"255",
"*",
"out_buffer",
"[",
"0",
"]",
".",
"item",
"(",
")",
")",
"+",
"out_buffer",
"[",
"1",
"]",
".",
"item",
"(",
")",
"bytes_list",
"=",
"bytes",
"(",
"out_buffer",
"[",
"2",
":",
"size",
"+",
"2",
"]",
".",
"tolist",
"(",
")",
")",
"result",
"=",
"pickle",
".",
"loads",
"(",
"bytes_list",
")",
"results",
".",
"append",
"(",
"result",
")",
"return",
"results"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/distributed.py#L89-L122 |
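
The two header bytes written above are a base-255 length encoding, which is what the assert max_size < 255*256 protects. A standalone round-trip check (the payload is arbitrary):

import pickle

enc = pickle.dumps({"loss": 1.5, "n_words": 42})
enc_size = len(enc)
b0, b1 = enc_size // 255, enc_size % 255  # the two uint8 header slots
assert b0 <= 255 and b1 < 255
assert 255 * b0 + b1 == enc_size          # decode step from the loop above
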
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | build_loss_compute | (model, tgt_vocab, opt, train=True) | return compute | This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase. | This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase. | [
"This",
"returns",
"a",
"user",
"-",
"defined",
"LossCompute",
"object",
"which",
"is",
"used",
"to",
"compute",
"loss",
"in",
"train",
"/",
"validate",
"process",
".",
"You",
"can",
"implement",
"your",
"own",
"*",
"LossCompute",
"class",
"by",
"subclassing",
"LossComputeBase",
"."
] | def build_loss_compute(model, tgt_vocab, opt, train=True):
"""
This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, opt.copy_attn_force,
opt.copy_loss_by_seqlength)
else:
compute = NMTLossCompute(
model.generator, tgt_vocab,
label_smoothing=opt.label_smoothing if train else 0.0)
compute.to(device)
return compute | [
"def",
"build_loss_compute",
"(",
"model",
",",
"tgt_vocab",
",",
"opt",
",",
"train",
"=",
"True",
")",
":",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
"if",
"onmt",
".",
"utils",
".",
"misc",
".",
"use_gpu",
"(",
"opt",
")",
"else",
"\"cpu\"",
")",
"if",
"opt",
".",
"copy_attn",
":",
"compute",
"=",
"onmt",
".",
"modules",
".",
"CopyGeneratorLossCompute",
"(",
"model",
".",
"generator",
",",
"tgt_vocab",
",",
"opt",
".",
"copy_attn_force",
",",
"opt",
".",
"copy_loss_by_seqlength",
")",
"else",
":",
"compute",
"=",
"NMTLossCompute",
"(",
"model",
".",
"generator",
",",
"tgt_vocab",
",",
"label_smoothing",
"=",
"opt",
".",
"label_smoothing",
"if",
"train",
"else",
"0.0",
")",
"compute",
".",
"to",
"(",
"device",
")",
"return",
"compute"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L17-L35 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | filter_shard_state | (state, shard_size=None) | ? | ? | [
"?"
] | def filter_shard_state(state, shard_size=None):
""" ? """
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split) | [
"def",
"filter_shard_state",
"(",
"state",
",",
"shard_size",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"state",
".",
"items",
"(",
")",
":",
"if",
"shard_size",
"is",
"None",
":",
"yield",
"k",
",",
"v",
"if",
"v",
"is",
"not",
"None",
":",
"v_split",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"v",
",",
"torch",
".",
"Tensor",
")",
":",
"for",
"v_chunk",
"in",
"torch",
".",
"split",
"(",
"v",
",",
"shard_size",
")",
":",
"v_chunk",
"=",
"v_chunk",
".",
"data",
".",
"clone",
"(",
")",
"v_chunk",
".",
"requires_grad",
"=",
"v",
".",
"requires_grad",
"v_split",
".",
"append",
"(",
"v_chunk",
")",
"yield",
"k",
",",
"(",
"v",
",",
"v_split",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L252-L265 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | shards | (state, shard_size, eval_only=False) | Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation. | Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards. | [
"Args",
":",
"state",
":",
"A",
"dictionary",
"which",
"corresponds",
"to",
"the",
"output",
"of",
"*",
"LossCompute",
".",
"_make_shard_state",
"()",
".",
"The",
"values",
"for",
"those",
"keys",
"are",
"Tensor",
"-",
"like",
"or",
"None",
".",
"shard_size",
":",
"The",
"maximum",
"size",
"of",
"the",
"shards",
"yielded",
"by",
"the",
"model",
".",
"eval_only",
":",
"If",
"True",
"only",
"yield",
"the",
"state",
"nothing",
"else",
".",
"Otherwise",
"yield",
"shards",
"."
] | def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads) | [
"def",
"shards",
"(",
"state",
",",
"shard_size",
",",
"eval_only",
"=",
"False",
")",
":",
"if",
"eval_only",
":",
"yield",
"filter_shard_state",
"(",
"state",
")",
"else",
":",
"# non_none: the subdict of the state dictionary where the values",
"# are not None.",
"non_none",
"=",
"dict",
"(",
"filter_shard_state",
"(",
"state",
",",
"shard_size",
")",
")",
"# Now, the iteration:",
"# state is a dictionary of sequences of tensor-like but we",
"# want a sequence of dictionaries of tensors.",
"# First, unzip the dictionary into a sequence of keys and a",
"# sequence of tensor-like sequences.",
"keys",
",",
"values",
"=",
"zip",
"(",
"*",
"(",
"(",
"k",
",",
"[",
"v_chunk",
"for",
"v_chunk",
"in",
"v_split",
"]",
")",
"for",
"k",
",",
"(",
"_",
",",
"v_split",
")",
"in",
"non_none",
".",
"items",
"(",
")",
")",
")",
"# Now, yield a dictionary for each shard. The keys are always",
"# the same. values is a sequence of length #keys where each",
"# element is a sequence of length #shards. We want to iterate",
"# over the shards, not over the keys: therefore, the values need",
"# to be re-zipped by shard and then each shard can be paired",
"# with the keys.",
"for",
"shard_tensors",
"in",
"zip",
"(",
"*",
"values",
")",
":",
"yield",
"dict",
"(",
"zip",
"(",
"keys",
",",
"shard_tensors",
")",
")",
"# Assumed backprop'd",
"variables",
"=",
"[",
"]",
"for",
"k",
",",
"(",
"v",
",",
"v_split",
")",
"in",
"non_none",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"torch",
".",
"Tensor",
")",
"and",
"state",
"[",
"k",
"]",
".",
"requires_grad",
":",
"variables",
".",
"extend",
"(",
"zip",
"(",
"torch",
".",
"split",
"(",
"state",
"[",
"k",
"]",
",",
"shard_size",
")",
",",
"[",
"v_chunk",
".",
"grad",
"for",
"v_chunk",
"in",
"v_split",
"]",
")",
")",
"inputs",
",",
"grads",
"=",
"zip",
"(",
"*",
"variables",
")",
"torch",
".",
"autograd",
".",
"backward",
"(",
"inputs",
",",
"grads",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L268-L315 |
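
The shard trick in shards() is: run forward/backward on detached chunks, then replay the accumulated chunk gradients through the real graph with one torch.autograd.backward call. A self-contained sketch (the quadratic loss is a placeholder):

import torch

output = torch.randn(6, 3, requires_grad=True)
shard_size = 2

chunks = []                                  # detached leaf copies, as in
for c in torch.split(output, shard_size):    # filter_shard_state
    c = c.data.clone()
    c.requires_grad = True
    chunks.append(c)

for c in chunks:                             # per-shard loss and backward
    (c ** 2).sum().backward()

# one backward through the original graph, fed the per-shard gradients
torch.autograd.backward(list(torch.split(output, shard_size)),
                        [c.grad for c in chunks])
print(torch.allclose(output.grad, 2 * output))  # True
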
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LossComputeBase._make_shard_state | (self, batch, output, range_, attns=None) | return NotImplementedError | Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model. | Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model. | [
"Make",
"shard",
"state",
"dictionary",
"for",
"shards",
"()",
"to",
"return",
"iterable",
"shards",
"for",
"efficient",
"loss",
"computation",
".",
"Subclass",
"must",
"define",
"this",
"method",
"to",
"match",
"its",
"own",
"_compute_loss",
"()",
"interface",
".",
"Args",
":",
"batch",
":",
"the",
"current",
"batch",
".",
"output",
":",
"the",
"predict",
"output",
"from",
"the",
"model",
".",
"range_",
":",
"the",
"range",
"of",
"examples",
"for",
"computing",
"the",
"whole",
"batch",
"or",
"a",
"trunc",
"of",
"it?",
"attns",
":",
"the",
"attns",
"dictionary",
"returned",
"from",
"the",
"model",
"."
] | def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError | [
"def",
"_make_shard_state",
"(",
"self",
",",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
"=",
"None",
")",
":",
"return",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L64-L76 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LossComputeBase._compute_loss | (self, batch, output, target, **kwargs) | return NotImplementedError | Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss. | Compute the loss. Subclass must define this method. | [
"Compute",
"the",
"loss",
".",
"Subclass",
"must",
"define",
"this",
"method",
"."
] | def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError | [
"def",
"_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"target",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L78-L89 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LossComputeBase.monolithic_compute_loss | (self, batch, output, attns) | return batch_stats | Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics | Compute the forward loss for the batch. | [
"Compute",
"the",
"forward",
"loss",
"for",
"the",
"batch",
"."
] | def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics
"""
range_ = (0, batch.tgt.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats | [
"def",
"monolithic_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"attns",
")",
":",
"range_",
"=",
"(",
"0",
",",
"batch",
".",
"tgt",
".",
"size",
"(",
"0",
")",
")",
"shard_state",
"=",
"self",
".",
"_make_shard_state",
"(",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
")",
"_",
",",
"batch_stats",
"=",
"self",
".",
"_compute_loss",
"(",
"batch",
",",
"*",
"*",
"shard_state",
")",
"return",
"batch_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L91-L109 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LossComputeBase.sharded_compute_loss | (self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization) | return batch_stats | Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
Returns:
:obj:`onmt.utils.Statistics`: validation loss statistics | Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency. | [
"Compute",
"the",
"forward",
"loss",
"and",
"backpropagate",
".",
"Computation",
"is",
"done",
"with",
"shards",
"and",
"optionally",
"truncation",
"for",
"memory",
"efficiency",
"."
] | def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization):
"""Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
Returns:
:obj:`onmt.utils.Statistics`: validation loss statistics
"""
batch_stats = onmt.utils.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self._make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return batch_stats | [
"def",
"sharded_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"attns",
",",
"cur_trunc",
",",
"trunc_size",
",",
"shard_size",
",",
"normalization",
")",
":",
"batch_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"range_",
"=",
"(",
"cur_trunc",
",",
"cur_trunc",
"+",
"trunc_size",
")",
"shard_state",
"=",
"self",
".",
"_make_shard_state",
"(",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
")",
"for",
"shard",
"in",
"shards",
"(",
"shard_state",
",",
"shard_size",
")",
":",
"loss",
",",
"stats",
"=",
"self",
".",
"_compute_loss",
"(",
"batch",
",",
"*",
"*",
"shard",
")",
"loss",
".",
"div",
"(",
"float",
"(",
"normalization",
")",
")",
".",
"backward",
"(",
")",
"batch_stats",
".",
"update",
"(",
"stats",
")",
"return",
"batch_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L111-L149 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LossComputeBase._stats | (self, loss, scores, target) | return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct) | Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch. | Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets | [
"Args",
":",
"loss",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"the",
"loss",
"computed",
"by",
"the",
"loss",
"criterion",
".",
"scores",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"a",
"score",
"for",
"each",
"possible",
"output",
"target",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"true",
"targets"
] | def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum() \
.item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct) | [
"def",
"_stats",
"(",
"self",
",",
"loss",
",",
"scores",
",",
"target",
")",
":",
"pred",
"=",
"scores",
".",
"max",
"(",
"1",
")",
"[",
"1",
"]",
"non_padding",
"=",
"target",
".",
"ne",
"(",
"self",
".",
"padding_idx",
")",
"num_correct",
"=",
"pred",
".",
"eq",
"(",
"target",
")",
".",
"masked_select",
"(",
"non_padding",
")",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"num_non_padding",
"=",
"non_padding",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
"loss",
".",
"item",
"(",
")",
",",
"num_non_padding",
",",
"num_correct",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L151-L168 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/loss.py | python | LabelSmoothingLoss.forward | (self, output, target) | return F.kl_div(output, model_prob, reduction='sum') | output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size | output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size | [
"output",
"(",
"FloatTensor",
")",
":",
"batch_size",
"x",
"n_classes",
"target",
"(",
"LongTensor",
")",
":",
"batch_size"
] | def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum') | [
"def",
"forward",
"(",
"self",
",",
"output",
",",
"target",
")",
":",
"model_prob",
"=",
"self",
".",
"one_hot",
".",
"repeat",
"(",
"target",
".",
"size",
"(",
"0",
")",
",",
"1",
")",
"model_prob",
".",
"scatter_",
"(",
"1",
",",
"target",
".",
"unsqueeze",
"(",
"1",
")",
",",
"self",
".",
"confidence",
")",
"model_prob",
".",
"masked_fill_",
"(",
"(",
"target",
"==",
"self",
".",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
",",
"0",
")",
"return",
"F",
".",
"kl_div",
"(",
"output",
",",
"model_prob",
",",
"reduction",
"=",
"'sum'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/loss.py#L195-L204 |
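
The smoothed target built in forward() is easiest to see with tiny sizes. Below, one_hot is rebuilt the way such constructors are commonly written (the smoothing mass spread over the non-gold, non-pad classes); treat those constructor details as assumptions, since only forward() appears in this row:

import torch

n_classes, padding_idx, smoothing = 5, 0, 0.1
confidence = 1.0 - smoothing

one_hot = torch.full((n_classes,), smoothing / (n_classes - 2))
one_hot[padding_idx] = 0          # never put probability mass on padding

target = torch.tensor([3, 0])     # second position is a pad token
model_prob = one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), confidence)
model_prob.masked_fill_((target == padding_idx).unsqueeze(1), 0)
print(model_prob)  # row 0 sums to ~1.0, row 1 (padding) is all zeros
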
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/misc.py | python | aeq | (*args) | Assert all arguments have the same value | Assert all arguments have the same value | [
"Assert",
"all",
"arguments",
"have",
"the",
"same",
"value"
] | def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args) | [
"def",
"aeq",
"(",
"*",
"args",
")",
":",
"arguments",
"=",
"(",
"arg",
"for",
"arg",
"in",
"args",
")",
"first",
"=",
"next",
"(",
"arguments",
")",
"assert",
"all",
"(",
"arg",
"==",
"first",
"for",
"arg",
"in",
"arguments",
")",
",",
"\"Not all arguments have the same value: \"",
"+",
"str",
"(",
"args",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/misc.py#L6-L13 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/misc.py | python | sequence_mask | (lengths, max_len=None) | return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1))) | Creates a boolean mask from sequence lengths. | Creates a boolean mask from sequence lengths. | [
"Creates",
"a",
"boolean",
"mask",
"from",
"sequence",
"lengths",
"."
] | def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1))) | [
"def",
"sequence_mask",
"(",
"lengths",
",",
"max_len",
"=",
"None",
")",
":",
"batch_size",
"=",
"lengths",
".",
"numel",
"(",
")",
"max_len",
"=",
"max_len",
"or",
"lengths",
".",
"max",
"(",
")",
"return",
"(",
"torch",
".",
"arange",
"(",
"0",
",",
"max_len",
")",
".",
"type_as",
"(",
"lengths",
")",
".",
"repeat",
"(",
"batch_size",
",",
"1",
")",
".",
"lt",
"(",
"lengths",
".",
"unsqueeze",
"(",
"1",
")",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/misc.py#L16-L25 |
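
A worked example of sequence_mask: each row of the arange grid is compared against one length, so row i is True at exactly the first lengths[i] positions.

import torch

lengths = torch.tensor([2, 4, 1])
mask = (torch.arange(0, 4)
        .type_as(lengths)
        .repeat(lengths.numel(), 1)
        .lt(lengths.unsqueeze(1)))
print(mask)
# tensor([[ True,  True, False, False],
#         [ True,  True,  True,  True],
#         [ True, False, False, False]])
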
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/misc.py | python | tile | (x, count, dim=0) | return x | Tiles x on dimension dim count times. | Tiles x on dimension dim count times. | [
"Tiles",
"x",
"on",
"dimension",
"dim",
"count",
"times",
"."
] | def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x | [
"def",
"tile",
"(",
"x",
",",
"count",
",",
"dim",
"=",
"0",
")",
":",
"perm",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"x",
".",
"size",
"(",
")",
")",
")",
")",
"if",
"dim",
"!=",
"0",
":",
"perm",
"[",
"0",
"]",
",",
"perm",
"[",
"dim",
"]",
"=",
"perm",
"[",
"dim",
"]",
",",
"perm",
"[",
"0",
"]",
"x",
"=",
"x",
".",
"permute",
"(",
"perm",
")",
".",
"contiguous",
"(",
")",
"out_size",
"=",
"list",
"(",
"x",
".",
"size",
"(",
")",
")",
"out_size",
"[",
"0",
"]",
"*=",
"count",
"batch",
"=",
"x",
".",
"size",
"(",
"0",
")",
"x",
"=",
"x",
".",
"view",
"(",
"batch",
",",
"-",
"1",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"repeat",
"(",
"count",
",",
"1",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"*",
"out_size",
")",
"if",
"dim",
"!=",
"0",
":",
"x",
"=",
"x",
".",
"permute",
"(",
"perm",
")",
".",
"contiguous",
"(",
")",
"return",
"x"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/misc.py#L28-L47 |
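
tile() repeats each slice along dim in place (the layout beam search uses to expand a batch), so for dim 0 its result matches repeat_interleave. A quick check with made-up data:

import torch

x = torch.tensor([[1, 2], [3, 4]])
print(x.repeat_interleave(2, dim=0))  # what tile(x, 2, dim=0) returns
# tensor([[1, 2], [1, 2], [3, 4], [3, 4]])
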
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/misc.py | python | use_gpu | (opt) | return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1) | Creates a boolean indicating if a gpu is used | Creates a boolean indicating if a gpu is used | [
"Creates",
"a",
"boolean",
"indicating",
"if",
"a",
"gpu",
"is",
"used"
] | def use_gpu(opt):
"""
Creates a boolean indicating if a gpu is used
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1) | [
"def",
"use_gpu",
"(",
"opt",
")",
":",
"return",
"(",
"hasattr",
"(",
"opt",
",",
"'gpu_ranks'",
")",
"and",
"len",
"(",
"opt",
".",
"gpu_ranks",
")",
">",
"0",
")",
"or",
"(",
"hasattr",
"(",
"opt",
",",
"'gpu'",
")",
"and",
"opt",
".",
"gpu",
">",
"-",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/misc.py#L50-L55 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgrBase.__init__ | (self, report_every, start_time=-1.) | Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()` | Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()` | [
"Args",
":",
"report_every",
"(",
"int",
")",
":",
"Report",
"status",
"every",
"this",
"many",
"sentences",
"start_time",
"(",
"float",
")",
":",
"manually",
"set",
"report",
"start",
"time",
".",
"Negative",
"values",
"means",
"that",
"you",
"will",
"need",
"to",
"set",
"it",
"later",
"or",
"use",
"start",
"()"
] | def __init__(self, report_every, start_time=-1.):
"""
Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()`
"""
self.report_every = report_every
self.progress_step = 0
self.start_time = start_time | [
"def",
"__init__",
"(",
"self",
",",
"report_every",
",",
"start_time",
"=",
"-",
"1.",
")",
":",
"self",
".",
"report_every",
"=",
"report_every",
"self",
".",
"progress_step",
"=",
"0",
"self",
".",
"start_time",
"=",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L33-L42 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgrBase.report_training | (self, step, num_steps, learning_rate,
report_stats, multigpu=False) | return onmt.utils.Statistics() | This is the user-defined batch-level training progress
report function.
Args:
step(int): current step count.
num_steps(int): total number of batches.
learning_rate(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance. | This is the user-defined batch-level training progress
report function. | [
"This",
"is",
"the",
"user",
"-",
"defined",
"batch",
"-",
"level",
"training",
"progress",
"report",
"function",
"."
] | def report_training(self, step, num_steps, learning_rate,
report_stats, multigpu=False):
"""
This is the user-defined batch-level training progress
report function.
Args:
step(int): current step count.
num_steps(int): total number of batches.
learning_rate(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if self.start_time < 0:
raise ValueError("""ReportMgr needs to be started
(set 'start_time' or use 'start()'""")
if multigpu:
report_stats = onmt.utils.Statistics.all_gather_stats(report_stats)
if step % self.report_every == 0:
self._report_training(
step, num_steps, learning_rate, report_stats)
self.progress_step += 1
return onmt.utils.Statistics() | [
"def",
"report_training",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
",",
"multigpu",
"=",
"False",
")",
":",
"if",
"self",
".",
"start_time",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\"",
")",
"if",
"multigpu",
":",
"report_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
".",
"all_gather_stats",
"(",
"report_stats",
")",
"if",
"step",
"%",
"self",
".",
"report_every",
"==",
"0",
":",
"self",
".",
"_report_training",
"(",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
")",
"self",
".",
"progress_step",
"+=",
"1",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L50-L75 |