nwo stringlengths 6-76 | sha stringlengths 40-40 | path stringlengths 5-118 | language stringclasses 1 value | identifier stringlengths 1-89 | parameters stringlengths 2-5.4k | argument_list stringclasses 1 value | return_statement stringlengths 0-51.1k | docstring stringlengths 1-17.6k | docstring_summary stringlengths 0-7.02k | docstring_tokens sequence | function stringlengths 30-51.1k | function_tokens sequence | url stringlengths 85-218 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/rnn_encoder.py | python | RNNEncoder._bridge | (self, hidden) | return outs | Forward hidden state through bridge | Forward hidden state through bridge | [
"Forward",
"hidden",
"state",
"through",
"bridge"
] | def _bridge(self, hidden):
"""
Forward hidden state through bridge
"""
def bottle_hidden(linear, states):
"""
Transform from 3D to 2D, apply linear and return initial size
"""
size = states.size()
result = linear(states.view(-1, self.total_hidden_dim))
return F.relu(result).view(size)
if isinstance(hidden, tuple): # LSTM
outs = tuple([bottle_hidden(layer, hidden[ix])
for ix, layer in enumerate(self.bridge)])
else:
outs = bottle_hidden(self.bridge[0], hidden)
return outs | [
"def",
"_bridge",
"(",
"self",
",",
"hidden",
")",
":",
"def",
"bottle_hidden",
"(",
"linear",
",",
"states",
")",
":",
"\"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"",
"size",
"=",
"states",
".",
"size",
"(",
")",
"result",
"=",
"linear",
"(",
"states",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"total_hidden_dim",
")",
")",
"return",
"F",
".",
"relu",
"(",
"result",
")",
".",
"view",
"(",
"size",
")",
"if",
"isinstance",
"(",
"hidden",
",",
"tuple",
")",
":",
"# LSTM",
"outs",
"=",
"tuple",
"(",
"[",
"bottle_hidden",
"(",
"layer",
",",
"hidden",
"[",
"ix",
"]",
")",
"for",
"ix",
",",
"layer",
"in",
"enumerate",
"(",
"self",
".",
"bridge",
")",
"]",
")",
"else",
":",
"outs",
"=",
"bottle_hidden",
"(",
"self",
".",
"bridge",
"[",
"0",
"]",
",",
"hidden",
")",
"return",
"outs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/rnn_encoder.py#L90-L107 |
|
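The `_bridge` function above flattens a 3D hidden state to 2D so a single `nn.Linear` can act on it, then restores the original shape. A minimal standalone sketch of that bottling step; the sizes here are illustrative, not taken from the repo's config:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

layers, batch, dim = 2, 4, 8
total_hidden_dim = dim  # assumed: the linear acts on the flattened last dimension
bridge = nn.Linear(total_hidden_dim, total_hidden_dim)

hidden = torch.randn(layers, batch, dim)  # 3D hidden state
size = hidden.size()
out = F.relu(bridge(hidden.view(-1, total_hidden_dim))).view(size)
assert out.size() == size  # original shape is restored after the linear + ReLU
```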
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/image_encoder.py | python | ImageEncoder.load_pretrained_vectors | (self, opt) | | Pass in needed options only when modifying the function definition. | Pass in needed options only when modifying the function definition. | [
"Pass",
"in",
"needed",
"options",
"only",
"when",
"modify",
"function",
"definition",
"."
] | def load_pretrained_vectors(self, opt):
""" Pass in needed options only when modify function definition."""
pass | [
"def",
"load_pretrained_vectors",
"(",
"self",
",",
"opt",
")",
":",
"pass"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/image_encoder.py#L50-L52 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/image_encoder.py | python | ImageEncoder.forward | (self, src, lengths=None) | return hidden_t, out | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | See :obj:`onmt.encoders.encoder.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"encoders",
".",
"encoder",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, src, lengths=None):
"See :obj:`onmt.encoders.encoder.EncoderBase.forward()`"
batch_size = src.size(0)
# (batch_size, 64, imgH, imgW)
# layer 1
src = F.relu(self.layer1(src[:, :, :, :] - 0.5), True)
# (batch_size, 64, imgH/2, imgW/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 128, imgH/2, imgW/2)
# layer 2
src = F.relu(self.layer2(src), True)
# (batch_size, 128, imgH/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer 3
# batch norm 1
src = F.relu(self.batch_norm1(self.layer3(src)), True)
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer4
src = F.relu(self.layer4(src), True)
# (batch_size, 256, imgH/2/2/2, imgW/2/2)
src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))
# (batch_size, 512, imgH/2/2/2, imgW/2/2)
# layer 5
# batch norm 2
src = F.relu(self.batch_norm2(self.layer5(src)), True)
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.max_pool2d(src, kernel_size=(2, 1), stride=(2, 1))
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
src = F.relu(self.batch_norm3(self.layer6(src)), True)
# # (batch_size, 512, H, W)
all_outputs = []
for row in range(src.size(2)):
inp = src[:, :, row, :].transpose(0, 2) \
.transpose(1, 2)
row_vec = torch.Tensor(batch_size).type_as(inp.data) \
.long().fill_(row)
pos_emb = self.pos_lut(row_vec)
with_pos = torch.cat(
(pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
outputs, hidden_t = self.rnn(with_pos)
all_outputs.append(outputs)
out = torch.cat(all_outputs, 0)
return hidden_t, out | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"lengths",
"=",
"None",
")",
":",
"batch_size",
"=",
"src",
".",
"size",
"(",
"0",
")",
"# (batch_size, 64, imgH, imgW)",
"# layer 1",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer1",
"(",
"src",
"[",
":",
",",
":",
",",
":",
",",
":",
"]",
"-",
"0.5",
")",
",",
"True",
")",
"# (batch_size, 64, imgH/2, imgW/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
")",
"# (batch_size, 128, imgH/2, imgW/2)",
"# layer 2",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer2",
"(",
"src",
")",
",",
"True",
")",
"# (batch_size, 128, imgH/2/2, imgW/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"2",
")",
",",
"stride",
"=",
"(",
"2",
",",
"2",
")",
")",
"# (batch_size, 256, imgH/2/2, imgW/2/2)",
"# layer 3",
"# batch norm 1",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm1",
"(",
"self",
".",
"layer3",
"(",
"src",
")",
")",
",",
"True",
")",
"# (batch_size, 256, imgH/2/2, imgW/2/2)",
"# layer4",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"layer4",
"(",
"src",
")",
",",
"True",
")",
"# (batch_size, 256, imgH/2/2/2, imgW/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"1",
",",
"2",
")",
",",
"stride",
"=",
"(",
"1",
",",
"2",
")",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2)",
"# layer 5",
"# batch norm 2",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm2",
"(",
"self",
".",
"layer5",
"(",
"src",
")",
")",
",",
"True",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)",
"src",
"=",
"F",
".",
"max_pool2d",
"(",
"src",
",",
"kernel_size",
"=",
"(",
"2",
",",
"1",
")",
",",
"stride",
"=",
"(",
"2",
",",
"1",
")",
")",
"# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)",
"src",
"=",
"F",
".",
"relu",
"(",
"self",
".",
"batch_norm3",
"(",
"self",
".",
"layer6",
"(",
"src",
")",
")",
",",
"True",
")",
"# # (batch_size, 512, H, W)",
"all_outputs",
"=",
"[",
"]",
"for",
"row",
"in",
"range",
"(",
"src",
".",
"size",
"(",
"2",
")",
")",
":",
"inp",
"=",
"src",
"[",
":",
",",
":",
",",
"row",
",",
":",
"]",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"row_vec",
"=",
"torch",
".",
"Tensor",
"(",
"batch_size",
")",
".",
"type_as",
"(",
"inp",
".",
"data",
")",
".",
"long",
"(",
")",
".",
"fill_",
"(",
"row",
")",
"pos_emb",
"=",
"self",
".",
"pos_lut",
"(",
"row_vec",
")",
"with_pos",
"=",
"torch",
".",
"cat",
"(",
"(",
"pos_emb",
".",
"view",
"(",
"1",
",",
"pos_emb",
".",
"size",
"(",
"0",
")",
",",
"pos_emb",
".",
"size",
"(",
"1",
")",
")",
",",
"inp",
")",
",",
"0",
")",
"outputs",
",",
"hidden_t",
"=",
"self",
".",
"rnn",
"(",
"with_pos",
")",
"all_outputs",
".",
"append",
"(",
"outputs",
")",
"out",
"=",
"torch",
".",
"cat",
"(",
"all_outputs",
",",
"0",
")",
"return",
"hidden_t",
",",
"out"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/image_encoder.py#L54-L109 |
|
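The shape comments in `forward` follow from the pooling schedule: two (2, 2) pools, one (1, 2) pool, and one (2, 1) pool, each with kernel equal to stride, so height and width both end up divided by 8. A quick sanity check of that arithmetic with illustrative sizes:

```python
imgH, imgW = 32, 96
for kh, kw in [(2, 2), (2, 2), (1, 2), (2, 1)]:  # kernel == stride for each pool
    imgH, imgW = imgH // kh, imgW // kw
assert (imgH, imgW) == (32 // 8, 96 // 8)  # both dimensions shrink by a factor of 8
```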
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/encoders/cnn_encoder.py | python | CNNEncoder.forward | (self, input, lengths=None, hidden=None) | return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \
out.squeeze(3).transpose(0, 1).contiguous() | See :obj:`onmt.modules.EncoderBase.forward()` | See :obj:`onmt.modules.EncoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"EncoderBase",
".",
"forward",
"()"
] | def forward(self, input, lengths=None, hidden=None):
""" See :obj:`onmt.modules.EncoderBase.forward()`"""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
# s_len, batch, emb_dim = emb.size()
emb = emb.transpose(0, 1).contiguous()
emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)
emb_remap = self.linear(emb_reshape)
emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)
emb_remap = shape_transform(emb_remap)
out = self.cnn(emb_remap)
return emb_remap.squeeze(3).transpose(0, 1).contiguous(), \
out.squeeze(3).transpose(0, 1).contiguous() | [
"def",
"forward",
"(",
"self",
",",
"input",
",",
"lengths",
"=",
"None",
",",
"hidden",
"=",
"None",
")",
":",
"self",
".",
"_check_args",
"(",
"input",
",",
"lengths",
",",
"hidden",
")",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"input",
")",
"# s_len, batch, emb_dim = emb.size()",
"emb",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"emb_reshape",
"=",
"emb",
".",
"view",
"(",
"emb",
".",
"size",
"(",
"0",
")",
"*",
"emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"emb_remap",
"=",
"self",
".",
"linear",
"(",
"emb_reshape",
")",
"emb_remap",
"=",
"emb_remap",
".",
"view",
"(",
"emb",
".",
"size",
"(",
"0",
")",
",",
"emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"emb_remap",
"=",
"shape_transform",
"(",
"emb_remap",
")",
"out",
"=",
"self",
".",
"cnn",
"(",
"emb_remap",
")",
"return",
"emb_remap",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
",",
"out",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/encoders/cnn_encoder.py#L28-L43 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/models/sru.py | python | check_sru_requirement | (abort=False) | return True | Return True if the checks pass; if a check fails and abort is True,
raise an Exception, otherwise return False. | Return True if the checks pass; if a check fails and abort is True,
raise an Exception, otherwise return False. | [
"Return",
"True",
"if",
"check",
"pass",
";",
"if",
"check",
"fails",
"and",
"abort",
"is",
"True",
"raise",
"an",
"Exception",
"othereise",
"return",
"False",
"."
] | def check_sru_requirement(abort=False):
"""
Return True if the checks pass; if a check fails and abort is True,
raise an Exception, otherwise return False.
"""
# Check 1.
try:
if platform.system() == 'Windows':
subprocess.check_output('pip freeze | findstr cupy', shell=True)
subprocess.check_output('pip freeze | findstr pynvrtc',
shell=True)
else: # Unix-like systems
subprocess.check_output('pip freeze | grep -w cupy', shell=True)
subprocess.check_output('pip freeze | grep -w pynvrtc',
shell=True)
except subprocess.CalledProcessError:
if not abort:
return False
raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
"python packages installed.")
# Check 2.
if torch.cuda.is_available() is False:
if not abort:
return False
raise AssertionError("Using SRU requires pytorch built with cuda.")
# Check 3.
pattern = re.compile(".*cuda/lib.*")
ld_path = os.getenv('LD_LIBRARY_PATH', "")
if re.match(pattern, ld_path) is None:
if not abort:
return False
raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
return True | [
"def",
"check_sru_requirement",
"(",
"abort",
"=",
"False",
")",
":",
"# Check 1.",
"try",
":",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"'Windows'",
":",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | findstr cupy'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | findstr pynvrtc'",
",",
"shell",
"=",
"True",
")",
"else",
":",
"# Unix-like systems",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | grep -w cupy'",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"check_output",
"(",
"'pip freeze | grep -w pynvrtc'",
",",
"shell",
"=",
"True",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires 'cupy' and 'pynvrtc' \"",
"\"python packages installed.\"",
")",
"# Check 2.",
"if",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
"is",
"False",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires pytorch built with cuda.\"",
")",
"# Check 3.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"\".*cuda/lib.*\"",
")",
"ld_path",
"=",
"os",
".",
"getenv",
"(",
"'LD_LIBRARY_PATH'",
",",
"\"\"",
")",
"if",
"re",
".",
"match",
"(",
"pattern",
",",
"ld_path",
")",
"is",
"None",
":",
"if",
"not",
"abort",
":",
"return",
"False",
"raise",
"AssertionError",
"(",
"\"Using SRU requires setting cuda lib path, e.g. \"",
"\"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.\"",
")",
"return",
"True"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/models/sru.py#L32-L69 |
|
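The `pip freeze | grep` probe above shells out just to test whether packages are installed. A lighter, hedged alternative (not what the repo does) is to ask the import machinery directly:

```python
import importlib.util

def packages_available(*names):
    """Return True only if every named package can be imported."""
    return all(importlib.util.find_spec(n) is not None for n in names)

# Equivalent in spirit to Check 1 above, without spawning a shell.
print(packages_available("cupy", "pynvrtc"))
```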
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/models/model.py | python | NMTModel.forward | (self, src, tgt, lengths, dec_state=None) | return decoder_outputs, attns, dec_state | Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state | Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state. | [
"Forward",
"propagate",
"a",
"src",
"and",
"tgt",
"pair",
"for",
"training",
".",
"Possible",
"initialized",
"with",
"a",
"beginning",
"decoder",
"state",
"."
] | def forward(self, src, tgt, lengths, dec_state=None):
"""Forward propagate a `src` and `tgt` pair for training.
Possibly initialized with a beginning decoder state.
Args:
src (:obj:`Tensor`):
a source sequence passed to encoder.
typically for inputs this will be a padded :obj:`LongTensor`
of size `[len x batch x features]`. however, may be an
image or other generic input depending on encoder.
tgt (:obj:`LongTensor`):
a target sequence of size `[tgt_len x batch]`.
lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.
dec_state (:obj:`DecoderState`, optional): initial decoder state
Returns:
(:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):
* decoder output `[tgt_len x batch x hidden]`
* dictionary attention dists of `[tgt_len x batch x src_len]`
* final decoder state
"""
tgt = tgt[:-1] # exclude last target from inputs
enc_final, memory_bank = self.encoder(src, lengths)
enc_state = \
self.decoder.init_decoder_state(src, memory_bank, enc_final)
decoder_outputs, dec_state, attns = \
self.decoder(tgt, memory_bank,
enc_state if dec_state is None
else dec_state,
memory_lengths=lengths)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return decoder_outputs, attns, dec_state | [
"def",
"forward",
"(",
"self",
",",
"src",
",",
"tgt",
",",
"lengths",
",",
"dec_state",
"=",
"None",
")",
":",
"tgt",
"=",
"tgt",
"[",
":",
"-",
"1",
"]",
"# exclude last target from inputs",
"enc_final",
",",
"memory_bank",
"=",
"self",
".",
"encoder",
"(",
"src",
",",
"lengths",
")",
"enc_state",
"=",
"self",
".",
"decoder",
".",
"init_decoder_state",
"(",
"src",
",",
"memory_bank",
",",
"enc_final",
")",
"decoder_outputs",
",",
"dec_state",
",",
"attns",
"=",
"self",
".",
"decoder",
"(",
"tgt",
",",
"memory_bank",
",",
"enc_state",
"if",
"dec_state",
"is",
"None",
"else",
"dec_state",
",",
"memory_lengths",
"=",
"lengths",
")",
"if",
"self",
".",
"multigpu",
":",
"# Not yet supported on multi-gpu",
"dec_state",
"=",
"None",
"attns",
"=",
"None",
"return",
"decoder_outputs",
",",
"attns",
",",
"dec_state"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/models/model.py#L22-L57 |
|
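The `tgt = tgt[:-1]` line implements teacher forcing: the decoder consumes the target shifted right, so it never sees the token it is asked to predict. A toy illustration with made-up token ids:

```python
import torch

tgt = torch.tensor([[2], [15], [7], [3]])  # <bos>, w1, w2, <eos>; shape [tgt_len x batch]
decoder_input = tgt[:-1]                   # <bos>, w1, w2  -> fed to the decoder
gold_output = tgt[1:]                      # w1, w2, <eos>  -> compared against predictions
print(decoder_input.squeeze(1).tolist(), gold_output.squeeze(1).tolist())
```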
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/models/model_saver.py | python | ModelSaverBase.maybe_save | (self, step) | Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic | Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic | [
"Main",
"entry",
"point",
"for",
"model",
"saver",
"It",
"wraps",
"the",
"_save",
"method",
"with",
"checks",
"and",
"apply",
"keep_checkpoint",
"related",
"logic"
] | def maybe_save(self, step):
"""
Main entry point for model saver
It wraps the `_save` method with checks and applies `keep_checkpoint`
related logic
"""
if self.keep_checkpoint == 0:
return
if step % self.save_checkpoint_steps != 0:
return
chkpt, chkpt_name = self._save(step)
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name) | [
"def",
"maybe_save",
"(",
"self",
",",
"step",
")",
":",
"if",
"self",
".",
"keep_checkpoint",
"==",
"0",
":",
"return",
"if",
"step",
"%",
"self",
".",
"save_checkpoint_steps",
"!=",
"0",
":",
"return",
"chkpt",
",",
"chkpt_name",
"=",
"self",
".",
"_save",
"(",
"step",
")",
"if",
"self",
".",
"keep_checkpoint",
">",
"0",
":",
"if",
"len",
"(",
"self",
".",
"checkpoint_queue",
")",
"==",
"self",
".",
"checkpoint_queue",
".",
"maxlen",
":",
"todel",
"=",
"self",
".",
"checkpoint_queue",
".",
"popleft",
"(",
")",
"self",
".",
"_rm_checkpoint",
"(",
"todel",
")",
"self",
".",
"checkpoint_queue",
".",
"append",
"(",
"chkpt_name",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/models/model_saver.py#L43-L61 |
||
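The `keep_checkpoint` logic above is a fixed-size rotation: once `checkpoint_queue` (a `collections.deque` with `maxlen`) is full, the oldest name is popped and that checkpoint removed. A self-contained sketch of the same rotation, with file deletion replaced by a print:

```python
from collections import deque

keep_checkpoint = 2
queue = deque(maxlen=keep_checkpoint)

for name in ["step_100.pt", "step_200.pt", "step_300.pt"]:
    if len(queue) == queue.maxlen:
        oldest = queue.popleft()
        print("would remove", oldest)  # the real code calls _rm_checkpoint here
    queue.append(name)

print(list(queue))  # ['step_200.pt', 'step_300.pt']
```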
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/models/model_saver.py | python | ModelSaverBase._save | (self, step) | Save a resumable checkpoint.
Args:
step (int): step number
Returns:
checkpoint: the saved object
checkpoint_name: name (or path) of the saved checkpoint | Save a resumable checkpoint. | [
"Save",
"a",
"resumable",
"checkpoint",
"."
] | def _save(self, step):
""" Save a resumable checkpoint.
Args:
step (int): step number
Returns:
checkpoint: the saved object
checkpoint_name: name (or path) of the saved checkpoint
"""
raise NotImplementedError() | [
"def",
"_save",
"(",
"self",
",",
"step",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/models/model_saver.py#L63-L73 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/models/model_saver.py | python | ModelSaverBase._rm_checkpoint | (self, name) | Remove a checkpoint
Args:
name(str): name that identifies the checkpoint
(it may be a filepath) | Remove a checkpoint | [
"Remove",
"a",
"checkpoint"
] | def _rm_checkpoint(self, name):
"""
Remove a checkpoint
Args:
name(str): name that identifies the checkpoint
(it may be a filepath)
"""
raise NotImplementedError() | [
"def",
"_rm_checkpoint",
"(",
"self",
",",
"name",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/models/model_saver.py#L75-L83 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/cnn_factory.py | python | shape_transform | (x) | return torch.unsqueeze(torch.transpose(x, 1, 2), 3) | Transform the size of the tensors to fit the conv input. | Transform the size of the tensors to fit the conv input. | [
"Tranform",
"the",
"size",
"of",
"the",
"tensors",
"to",
"fit",
"for",
"conv",
"input",
"."
] | def shape_transform(x):
""" Tranform the size of the tensors to fit for conv input. """
return torch.unsqueeze(torch.transpose(x, 1, 2), 3) | [
"def",
"shape_transform",
"(",
"x",
")",
":",
"return",
"torch",
".",
"unsqueeze",
"(",
"torch",
".",
"transpose",
"(",
"x",
",",
"1",
",",
"2",
")",
",",
"3",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/cnn_factory.py#L14-L16 |
|
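`shape_transform` turns a `(batch, seq_len, emb_dim)` activation into the `(batch, channels, height, width)` layout that conv layers expect, with width fixed at 1. Verifying the shapes with toy sizes:

```python
import torch

x = torch.randn(4, 10, 64)                        # (batch, seq_len, emb_dim)
y = torch.unsqueeze(torch.transpose(x, 1, 2), 3)  # same ops as shape_transform
assert y.size() == (4, 64, 10, 1)                 # (batch, emb_dim, seq_len, 1)
```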
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | build_optim | (model, opt, checkpoint) | return optim | Build optimizer | Build optimizer | [
"Build",
"optimizer"
] | def build_optim(model, opt, checkpoint):
""" Build optimizer """
saved_optimizer_state_dict = None
if opt.train_from:
optim = checkpoint['optim']
# We need to save a copy of optim.optimizer.state_dict() for setting
# the optimizer state later on in Stage 2 of this method, since
# the method optim.set_parameters(model.parameters()) will overwrite
# optim.optimizer, and with it the values stored in
# optim.optimizer.state_dict()
saved_optimizer_state_dict = optim.optimizer.state_dict()
else:
optim = Optimizer(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_steps=opt.start_decay_steps,
decay_steps=opt.decay_steps,
beta1=opt.adam_beta1,
beta2=opt.adam_beta2,
adagrad_accum=opt.adagrad_accumulator_init,
decay_method=opt.decay_method,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size)
# Stage 1:
# Essentially optim.set_parameters (re-)creates an optimizer using
# model.parameters() as parameters that will be stored in the
# optim.optimizer.param_groups field of the torch optimizer class.
# Importantly, this method does not yet load the optimizer state, as
# essentially it builds a new optimizer with empty optimizer state and
# parameters from the model.
optim.set_parameters(model.named_parameters())
if opt.train_from:
# Stage 2: In this stage, which is only performed when loading an
# optimizer from a checkpoint, we load the saved_optimizer_state_dict
# into the re-created optimizer, to set the optim.optimizer.state
# field, which was previously empty. For this, we use the optimizer
# state saved in the "saved_optimizer_state_dict" variable for
# this purpose.
# See also: https://github.com/pytorch/pytorch/issues/2830
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
# Convert back the state values to cuda type if applicable
if use_gpu(opt):
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
# We want to make sure that indeed we have a non-empty optimizer state
# when we loaded an existing model. This should be at least the case
# for Adam, which saves "exp_avg" and "exp_avg_sq" state
# (Exponential moving average of gradient and squared gradient values)
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
return optim | [
"def",
"build_optim",
"(",
"model",
",",
"opt",
",",
"checkpoint",
")",
":",
"saved_optimizer_state_dict",
"=",
"None",
"if",
"opt",
".",
"train_from",
":",
"optim",
"=",
"checkpoint",
"[",
"'optim'",
"]",
"# We need to save a copy of optim.optimizer.state_dict() for setting",
"# the, optimizer state later on in Stage 2 in this method, since",
"# the method optim.set_parameters(model.parameters()) will overwrite",
"# optim.optimizer, and with ith the values stored in",
"# optim.optimizer.state_dict()",
"saved_optimizer_state_dict",
"=",
"optim",
".",
"optimizer",
".",
"state_dict",
"(",
")",
"else",
":",
"optim",
"=",
"Optimizer",
"(",
"opt",
".",
"optim",
",",
"opt",
".",
"learning_rate",
",",
"opt",
".",
"max_grad_norm",
",",
"lr_decay",
"=",
"opt",
".",
"learning_rate_decay",
",",
"start_decay_steps",
"=",
"opt",
".",
"start_decay_steps",
",",
"decay_steps",
"=",
"opt",
".",
"decay_steps",
",",
"beta1",
"=",
"opt",
".",
"adam_beta1",
",",
"beta2",
"=",
"opt",
".",
"adam_beta2",
",",
"adagrad_accum",
"=",
"opt",
".",
"adagrad_accumulator_init",
",",
"decay_method",
"=",
"opt",
".",
"decay_method",
",",
"warmup_steps",
"=",
"opt",
".",
"warmup_steps",
",",
"model_size",
"=",
"opt",
".",
"rnn_size",
")",
"# Stage 1:",
"# Essentially optim.set_parameters (re-)creates and optimizer using",
"# model.paramters() as parameters that will be stored in the",
"# optim.optimizer.param_groups field of the torch optimizer class.",
"# Importantly, this method does not yet load the optimizer state, as",
"# essentially it builds a new optimizer with empty optimizer state and",
"# parameters from the model.",
"optim",
".",
"set_parameters",
"(",
"model",
".",
"named_parameters",
"(",
")",
")",
"if",
"opt",
".",
"train_from",
":",
"# Stage 2: In this stage, which is only performed when loading an",
"# optimizer from a checkpoint, we load the saved_optimizer_state_dict",
"# into the re-created optimizer, to set the optim.optimizer.state",
"# field, which was previously empty. For this, we use the optimizer",
"# state saved in the \"saved_optimizer_state_dict\" variable for",
"# this purpose.",
"# See also: https://github.com/pytorch/pytorch/issues/2830",
"optim",
".",
"optimizer",
".",
"load_state_dict",
"(",
"saved_optimizer_state_dict",
")",
"# Convert back the state values to cuda type if applicable",
"if",
"use_gpu",
"(",
"opt",
")",
":",
"for",
"state",
"in",
"optim",
".",
"optimizer",
".",
"state",
".",
"values",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"state",
".",
"items",
"(",
")",
":",
"if",
"torch",
".",
"is_tensor",
"(",
"v",
")",
":",
"state",
"[",
"k",
"]",
"=",
"v",
".",
"cuda",
"(",
")",
"# We want to make sure that indeed we have a non-empty optimizer state",
"# when we loaded an existing model. This should be at least the case",
"# for Adam, which saves \"exp_avg\" and \"exp_avg_sq\" state",
"# (Exponential moving average of gradient and squared gradient values)",
"if",
"(",
"optim",
".",
"method",
"==",
"'adam'",
")",
"and",
"(",
"len",
"(",
"optim",
".",
"optimizer",
".",
"state",
")",
"<",
"1",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Error: loaded Adam optimizer from existing model\"",
"+",
"\" but optimizer state is empty\"",
")",
"return",
"optim"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L9-L68 |
|
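The two-stage restore in `build_optim` (rebuild the optimizer, then load the saved state and move its tensors to the right device) is a general PyTorch pattern. A minimal sketch with a toy model, assuming a CPU-only fallback:

```python
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
opt1 = optim.Adam(model.parameters(), lr=1e-3)
model(torch.randn(1, 4)).sum().backward()
opt1.step()                      # materializes exp_avg / exp_avg_sq state
saved = opt1.state_dict()

# Stage 1: a fresh optimizer over the (re-)created parameters; state starts empty.
opt2 = optim.Adam(model.parameters(), lr=1e-3)
# Stage 2: load the saved state, then move its tensors to the target device.
opt2.load_state_dict(saved)
device = "cuda" if torch.cuda.is_available() else "cpu"
for state in opt2.state.values():
    for k, v in state.items():
        if torch.is_tensor(v):
            state[k] = v.to(device)
```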
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.__init__ | (self, op) | ? | ? | [
"?"
] | def __init__(self, op):
""" ? """
self.optimizers = op | [
"def",
"__init__",
"(",
"self",
",",
"op",
")",
":",
"self",
".",
"optimizers",
"=",
"op"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L74-L76 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.zero_grad | (self) | ? | ? | [
"?"
] | def zero_grad(self):
""" ? """
for op in self.optimizers:
op.zero_grad() | [
"def",
"zero_grad",
"(",
"self",
")",
":",
"for",
"op",
"in",
"self",
".",
"optimizers",
":",
"op",
".",
"zero_grad",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L78-L81 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.step | (self) | ? | ? | [
"?"
] | def step(self):
""" ? """
for op in self.optimizers:
op.step() | [
"def",
"step",
"(",
"self",
")",
":",
"for",
"op",
"in",
"self",
".",
"optimizers",
":",
"op",
".",
"step",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L83-L86 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.state | (self) | return {k: v for op in self.optimizers for k, v in op.state.items()} | ? | ? | [
"?"
] | def state(self):
""" ? """
return {k: v for op in self.optimizers for k, v in op.state.items()} | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"op",
"in",
"self",
".",
"optimizers",
"for",
"k",
",",
"v",
"in",
"op",
".",
"state",
".",
"items",
"(",
")",
"}"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L89-L91 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.state_dict | (self) | return [op.state_dict() for op in self.optimizers] | ? | ? | [
"?"
] | def state_dict(self):
""" ? """
return [op.state_dict() for op in self.optimizers] | [
"def",
"state_dict",
"(",
"self",
")",
":",
"return",
"[",
"op",
".",
"state_dict",
"(",
")",
"for",
"op",
"in",
"self",
".",
"optimizers",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L93-L95 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | MultipleOptimizer.load_state_dict | (self, state_dicts) | ? | ? | [
"?"
] | def load_state_dict(self, state_dicts):
""" ? """
assert len(state_dicts) == len(self.optimizers)
for i in range(len(state_dicts)):
self.optimizers[i].load_state_dict(state_dicts[i]) | [
"def",
"load_state_dict",
"(",
"self",
",",
"state_dicts",
")",
":",
"assert",
"len",
"(",
"state_dicts",
")",
"==",
"len",
"(",
"self",
".",
"optimizers",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"state_dicts",
")",
")",
":",
"self",
".",
"optimizers",
"[",
"i",
"]",
".",
"load_state_dict",
"(",
"state_dicts",
"[",
"i",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L97-L101 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | Optimizer.set_parameters | (self, params) | ? | ? | [
"?"
] | def set_parameters(self, params):
""" ? """
self.params = []
self.sparse_params = []
for k, p in params:
if p.requires_grad:
if self.method != 'sparseadam' or "embed" not in k:
self.params.append(p)
else:
self.sparse_params.append(p)
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
for group in self.optimizer.param_groups:
for p in group['params']:
self.optimizer.state[p]['sum'] = self.optimizer\
.state[p]['sum'].fill_(self.adagrad_accum)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.learning_rate,
betas=self.betas, eps=1e-9)
elif self.method == 'sparseadam':
self.optimizer = MultipleOptimizer(
[optim.Adam(self.params, lr=self.learning_rate,
betas=self.betas, eps=1e-8),
optim.SparseAdam(self.sparse_params, lr=self.learning_rate,
betas=self.betas, eps=1e-8)])
else:
raise RuntimeError("Invalid optim method: " + self.method) | [
"def",
"set_parameters",
"(",
"self",
",",
"params",
")",
":",
"self",
".",
"params",
"=",
"[",
"]",
"self",
".",
"sparse_params",
"=",
"[",
"]",
"for",
"k",
",",
"p",
"in",
"params",
":",
"if",
"p",
".",
"requires_grad",
":",
"if",
"self",
".",
"method",
"!=",
"'sparseadam'",
"or",
"\"embed\"",
"not",
"in",
"k",
":",
"self",
".",
"params",
".",
"append",
"(",
"p",
")",
"else",
":",
"self",
".",
"sparse_params",
".",
"append",
"(",
"p",
")",
"if",
"self",
".",
"method",
"==",
"'sgd'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"SGD",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"elif",
"self",
".",
"method",
"==",
"'adagrad'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adagrad",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"for",
"group",
"in",
"self",
".",
"optimizer",
".",
"param_groups",
":",
"for",
"p",
"in",
"group",
"[",
"'params'",
"]",
":",
"self",
".",
"optimizer",
".",
"state",
"[",
"p",
"]",
"[",
"'sum'",
"]",
"=",
"self",
".",
"optimizer",
".",
"state",
"[",
"p",
"]",
"[",
"'sum'",
"]",
".",
"fill_",
"(",
"self",
".",
"adagrad_accum",
")",
"elif",
"self",
".",
"method",
"==",
"'adadelta'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adadelta",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
")",
"elif",
"self",
".",
"method",
"==",
"'adam'",
":",
"self",
".",
"optimizer",
"=",
"optim",
".",
"Adam",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-9",
")",
"elif",
"self",
".",
"method",
"==",
"'sparseadam'",
":",
"self",
".",
"optimizer",
"=",
"MultipleOptimizer",
"(",
"[",
"optim",
".",
"Adam",
"(",
"self",
".",
"params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-8",
")",
",",
"optim",
".",
"SparseAdam",
"(",
"self",
".",
"sparse_params",
",",
"lr",
"=",
"self",
".",
"learning_rate",
",",
"betas",
"=",
"self",
".",
"betas",
",",
"eps",
"=",
"1e-8",
")",
"]",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Invalid optim method: \"",
"+",
"self",
".",
"method",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L158-L188 |
||
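For `sparseadam`, the parameters are partitioned by name: anything whose name contains "embed" goes to `SparseAdam`, everything else to `Adam`. The split itself is just string matching over `named_parameters()`; a sketch with an assumed toy module:

```python
import torch.nn as nn

model = nn.Sequential()
model.add_module("embed", nn.Embedding(100, 16))
model.add_module("proj", nn.Linear(16, 8))

dense, sparse = [], []
for name, p in model.named_parameters():
    (sparse if "embed" in name else dense).append(p)

print(len(dense), "dense params,", len(sparse), "sparse params")  # 2 dense, 1 sparse
```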
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/optimizers.py | python | Optimizer.step | (self) | Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate. | Update the model parameters based on current gradients. | [
"Update",
"the",
"model",
"parameters",
"based",
"on",
"current",
"gradients",
"."
] | def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
self._step += 1
# Decay method used in tensor2tensor.
if self.decay_method == "noam":
self._set_rate(
self.original_lr *
(self.model_size ** (-0.5) *
min(self._step ** (-0.5),
self._step * self.warmup_steps**(-1.5))))
# Decay based on start_decay_steps every decay_steps
else:
if ((self.start_decay_steps is not None) and (
self._step >= self.start_decay_steps)):
self.start_decay = True
if self.start_decay:
if ((self._step - self.start_decay_steps)
% self.decay_steps == 0):
self.learning_rate = self.learning_rate * self.lr_decay
if self.method != 'sparseadam':
self.optimizer.param_groups[0]['lr'] = self.learning_rate
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
self.optimizer.step() | [
"def",
"step",
"(",
"self",
")",
":",
"self",
".",
"_step",
"+=",
"1",
"# Decay method used in tensor2tensor.",
"if",
"self",
".",
"decay_method",
"==",
"\"noam\"",
":",
"self",
".",
"_set_rate",
"(",
"self",
".",
"original_lr",
"*",
"(",
"self",
".",
"model_size",
"**",
"(",
"-",
"0.5",
")",
"*",
"min",
"(",
"self",
".",
"_step",
"**",
"(",
"-",
"0.5",
")",
",",
"self",
".",
"_step",
"*",
"self",
".",
"warmup_steps",
"**",
"(",
"-",
"1.5",
")",
")",
")",
")",
"# Decay based on start_decay_steps every decay_steps",
"else",
":",
"if",
"(",
"(",
"self",
".",
"start_decay_steps",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"_step",
">=",
"self",
".",
"start_decay_steps",
")",
")",
":",
"self",
".",
"start_decay",
"=",
"True",
"if",
"self",
".",
"start_decay",
":",
"if",
"(",
"(",
"self",
".",
"_step",
"-",
"self",
".",
"start_decay_steps",
")",
"%",
"self",
".",
"decay_steps",
"==",
"0",
")",
":",
"self",
".",
"learning_rate",
"=",
"self",
".",
"learning_rate",
"*",
"self",
".",
"lr_decay",
"if",
"self",
".",
"method",
"!=",
"'sparseadam'",
":",
"self",
".",
"optimizer",
".",
"param_groups",
"[",
"0",
"]",
"[",
"'lr'",
"]",
"=",
"self",
".",
"learning_rate",
"if",
"self",
".",
"max_grad_norm",
":",
"clip_grad_norm_",
"(",
"self",
".",
"params",
",",
"self",
".",
"max_grad_norm",
")",
"self",
".",
"optimizer",
".",
"step",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/optimizers.py#L198-L228 |
||
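The "noam" branch implements the warmup-then-decay schedule from tensor2tensor: `lr = base_lr * model_size^-0.5 * min(step^-0.5, step * warmup^-1.5)`, which grows linearly for `warmup_steps` steps and then decays as the inverse square root of the step. Computed directly, with illustrative hyperparameters:

```python
def noam_lr(step, base_lr=2.0, model_size=512, warmup_steps=4000):
    # Same expression as the decay_method == "noam" branch above.
    return base_lr * (model_size ** -0.5) * min(step ** -0.5, step * warmup_steps ** -1.5)

for s in (1, 4000, 16000):
    print(s, round(noam_lr(s), 6))  # the rate peaks around step == warmup_steps
```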
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.all_gather_stats | (stat, max_size=4096) | return stats[0] | Gather a `Statistics` object across multiple processes/nodes
Args:
stat(:obj:Statistics): the statistics object to gather
across all processes/nodes
max_size(int): max buffer size to use
Returns:
`Statistics`, the updated stats object | Gather a `Statistics` object across multiple processes/nodes | [
"Gather",
"a",
"Statistics",
"object",
"accross",
"multiple",
"process",
"/",
"nodes"
] | def all_gather_stats(stat, max_size=4096):
"""
Gather a `Statistics` object across multiple processes/nodes
Args:
stat(:obj:Statistics): the statistics object to gather
across all processes/nodes
max_size(int): max buffer size to use
Returns:
`Statistics`, the updated stats object
"""
stats = Statistics.all_gather_stats_list([stat], max_size=max_size)
return stats[0] | [
"def",
"all_gather_stats",
"(",
"stat",
",",
"max_size",
"=",
"4096",
")",
":",
"stats",
"=",
"Statistics",
".",
"all_gather_stats_list",
"(",
"[",
"stat",
"]",
",",
"max_size",
"=",
"max_size",
")",
"return",
"stats",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L30-L43 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.all_gather_stats_list | (stat_list, max_size=4096) | return our_stats | Gather a `Statistics` list across all processes/nodes
Args:
stat_list(list([`Statistics`])): list of statistics objects to
gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats | Gather a `Statistics` list across all processes/nodes | [
"Gather",
"a",
"Statistics",
"list",
"accross",
"all",
"processes",
"/",
"nodes"
] | def all_gather_stats_list(stat_list, max_size=4096):
"""
Gather a `Statistics` list across all processes/nodes
Args:
stat_list(list([`Statistics`])): list of statistics objects to
gather across all processes/nodes
max_size(int): max buffer size to use
Returns:
our_stats(list([`Statistics`])): list of updated stats
"""
# Get a list of world_size lists with len(stat_list) Statistics objects
all_stats = all_gather_list(stat_list, max_size=max_size)
our_rank = get_rank()
our_stats = all_stats[our_rank]
for other_rank, stats in enumerate(all_stats):
if other_rank == our_rank:
continue
for i, stat in enumerate(stats):
our_stats[i].update(stat, update_n_src_words=True)
return our_stats | [
"def",
"all_gather_stats_list",
"(",
"stat_list",
",",
"max_size",
"=",
"4096",
")",
":",
"# Get a list of world_size lists with len(stat_list) Statistics objects",
"all_stats",
"=",
"all_gather_list",
"(",
"stat_list",
",",
"max_size",
"=",
"max_size",
")",
"our_rank",
"=",
"get_rank",
"(",
")",
"our_stats",
"=",
"all_stats",
"[",
"our_rank",
"]",
"for",
"other_rank",
",",
"stats",
"in",
"enumerate",
"(",
"all_stats",
")",
":",
"if",
"other_rank",
"==",
"our_rank",
":",
"continue",
"for",
"i",
",",
"stat",
"in",
"enumerate",
"(",
"stats",
")",
":",
"our_stats",
"[",
"i",
"]",
".",
"update",
"(",
"stat",
",",
"update_n_src_words",
"=",
"True",
")",
"return",
"our_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L46-L68 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.update | (self, stat, update_n_src_words=False) | | Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not | Update statistics by summing values with another `Statistics` object | [
"Update",
"statistics",
"by",
"suming",
"values",
"with",
"another",
"Statistics",
"object"
] | def update(self, stat, update_n_src_words=False):
"""
Update statistics by summing values with another `Statistics` object
Args:
stat: another statistic object
update_n_src_words(bool): whether to update (sum) `n_src_words`
or not
"""
self.loss += stat.loss
self.n_words += stat.n_words
self.n_correct += stat.n_correct
if update_n_src_words:
self.n_src_words += stat.n_src_words | [
"def",
"update",
"(",
"self",
",",
"stat",
",",
"update_n_src_words",
"=",
"False",
")",
":",
"self",
".",
"loss",
"+=",
"stat",
".",
"loss",
"self",
".",
"n_words",
"+=",
"stat",
".",
"n_words",
"self",
".",
"n_correct",
"+=",
"stat",
".",
"n_correct",
"if",
"update_n_src_words",
":",
"self",
".",
"n_src_words",
"+=",
"stat",
".",
"n_src_words"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L70-L85 |
||
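`update` makes `Statistics` objects additive, which is what lets the distributed gather above simply sum per-worker counters before computing ratios like accuracy. A stripped-down version of that accumulation, with made-up counter values:

```python
class MiniStats:
    def __init__(self, loss=0.0, n_words=0, n_correct=0):
        self.loss, self.n_words, self.n_correct = loss, n_words, n_correct
    def update(self, other):
        # Sum raw counters; ratios are only meaningful after all updates.
        self.loss += other.loss
        self.n_words += other.n_words
        self.n_correct += other.n_correct
    def accuracy(self):
        return 100 * self.n_correct / self.n_words

total = MiniStats()
for shard in (MiniStats(4.0, 10, 6), MiniStats(2.0, 5, 4)):
    total.update(shard)
print(total.accuracy())  # 66.66..., computed only after summing
```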
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.accuracy | (self) | return 100 * (self.n_correct / self.n_words) | compute accuracy | compute accuracy | [
"compute",
"accuracy"
] | def accuracy(self):
""" compute accuracy """
return 100 * (self.n_correct / self.n_words) | [
"def",
"accuracy",
"(",
"self",
")",
":",
"return",
"100",
"*",
"(",
"self",
".",
"n_correct",
"/",
"self",
".",
"n_words",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L87-L89 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.xent | (self) | return self.loss / self.n_words | compute cross entropy | compute cross entropy | [
"compute",
"cross",
"entropy"
] | def xent(self):
""" compute cross entropy """
return self.loss / self.n_words | [
"def",
"xent",
"(",
"self",
")",
":",
"return",
"self",
".",
"loss",
"/",
"self",
".",
"n_words"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L91-L93 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.ppl | (self) | return math.exp(min(self.loss / self.n_words, 100)) | compute perplexity | compute perplexity | [
"compute",
"perplexity"
] | def ppl(self):
""" compute perplexity """
return math.exp(min(self.loss / self.n_words, 100)) | [
"def",
"ppl",
"(",
"self",
")",
":",
"return",
"math",
".",
"exp",
"(",
"min",
"(",
"self",
".",
"loss",
"/",
"self",
".",
"n_words",
",",
"100",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L95-L97 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.elapsed_time | (self) | return time.time() - self.start_time | compute elapsed time | compute elapsed time | [
"compute",
"elapsed",
"time"
] | def elapsed_time(self):
""" compute elapsed time """
return time.time() - self.start_time | [
"def",
"elapsed_time",
"(",
"self",
")",
":",
"return",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L99-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.output | (self, step, num_steps, learning_rate, start) | Write out statistics to stdout.
Args:
step (int): current step
num_steps (int): total steps
start (int): start time of step. | Write out statistics to stdout. | [
"Write",
"out",
"statistics",
"to",
"stdout",
"."
] | def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
step (int): current step
num_steps (int): total steps
start (int): start time of step.
"""
t = self.elapsed_time()
logger.info(
("Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; " +
"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec")
% (step, num_steps,
self.accuracy(),
self.ppl(),
self.xent(),
learning_rate,
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush() | [
"def",
"output",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"start",
")",
":",
"t",
"=",
"self",
".",
"elapsed_time",
"(",
")",
"logger",
".",
"info",
"(",
"(",
"\"Step %2d/%5d; acc: %6.2f; ppl: %5.2f; xent: %4.2f; \"",
"+",
"\"lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec\"",
")",
"%",
"(",
"step",
",",
"num_steps",
",",
"self",
".",
"accuracy",
"(",
")",
",",
"self",
".",
"ppl",
"(",
")",
",",
"self",
".",
"xent",
"(",
")",
",",
"learning_rate",
",",
"self",
".",
"n_src_words",
"/",
"(",
"t",
"+",
"1e-5",
")",
",",
"self",
".",
"n_words",
"/",
"(",
"t",
"+",
"1e-5",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L103-L123 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/statistics.py | python | Statistics.log_tensorboard | (self, prefix, writer, learning_rate, step) | display statistics to tensorboard | display statistics to tensorboard | [
"display",
"statistics",
"to",
"tensorboard"
] | def log_tensorboard(self, prefix, writer, learning_rate, step):
""" display statistics to tensorboard """
t = self.elapsed_time()
writer.add_scalar(prefix + "/xent", self.xent(), step)
writer.add_scalar(prefix + "/ppl", self.ppl(), step)
writer.add_scalar(prefix + "/accuracy", self.accuracy(), step)
writer.add_scalar(prefix + "/tgtper", self.n_words / t, step)
writer.add_scalar(prefix + "/lr", learning_rate, step) | [
"def",
"log_tensorboard",
"(",
"self",
",",
"prefix",
",",
"writer",
",",
"learning_rate",
",",
"step",
")",
":",
"t",
"=",
"self",
".",
"elapsed_time",
"(",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/xent\"",
",",
"self",
".",
"xent",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/ppl\"",
",",
"self",
".",
"ppl",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/accuracy\"",
",",
"self",
".",
"accuracy",
"(",
")",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/tgtper\"",
",",
"self",
".",
"n_words",
"/",
"t",
",",
"step",
")",
"writer",
".",
"add_scalar",
"(",
"prefix",
"+",
"\"/lr\"",
",",
"learning_rate",
",",
"step",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/statistics.py#L125-L132 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/rnn_factory.py | python | rnn_factory | (rnn_type, **kwargs) | return rnn, no_pack_padded_seq | rnn factory; use the pytorch version when available. | rnn factory; use the pytorch version when available. | [
"rnn",
"factory",
"Use",
"pytorch",
"version",
"when",
"available",
"."
] | def rnn_factory(rnn_type, **kwargs):
""" rnn factory, Use pytorch version when available. """
no_pack_padded_seq = False
if rnn_type == "SRU":
# SRU doesn't support PackedSequence.
no_pack_padded_seq = True
rnn = onmt.models.sru.SRU(**kwargs)
else:
rnn = getattr(nn, rnn_type)(**kwargs)
return rnn, no_pack_padded_seq | [
"def",
"rnn_factory",
"(",
"rnn_type",
",",
"*",
"*",
"kwargs",
")",
":",
"no_pack_padded_seq",
"=",
"False",
"if",
"rnn_type",
"==",
"\"SRU\"",
":",
"# SRU doesn't support PackedSequence.",
"no_pack_padded_seq",
"=",
"True",
"rnn",
"=",
"onmt",
".",
"models",
".",
"sru",
".",
"SRU",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"rnn",
"=",
"getattr",
"(",
"nn",
",",
"rnn_type",
")",
"(",
"*",
"*",
"kwargs",
")",
"return",
"rnn",
",",
"no_pack_padded_seq"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/rnn_factory.py#L10-L19 |
|
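For the non-SRU types, the factory is just `getattr` on `torch.nn`, so the string in the config selects the module class directly. A hedged usage sketch with the SRU branch omitted:

```python
import torch.nn as nn

def rnn_factory(rnn_type, **kwargs):
    # SRU branch omitted; plain PyTorch types resolve via getattr.
    return getattr(nn, rnn_type)(**kwargs), False

rnn, no_pack = rnn_factory("LSTM", input_size=8, hidden_size=16, num_layers=2)
print(type(rnn).__name__, no_pack)  # LSTM False
```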
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/distributed.py | python | all_reduce_and_rescale_tensors | (tensors, rescale_denom,
buffer_size=10485760) | All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes | All-reduce and rescale tensors in chunks of the specified size. | [
"All",
"-",
"reduce",
"and",
"rescale",
"tensors",
"in",
"chunks",
"of",
"the",
"specified",
"size",
"."
] | def all_reduce_and_rescale_tensors(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
torch.distributed.all_reduce(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
torch.distributed.all_reduce(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer() | [
"def",
"all_reduce_and_rescale_tensors",
"(",
"tensors",
",",
"rescale_denom",
",",
"buffer_size",
"=",
"10485760",
")",
":",
"# buffer size in bytes, determine equiv. # of elements based on data type",
"buffer_t",
"=",
"tensors",
"[",
"0",
"]",
".",
"new",
"(",
"math",
".",
"ceil",
"(",
"buffer_size",
"/",
"tensors",
"[",
"0",
"]",
".",
"element_size",
"(",
")",
")",
")",
".",
"zero_",
"(",
")",
"buffer",
"=",
"[",
"]",
"def",
"all_reduce_buffer",
"(",
")",
":",
"# copy tensors into buffer_t",
"offset",
"=",
"0",
"for",
"t",
"in",
"buffer",
":",
"numel",
"=",
"t",
".",
"numel",
"(",
")",
"buffer_t",
"[",
"offset",
":",
"offset",
"+",
"numel",
"]",
".",
"copy_",
"(",
"t",
".",
"view",
"(",
"-",
"1",
")",
")",
"offset",
"+=",
"numel",
"# all-reduce and rescale",
"torch",
".",
"distributed",
".",
"all_reduce",
"(",
"buffer_t",
"[",
":",
"offset",
"]",
")",
"buffer_t",
".",
"div_",
"(",
"rescale_denom",
")",
"# copy all-reduced buffer back into tensors",
"offset",
"=",
"0",
"for",
"t",
"in",
"buffer",
":",
"numel",
"=",
"t",
".",
"numel",
"(",
")",
"t",
".",
"view",
"(",
"-",
"1",
")",
".",
"copy_",
"(",
"buffer_t",
"[",
"offset",
":",
"offset",
"+",
"numel",
"]",
")",
"offset",
"+=",
"numel",
"filled",
"=",
"0",
"for",
"t",
"in",
"tensors",
":",
"sz",
"=",
"t",
".",
"numel",
"(",
")",
"*",
"t",
".",
"element_size",
"(",
")",
"if",
"sz",
">",
"buffer_size",
":",
"# tensor is bigger than buffer, all-reduce and rescale directly",
"torch",
".",
"distributed",
".",
"all_reduce",
"(",
"t",
")",
"t",
".",
"div_",
"(",
"rescale_denom",
")",
"elif",
"filled",
"+",
"sz",
">",
"buffer_size",
":",
"# buffer is full, all-reduce and replace buffer with grad",
"all_reduce_buffer",
"(",
")",
"buffer",
"=",
"[",
"t",
"]",
"filled",
"=",
"sz",
"else",
":",
"# add tensor to buffer",
"buffer",
".",
"append",
"(",
"t",
")",
"filled",
"+=",
"sz",
"if",
"len",
"(",
"buffer",
")",
">",
"0",
":",
"all_reduce_buffer",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/distributed.py#L35-L86 |
||
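The all-reduce helper batches small gradients into one buffer to amortize communication latency, falling back to a direct all-reduce for tensors larger than the buffer. The routing decision, simulated here without `torch.distributed` and with the buffer size shrunk for illustration:

```python
import torch

buffer_size = 64  # bytes; tiny on purpose
tensors = [torch.zeros(2), torch.zeros(100), torch.zeros(3)]  # float32: 8, 400, 12 bytes

buffer, filled = [], 0
for t in tensors:
    sz = t.numel() * t.element_size()
    if sz > buffer_size:
        print("direct all-reduce:", sz, "bytes")   # too big to buffer
    elif filled + sz > buffer_size:
        print("flush buffer of", filled, "bytes")  # buffer full, then restart it
        buffer, filled = [t], sz
    else:
        buffer.append(t)                           # accumulate small tensors
        filled += sz
if buffer:
    print("final flush of", filled, "bytes")
```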
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/distributed.py | python | all_gather_list | (data, max_size=4096) | return results | Gathers arbitrary data from all nodes into a list. | Gathers arbitrary data from all nodes into a list. | [
"Gathers",
"arbitrary",
"data",
"from",
"all",
"nodes",
"into",
"a",
"list",
"."
] | def all_gather_list(data, max_size=4096):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != all_gather_list._in_buffer.size():
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size)
for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError(
'encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
in_buffer[1] = enc_size % 255
in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
results = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * out_buffer[0].item()) + out_buffer[1].item()
bytes_list = bytes(out_buffer[2:size+2].tolist())
result = pickle.loads(bytes_list)
results.append(result)
return results | [
"def",
"all_gather_list",
"(",
"data",
",",
"max_size",
"=",
"4096",
")",
":",
"world_size",
"=",
"torch",
".",
"distributed",
".",
"get_world_size",
"(",
")",
"if",
"not",
"hasattr",
"(",
"all_gather_list",
",",
"'_in_buffer'",
")",
"or",
"max_size",
"!=",
"all_gather_list",
".",
"_in_buffer",
".",
"size",
"(",
")",
":",
"all_gather_list",
".",
"_in_buffer",
"=",
"torch",
".",
"cuda",
".",
"ByteTensor",
"(",
"max_size",
")",
"all_gather_list",
".",
"_out_buffers",
"=",
"[",
"torch",
".",
"cuda",
".",
"ByteTensor",
"(",
"max_size",
")",
"for",
"i",
"in",
"range",
"(",
"world_size",
")",
"]",
"in_buffer",
"=",
"all_gather_list",
".",
"_in_buffer",
"out_buffers",
"=",
"all_gather_list",
".",
"_out_buffers",
"enc",
"=",
"pickle",
".",
"dumps",
"(",
"data",
")",
"enc_size",
"=",
"len",
"(",
"enc",
")",
"if",
"enc_size",
"+",
"2",
">",
"max_size",
":",
"raise",
"ValueError",
"(",
"'encoded data exceeds max_size: {}'",
".",
"format",
"(",
"enc_size",
"+",
"2",
")",
")",
"assert",
"max_size",
"<",
"255",
"*",
"256",
"in_buffer",
"[",
"0",
"]",
"=",
"enc_size",
"//",
"255",
"# this encoding works for max_size < 65k",
"in_buffer",
"[",
"1",
"]",
"=",
"enc_size",
"%",
"255",
"in_buffer",
"[",
"2",
":",
"enc_size",
"+",
"2",
"]",
"=",
"torch",
".",
"ByteTensor",
"(",
"list",
"(",
"enc",
")",
")",
"torch",
".",
"distributed",
".",
"all_gather",
"(",
"out_buffers",
",",
"in_buffer",
".",
"cuda",
"(",
")",
")",
"results",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"world_size",
")",
":",
"out_buffer",
"=",
"out_buffers",
"[",
"i",
"]",
"size",
"=",
"(",
"255",
"*",
"out_buffer",
"[",
"0",
"]",
".",
"item",
"(",
")",
")",
"+",
"out_buffer",
"[",
"1",
"]",
".",
"item",
"(",
")",
"bytes_list",
"=",
"bytes",
"(",
"out_buffer",
"[",
"2",
":",
"size",
"+",
"2",
"]",
".",
"tolist",
"(",
")",
")",
"result",
"=",
"pickle",
".",
"loads",
"(",
"bytes_list",
")",
"results",
".",
"append",
"(",
"result",
")",
"return",
"results"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/distributed.py#L89-L122 |
|
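all_gather_list is handy for exchanging small metadata objects between workers, e.g. per-rank token counts. A minimal usage sketch, assuming torch.distributed has already been initialized with a CUDA-capable backend and one GPU per rank; gather_token_counts and local_count are illustrative names, not part of the library:

```python
import torch.distributed

def gather_token_counts(local_count):
    # Each rank contributes a small picklable dict; all_gather_list (above)
    # returns the full list, one entry per rank.
    stats = all_gather_list({"rank": torch.distributed.get_rank(),
                             "tokens": local_count})
    return sum(s["tokens"] for s in stats)
```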
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | build_loss_compute | (model, tgt_vocab, opt, train=True) | return compute | This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase. | This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase. | [
"This",
"returns",
"user",
"-",
"defined",
"LossCompute",
"object",
"which",
"is",
"used",
"to",
"compute",
"loss",
"in",
"train",
"/",
"validate",
"process",
".",
"You",
"can",
"implement",
"your",
"own",
"*",
"LossCompute",
"class",
"by",
"subclassing",
"LossComputeBase",
"."
] | def build_loss_compute(model, tgt_vocab, opt, train=True):
"""
This returns a user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
own *LossCompute class, by subclassing LossComputeBase.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, opt.copy_attn_force,
opt.copy_loss_by_seqlength)
else:
compute = NMTLossCompute(
model.generator, tgt_vocab,
label_smoothing=opt.label_smoothing if train else 0.0)
compute.to(device)
return compute | [
"def",
"build_loss_compute",
"(",
"model",
",",
"tgt_vocab",
",",
"opt",
",",
"train",
"=",
"True",
")",
":",
"device",
"=",
"torch",
".",
"device",
"(",
"\"cuda\"",
"if",
"onmt",
".",
"utils",
".",
"misc",
".",
"use_gpu",
"(",
"opt",
")",
"else",
"\"cpu\"",
")",
"if",
"opt",
".",
"copy_attn",
":",
"compute",
"=",
"onmt",
".",
"modules",
".",
"CopyGeneratorLossCompute",
"(",
"model",
".",
"generator",
",",
"tgt_vocab",
",",
"opt",
".",
"copy_attn_force",
",",
"opt",
".",
"copy_loss_by_seqlength",
")",
"else",
":",
"compute",
"=",
"NMTLossCompute",
"(",
"model",
".",
"generator",
",",
"tgt_vocab",
",",
"label_smoothing",
"=",
"opt",
".",
"label_smoothing",
"if",
"train",
"else",
"0.0",
")",
"compute",
".",
"to",
"(",
"device",
")",
"return",
"compute"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L17-L35 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | filter_shard_state | (state, shard_size=None) | | Yield (key, value) items from state. When shard_size is set, pair each tensor with its cloned, grad-enabled shard-size chunks. | Yield (key, value) items from state. When shard_size is set, pair each tensor with its cloned, grad-enabled shard-size chunks. | [
"Yield",
"(",
"key",
"value",
")",
"items",
"from",
"state",
"."
] | def filter_shard_state(state, shard_size=None):
""" ? """
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split) | [
"def",
"filter_shard_state",
"(",
"state",
",",
"shard_size",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"state",
".",
"items",
"(",
")",
":",
"if",
"shard_size",
"is",
"None",
":",
"yield",
"k",
",",
"v",
"if",
"v",
"is",
"not",
"None",
":",
"v_split",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"v",
",",
"torch",
".",
"Tensor",
")",
":",
"for",
"v_chunk",
"in",
"torch",
".",
"split",
"(",
"v",
",",
"shard_size",
")",
":",
"v_chunk",
"=",
"v_chunk",
".",
"data",
".",
"clone",
"(",
")",
"v_chunk",
".",
"requires_grad",
"=",
"v",
".",
"requires_grad",
"v_split",
".",
"append",
"(",
"v_chunk",
")",
"yield",
"k",
",",
"(",
"v",
",",
"v_split",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L252-L265 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | shards | (state, shard_size, eval_only=False) | Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation. | Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards. | [
"Args",
":",
"state",
":",
"A",
"dictionary",
"which",
"corresponds",
"to",
"the",
"output",
"of",
"*",
"LossCompute",
".",
"_make_shard_state",
"()",
".",
"The",
"values",
"for",
"those",
"keys",
"are",
"Tensor",
"-",
"like",
"or",
"None",
".",
"shard_size",
":",
"The",
"maximum",
"size",
"of",
"the",
"shards",
"yielded",
"by",
"the",
"model",
".",
"eval_only",
":",
"If",
"True",
"only",
"yield",
"the",
"state",
"nothing",
"else",
".",
"Otherwise",
"yield",
"shards",
"."
] | def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads) | [
"def",
"shards",
"(",
"state",
",",
"shard_size",
",",
"eval_only",
"=",
"False",
")",
":",
"if",
"eval_only",
":",
"yield",
"filter_shard_state",
"(",
"state",
")",
"else",
":",
"# non_none: the subdict of the state dictionary where the values",
"# are not None.",
"non_none",
"=",
"dict",
"(",
"filter_shard_state",
"(",
"state",
",",
"shard_size",
")",
")",
"# Now, the iteration:",
"# state is a dictionary of sequences of tensor-like but we",
"# want a sequence of dictionaries of tensors.",
"# First, unzip the dictionary into a sequence of keys and a",
"# sequence of tensor-like sequences.",
"keys",
",",
"values",
"=",
"zip",
"(",
"*",
"(",
"(",
"k",
",",
"[",
"v_chunk",
"for",
"v_chunk",
"in",
"v_split",
"]",
")",
"for",
"k",
",",
"(",
"_",
",",
"v_split",
")",
"in",
"non_none",
".",
"items",
"(",
")",
")",
")",
"# Now, yield a dictionary for each shard. The keys are always",
"# the same. values is a sequence of length #keys where each",
"# element is a sequence of length #shards. We want to iterate",
"# over the shards, not over the keys: therefore, the values need",
"# to be re-zipped by shard and then each shard can be paired",
"# with the keys.",
"for",
"shard_tensors",
"in",
"zip",
"(",
"*",
"values",
")",
":",
"yield",
"dict",
"(",
"zip",
"(",
"keys",
",",
"shard_tensors",
")",
")",
"# Assumed backprop'd",
"variables",
"=",
"[",
"]",
"for",
"k",
",",
"(",
"v",
",",
"v_split",
")",
"in",
"non_none",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"torch",
".",
"Tensor",
")",
"and",
"state",
"[",
"k",
"]",
".",
"requires_grad",
":",
"variables",
".",
"extend",
"(",
"zip",
"(",
"torch",
".",
"split",
"(",
"state",
"[",
"k",
"]",
",",
"shard_size",
")",
",",
"[",
"v_chunk",
".",
"grad",
"for",
"v_chunk",
"in",
"v_split",
"]",
")",
")",
"inputs",
",",
"grads",
"=",
"zip",
"(",
"*",
"variables",
")",
"torch",
".",
"autograd",
".",
"backward",
"(",
"inputs",
",",
"grads",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L268-L315 |
||
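The contract of shards() is easiest to see on a toy state dict. A minimal sketch with made-up tensor names: each yielded dict maps the same keys to shard_size-step chunks along dim 0, and exhausting the generator fires the deferred backward pass into the original tensors:

```python
import torch

output = torch.randn(6, 2, 4, requires_grad=True)  # stand-in decoder states
target = torch.randint(0, 9, (6, 2))               # stand-in gold indices
state = {"output": output, "target": target}

for shard in shards(state, shard_size=2):
    # Each shard holds cloned, grad-enabled chunks of the tensors above.
    loss = shard["output"].mean()
    loss.backward()                    # grads accumulate on the chunks
# Exhausting the generator backprops the chunk grads into `output`.
assert output.grad is not None
```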
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LossComputeBase._make_shard_state | (self, batch, output, range_, attns=None) | return NotImplementedError | Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predicted output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model. | Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predicted output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model. | [
"Make",
"shard",
"state",
"dictionary",
"for",
"shards",
"()",
"to",
"return",
"iterable",
"shards",
"for",
"efficient",
"loss",
"computation",
".",
"Subclass",
"must",
"define",
"this",
"method",
"to",
"match",
"its",
"own",
"_compute_loss",
"()",
"interface",
".",
"Args",
":",
"batch",
":",
"the",
"current",
"batch",
".",
"output",
":",
"the",
"predict",
"output",
"from",
"the",
"model",
".",
"range_",
":",
"the",
"range",
"of",
"examples",
"for",
"computing",
"the",
"whole",
"batch",
"or",
"a",
"trunc",
"of",
"it?",
"attns",
":",
"the",
"attns",
"dictionary",
"returned",
"from",
"the",
"model",
"."
] | def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predicted output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError | [
"def",
"_make_shard_state",
"(",
"self",
",",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
"=",
"None",
")",
":",
"return",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L64-L76 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LossComputeBase._compute_loss | (self, batch, output, target, **kwargs) | return NotImplementedError | Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predicted output from the model.
target: the validation target to compare output with.
**kwargs(optional): additional info for computing loss. | Compute the loss. Subclass must define this method. | [
"Compute",
"the",
"loss",
".",
"Subclass",
"must",
"define",
"this",
"method",
"."
] | def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predicted output from the model.
target: the validation target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError | [
"def",
"_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"target",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"NotImplementedError"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L78-L89 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LossComputeBase.monolithic_compute_loss | (self, batch, output, attns) | return batch_stats | Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics | Compute the forward loss for the batch. | [
"Compute",
"the",
"forward",
"loss",
"for",
"the",
"batch",
"."
] | def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the forward loss for the batch.
Args:
batch (batch): batch of labeled examples
output (:obj:`FloatTensor`):
output of decoder model `[tgt_len x batch x hidden]`
attns (dict of :obj:`FloatTensor`) :
dictionary of attention distributions
`[tgt_len x batch x src_len]`
Returns:
:obj:`onmt.utils.Statistics`: loss statistics
"""
range_ = (0, batch.tgt.size(0))
shard_state = self._make_shard_state(batch, output, range_, attns)
_, batch_stats = self._compute_loss(batch, **shard_state)
return batch_stats | [
"def",
"monolithic_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"attns",
")",
":",
"range_",
"=",
"(",
"0",
",",
"batch",
".",
"tgt",
".",
"size",
"(",
"0",
")",
")",
"shard_state",
"=",
"self",
".",
"_make_shard_state",
"(",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
")",
"_",
",",
"batch_stats",
"=",
"self",
".",
"_compute_loss",
"(",
"batch",
",",
"*",
"*",
"shard_state",
")",
"return",
"batch_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L91-L109 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LossComputeBase.sharded_compute_loss | (self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization) | return batch_stats | Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
Returns:
:obj:`onmt.utils.Statistics`: validation loss statistics | Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency. | [
"Compute",
"the",
"forward",
"loss",
"and",
"backpropagate",
".",
"Computation",
"is",
"done",
"with",
"shards",
"and",
"optionally",
"truncation",
"for",
"memory",
"efficiency",
"."
] | def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size,
normalization):
"""Compute the forward loss and backpropagate. Computation is done
with shards and optionally truncation for memory efficiency.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(cur_trunc, cur_trunc + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
cur_trunc (int) : starting position of truncation window
trunc_size (int) : length of truncation window
shard_size (int) : maximum number of examples in a shard
normalization (int) : Loss is divided by this number
Returns:
:obj:`onmt.utils.Statistics`: validation loss statistics
"""
batch_stats = onmt.utils.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self._make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return batch_stats | [
"def",
"sharded_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"attns",
",",
"cur_trunc",
",",
"trunc_size",
",",
"shard_size",
",",
"normalization",
")",
":",
"batch_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"range_",
"=",
"(",
"cur_trunc",
",",
"cur_trunc",
"+",
"trunc_size",
")",
"shard_state",
"=",
"self",
".",
"_make_shard_state",
"(",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
")",
"for",
"shard",
"in",
"shards",
"(",
"shard_state",
",",
"shard_size",
")",
":",
"loss",
",",
"stats",
"=",
"self",
".",
"_compute_loss",
"(",
"batch",
",",
"*",
"*",
"shard",
")",
"loss",
".",
"div",
"(",
"float",
"(",
"normalization",
")",
")",
".",
"backward",
"(",
")",
"batch_stats",
".",
"update",
"(",
"stats",
")",
"return",
"batch_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L111-L149 |
|
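The (cur_trunc, trunc_size) windows are supplied by the training loop. As a self-contained illustration of that arithmetic only (truncated_ranges below is a hypothetical helper, not part of the library):

```python
def truncated_ranges(tgt_len, trunc_size):
    # Yield (cur_trunc, window) pairs covering [0, tgt_len).
    for cur_trunc in range(0, tgt_len, trunc_size):
        yield cur_trunc, min(trunc_size, tgt_len - cur_trunc)

print(list(truncated_ranges(10, 4)))   # [(0, 4), (4, 4), (8, 2)]
```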
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LossComputeBase._stats | (self, loss, scores, target) | return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct) | Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch. | Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets | [
"Args",
":",
"loss",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"the",
"loss",
"computed",
"by",
"the",
"loss",
"criterion",
".",
"scores",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"a",
"score",
"for",
"each",
"possible",
"output",
"target",
"(",
":",
"obj",
":",
"FloatTensor",
")",
":",
"true",
"targets"
] | def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum() \
.item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct) | [
"def",
"_stats",
"(",
"self",
",",
"loss",
",",
"scores",
",",
"target",
")",
":",
"pred",
"=",
"scores",
".",
"max",
"(",
"1",
")",
"[",
"1",
"]",
"non_padding",
"=",
"target",
".",
"ne",
"(",
"self",
".",
"padding_idx",
")",
"num_correct",
"=",
"pred",
".",
"eq",
"(",
"target",
")",
".",
"masked_select",
"(",
"non_padding",
")",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"num_non_padding",
"=",
"non_padding",
".",
"sum",
"(",
")",
".",
"item",
"(",
")",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
"loss",
".",
"item",
"(",
")",
",",
"num_non_padding",
",",
"num_correct",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L151-L168 |
|
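The padding-aware counting in _stats() can be checked by hand. A toy run of the same operations; the padding index of 0 is an assumption of this example:

```python
import torch

padding_idx = 0
scores = torch.tensor([[0.1, 0.7, 0.1, 0.1],
                       [0.6, 0.1, 0.2, 0.1],
                       [0.2, 0.2, 0.5, 0.1]])
target = torch.tensor([1, 2, 0])              # last position is padding

pred = scores.max(1)[1]                       # argmax over the vocab axis
non_padding = target.ne(padding_idx)
num_correct = pred.eq(target).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
print(num_correct, num_non_padding)           # 1 2 (only row 0 is correct)
```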
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/loss.py | python | LabelSmoothingLoss.forward | (self, output, target) | return F.kl_div(output, model_prob, reduction='sum') | output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size | output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size | [
"output",
"(",
"FloatTensor",
")",
":",
"batch_size",
"x",
"n_classes",
"target",
"(",
"LongTensor",
")",
":",
"batch_size"
] | def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.padding_idx).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum') | [
"def",
"forward",
"(",
"self",
",",
"output",
",",
"target",
")",
":",
"model_prob",
"=",
"self",
".",
"one_hot",
".",
"repeat",
"(",
"target",
".",
"size",
"(",
"0",
")",
",",
"1",
")",
"model_prob",
".",
"scatter_",
"(",
"1",
",",
"target",
".",
"unsqueeze",
"(",
"1",
")",
",",
"self",
".",
"confidence",
")",
"model_prob",
".",
"masked_fill_",
"(",
"(",
"target",
"==",
"self",
".",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
",",
"0",
")",
"return",
"F",
".",
"kl_div",
"(",
"output",
",",
"model_prob",
",",
"reduction",
"=",
"'sum'",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/loss.py#L195-L204 |
|
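forward() combines a one_hot template (built in __init__ from the smoothing value) with scatter_ and masked_fill_. A worked example under assumed settings (4 classes, padding index 0, smoothing 0.1):

```python
import torch

n_classes, padding_idx, smoothing = 4, 0, 0.1
confidence = 1.0 - smoothing
one_hot = torch.full((n_classes,), smoothing / (n_classes - 2))
one_hot[padding_idx] = 0                      # no smoothing mass on padding

target = torch.tensor([2, 0])                 # second row is a padded slot
model_prob = one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), confidence)
model_prob.masked_fill_((target == padding_idx).unsqueeze(1), 0)
print(model_prob)
# row 0: 0.9 on class 2, 0.05 on the other non-pad classes; row 1: all zeros
```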
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/misc.py | python | aeq | (*args) | Assert all arguments have the same value | Assert all arguments have the same value | [
"Assert",
"all",
"arguments",
"have",
"the",
"same",
"value"
] | def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args) | [
"def",
"aeq",
"(",
"*",
"args",
")",
":",
"arguments",
"=",
"(",
"arg",
"for",
"arg",
"in",
"args",
")",
"first",
"=",
"next",
"(",
"arguments",
")",
"assert",
"all",
"(",
"arg",
"==",
"first",
"for",
"arg",
"in",
"arguments",
")",
",",
"\"Not all arguments have the same value: \"",
"+",
"str",
"(",
"args",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/misc.py#L6-L13 |
||
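aeq() backs the shape checks in the attention records below; a two-line demonstration of both outcomes:

```python
aeq(3, 3, 3)              # same values: passes silently
try:
    aeq(2, 3)
except AssertionError as e:
    print(e)              # Not all arguments have the same value: (2, 3)
```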
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/misc.py | python | sequence_mask | (lengths, max_len=None) | return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1))) | Creates a boolean mask from sequence lengths. | Creates a boolean mask from sequence lengths. | [
"Creates",
"a",
"boolean",
"mask",
"from",
"sequence",
"lengths",
"."
] | def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1))) | [
"def",
"sequence_mask",
"(",
"lengths",
",",
"max_len",
"=",
"None",
")",
":",
"batch_size",
"=",
"lengths",
".",
"numel",
"(",
")",
"max_len",
"=",
"max_len",
"or",
"lengths",
".",
"max",
"(",
")",
"return",
"(",
"torch",
".",
"arange",
"(",
"0",
",",
"max_len",
")",
".",
"type_as",
"(",
"lengths",
")",
".",
"repeat",
"(",
"batch_size",
",",
"1",
")",
".",
"lt",
"(",
"lengths",
".",
"unsqueeze",
"(",
"1",
")",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/misc.py#L16-L25 |
|
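A quick behavior check of sequence_mask(); the mask dtype depends on the torch version, so the pattern is shown as 1/0:

```python
import torch

lengths = torch.tensor([1, 3, 2])
print(sequence_mask(lengths, max_len=4))
# [[1, 0, 0, 0],
#  [1, 1, 1, 0],
#  [1, 1, 0, 0]]
```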
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/misc.py | python | tile | (x, count, dim=0) | return x | Tiles x on dimension dim count times. | Tiles x on dimension dim count times. | [
"Tiles",
"x",
"on",
"dimension",
"dim",
"count",
"times",
"."
] | def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm).contiguous()
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x | [
"def",
"tile",
"(",
"x",
",",
"count",
",",
"dim",
"=",
"0",
")",
":",
"perm",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"x",
".",
"size",
"(",
")",
")",
")",
")",
"if",
"dim",
"!=",
"0",
":",
"perm",
"[",
"0",
"]",
",",
"perm",
"[",
"dim",
"]",
"=",
"perm",
"[",
"dim",
"]",
",",
"perm",
"[",
"0",
"]",
"x",
"=",
"x",
".",
"permute",
"(",
"perm",
")",
".",
"contiguous",
"(",
")",
"out_size",
"=",
"list",
"(",
"x",
".",
"size",
"(",
")",
")",
"out_size",
"[",
"0",
"]",
"*=",
"count",
"batch",
"=",
"x",
".",
"size",
"(",
"0",
")",
"x",
"=",
"x",
".",
"view",
"(",
"batch",
",",
"-",
"1",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"repeat",
"(",
"count",
",",
"1",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"*",
"out_size",
")",
"if",
"dim",
"!=",
"0",
":",
"x",
"=",
"x",
".",
"permute",
"(",
"perm",
")",
".",
"contiguous",
"(",
")",
"return",
"x"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/misc.py#L28-L47 |
|
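tile() keeps the repeats of each example grouped together (entry 0's copies, then entry 1's, and so on), which is the layout beam search decoding relies on; a small check:

```python
import torch

x = torch.tensor([[1, 2], [3, 4]])
print(tile(x, 2, dim=0))
# tensor([[1, 2],
#         [1, 2],
#         [3, 4],
#         [3, 4]])
```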
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/misc.py | python | use_gpu | (opt) | return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1) | Returns True if a GPU is used | Returns True if a GPU is used | [
"Creates",
"a",
"boolean",
"if",
"gpu",
"used"
] | def use_gpu(opt):
"""
Returns True if a GPU is used
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1) | [
"def",
"use_gpu",
"(",
"opt",
")",
":",
"return",
"(",
"hasattr",
"(",
"opt",
",",
"'gpu_ranks'",
")",
"and",
"len",
"(",
"opt",
".",
"gpu_ranks",
")",
">",
"0",
")",
"or",
"(",
"hasattr",
"(",
"opt",
",",
"'gpu'",
")",
"and",
"opt",
".",
"gpu",
">",
"-",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/misc.py#L50-L55 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgrBase.__init__ | (self, report_every, start_time=-1.) | Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()` | Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()` | [
"Args",
":",
"report_every",
"(",
"int",
")",
":",
"Report",
"status",
"every",
"this",
"many",
"sentences",
"start_time",
"(",
"float",
")",
":",
"manually",
"set",
"report",
"start",
"time",
".",
"Negative",
"values",
"means",
"that",
"you",
"will",
"need",
"to",
"set",
"it",
"later",
"or",
"use",
"start",
"()"
] | def __init__(self, report_every, start_time=-1.):
"""
Args:
report_every(int): Report status every this many sentences
start_time(float): manually set report start time. Negative values
means that you will need to set it later or use `start()`
"""
self.report_every = report_every
self.progress_step = 0
self.start_time = start_time | [
"def",
"__init__",
"(",
"self",
",",
"report_every",
",",
"start_time",
"=",
"-",
"1.",
")",
":",
"self",
".",
"report_every",
"=",
"report_every",
"self",
".",
"progress_step",
"=",
"0",
"self",
".",
"start_time",
"=",
"start_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L33-L42 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgrBase.report_training | (self, step, num_steps, learning_rate,
report_stats, multigpu=False) | return onmt.utils.Statistics() | This is the user-defined batch-level training progress
report function.
Args:
step(int): current step count.
num_steps(int): total number of batches.
learning_rate(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance. | This is the user-defined batch-level training progress
report function. | [
"This",
"is",
"the",
"user",
"-",
"defined",
"batch",
"-",
"level",
"traing",
"progress",
"report",
"function",
"."
] | def report_training(self, step, num_steps, learning_rate,
report_stats, multigpu=False):
"""
This is the user-defined batch-level training progress
report function.
Args:
step(int): current step count.
num_steps(int): total number of batches.
learning_rate(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if self.start_time < 0:
raise ValueError("""ReportMgr needs to be started
(set 'start_time' or use 'start()')""")
if multigpu:
report_stats = onmt.utils.Statistics.all_gather_stats(report_stats)
if step % self.report_every == 0:
self._report_training(
step, num_steps, learning_rate, report_stats)
self.progress_step += 1
return onmt.utils.Statistics() | [
"def",
"report_training",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
",",
"multigpu",
"=",
"False",
")",
":",
"if",
"self",
".",
"start_time",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"\"\"ReportMgr needs to be started\n (set 'start_time' or use 'start()'\"\"\"",
")",
"if",
"multigpu",
":",
"report_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
".",
"all_gather_stats",
"(",
"report_stats",
")",
"if",
"step",
"%",
"self",
".",
"report_every",
"==",
"0",
":",
"self",
".",
"_report_training",
"(",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
")",
"self",
".",
"progress_step",
"+=",
"1",
"return",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L50-L75 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgrBase._report_training | (self, *args, **kwargs) | To be overridden | To be overridden | [
"To",
"be",
"overridden"
] | def _report_training(self, *args, **kwargs):
""" To be overridden """
raise NotImplementedError() | [
"def",
"_report_training",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L77-L79 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgrBase.report_step | (self, lr, step, train_stats=None, valid_stats=None) | Report stats of a step
Args:
train_stats(Statistics): training stats
valid_stats(Statistics): validation stats
lr(float): current learning rate | Report stats of a step | [
"Report",
"stats",
"of",
"a",
"step"
] | def report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
Report stats of a step
Args:
train_stats(Statistics): training stats
valid_stats(Statistics): validation stats
lr(float): current learning rate
"""
self._report_step(
lr, step, train_stats=train_stats, valid_stats=valid_stats) | [
"def",
"report_step",
"(",
"self",
",",
"lr",
",",
"step",
",",
"train_stats",
"=",
"None",
",",
"valid_stats",
"=",
"None",
")",
":",
"self",
".",
"_report_step",
"(",
"lr",
",",
"step",
",",
"train_stats",
"=",
"train_stats",
",",
"valid_stats",
"=",
"valid_stats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L81-L91 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgr.__init__ | (self, report_every, start_time=-1., tensorboard_writer=None) | A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None | A report manager that writes statistics on standard output as well as
(optionally) TensorBoard | [
"A",
"report",
"manager",
"that",
"writes",
"statistics",
"on",
"standard",
"output",
"as",
"well",
"as",
"(",
"optionally",
")",
"TensorBoard"
] | def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
self.tensorboard_writer = tensorboard_writer | [
"def",
"__init__",
"(",
"self",
",",
"report_every",
",",
"start_time",
"=",
"-",
"1.",
",",
"tensorboard_writer",
"=",
"None",
")",
":",
"super",
"(",
"ReportMgr",
",",
"self",
")",
".",
"__init__",
"(",
"report_every",
",",
"start_time",
")",
"self",
".",
"tensorboard_writer",
"=",
"tensorboard_writer"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L98-L109 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgr._report_training | (self, step, num_steps, learning_rate,
report_stats) | return report_stats | See base class method `ReportMgrBase.report_training`. | See base class method `ReportMgrBase.report_training`. | [
"See",
"base",
"class",
"method",
"ReportMgrBase",
".",
"report_training",
"."
] | def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
# Log the progress using the number of batches on the x-axis.
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
self.progress_step)
report_stats = onmt.utils.Statistics()
return report_stats | [
"def",
"_report_training",
"(",
"self",
",",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"report_stats",
")",
":",
"report_stats",
".",
"output",
"(",
"step",
",",
"num_steps",
",",
"learning_rate",
",",
"self",
".",
"start_time",
")",
"# Log the progress using the number of batches on the x-axis.",
"self",
".",
"maybe_log_tensorboard",
"(",
"report_stats",
",",
"\"progress\"",
",",
"learning_rate",
",",
"self",
".",
"progress_step",
")",
"report_stats",
"=",
"onmt",
".",
"utils",
".",
"Statistics",
"(",
")",
"return",
"report_stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L116-L131 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/utils/report_manager.py | python | ReportMgr._report_step | (self, lr, step, train_stats=None, valid_stats=None) | See base class method `ReportMgrBase.report_step`. | See base class method `ReportMgrBase.report_step`. | [
"See",
"base",
"class",
"method",
"ReportMgrBase",
".",
"report_step",
"."
] | def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train perplexity: %g' % train_stats.ppl())
self.log('Train accuracy: %g' % train_stats.accuracy())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation perplexity: %g' % valid_stats.ppl())
self.log('Validation accuracy: %g' % valid_stats.accuracy())
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
step) | [
"def",
"_report_step",
"(",
"self",
",",
"lr",
",",
"step",
",",
"train_stats",
"=",
"None",
",",
"valid_stats",
"=",
"None",
")",
":",
"if",
"train_stats",
"is",
"not",
"None",
":",
"self",
".",
"log",
"(",
"'Train perplexity: %g'",
"%",
"train_stats",
".",
"ppl",
"(",
")",
")",
"self",
".",
"log",
"(",
"'Train accuracy: %g'",
"%",
"train_stats",
".",
"accuracy",
"(",
")",
")",
"self",
".",
"maybe_log_tensorboard",
"(",
"train_stats",
",",
"\"train\"",
",",
"lr",
",",
"step",
")",
"if",
"valid_stats",
"is",
"not",
"None",
":",
"self",
".",
"log",
"(",
"'Validation perplexity: %g'",
"%",
"valid_stats",
".",
"ppl",
"(",
")",
")",
"self",
".",
"log",
"(",
"'Validation accuracy: %g'",
"%",
"valid_stats",
".",
"accuracy",
"(",
")",
")",
"self",
".",
"maybe_log_tensorboard",
"(",
"valid_stats",
",",
"\"valid\"",
",",
"lr",
",",
"step",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/utils/report_manager.py#L133-L153 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/multi_headed_attn.py | python | MultiHeadedAttention.forward | (self, key, value, query, mask=None,
layer_cache=None, type=None) | return output, top_attn | Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
* one of the attention vectors `[batch, query_len, key_len]` | Compute the context vector and the attention vectors. | [
"Compute",
"the",
"context",
"vector",
"and",
"the",
"attention",
"vectors",
"."
] | def forward(self, key, value, query, mask=None,
layer_cache=None, type=None):
"""
Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
* one of the attention vectors `[batch, query_len, key_len]`
"""
# CHECKS
# batch, k_len, d = key.size()
# batch_, k_len_, d_ = value.size()
# aeq(batch, batch_)
# aeq(k_len, k_len_)
# aeq(d, d_)
# batch_, q_len, d_ = query.size()
# aeq(batch, batch_)
# aeq(d, d_)
# aeq(self.model_dim % 8, 0)
# if mask is not None:
# batch_, q_len_, k_len_ = mask.size()
# aeq(batch_, batch)
# aeq(k_len_, k_len)
# aeq(q_len_, q_len)
# END CHECKS
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
key_len = key.size(1)
query_len = query.size(1)
def shape(x):
""" projection """
return x.view(batch_size, -1, head_count, dim_per_head) \
.transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous() \
.view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if type == "self":
query, key, value = self.linear_query(query),\
self.linear_keys(query),\
self.linear_values(query)
key = shape(key)
value = shape(value)
if layer_cache is not None:
device = key.device
if layer_cache["self_keys"] is not None:
key = torch.cat(
(layer_cache["self_keys"].to(device), key),
dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat(
(layer_cache["self_values"].to(device), value),
dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif type == "context":
query = self.linear_query(query)
if layer_cache is not None:
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = layer_cache["memory_keys"],\
layer_cache["memory_values"]
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
query = shape(query)
key_len = key.size(2)
query_len = query.size(2)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
drop_attn = self.dropout(attn)
context = unshape(torch.matmul(drop_attn, value))
output = self.final_linear(context)
# CHECK
# batch_, q_len_, d_ = output.size()
# aeq(q_len, q_len_)
# aeq(batch, batch_)
# aeq(d, d_)
# Return one attn
top_attn = attn \
.view(batch_size, head_count,
query_len, key_len)[:, 0, :, :] \
.contiguous()
return output, top_attn | [
"def",
"forward",
"(",
"self",
",",
"key",
",",
"value",
",",
"query",
",",
"mask",
"=",
"None",
",",
"layer_cache",
"=",
"None",
",",
"type",
"=",
"None",
")",
":",
"# CHECKS",
"# batch, k_len, d = key.size()",
"# batch_, k_len_, d_ = value.size()",
"# aeq(batch, batch_)",
"# aeq(k_len, k_len_)",
"# aeq(d, d_)",
"# batch_, q_len, d_ = query.size()",
"# aeq(batch, batch_)",
"# aeq(d, d_)",
"# aeq(self.model_dim % 8, 0)",
"# if mask is not None:",
"# batch_, q_len_, k_len_ = mask.size()",
"# aeq(batch_, batch)",
"# aeq(k_len_, k_len)",
"# aeq(q_len_ == q_len)",
"# END CHECKS",
"batch_size",
"=",
"key",
".",
"size",
"(",
"0",
")",
"dim_per_head",
"=",
"self",
".",
"dim_per_head",
"head_count",
"=",
"self",
".",
"head_count",
"key_len",
"=",
"key",
".",
"size",
"(",
"1",
")",
"query_len",
"=",
"query",
".",
"size",
"(",
"1",
")",
"def",
"shape",
"(",
"x",
")",
":",
"\"\"\" projection \"\"\"",
"return",
"x",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"head_count",
",",
"dim_per_head",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"def",
"unshape",
"(",
"x",
")",
":",
"\"\"\" compute context \"\"\"",
"return",
"x",
".",
"transpose",
"(",
"1",
",",
"2",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"head_count",
"*",
"dim_per_head",
")",
"# 1) Project key, value, and query.",
"if",
"layer_cache",
"is",
"not",
"None",
":",
"if",
"type",
"==",
"\"self\"",
":",
"query",
",",
"key",
",",
"value",
"=",
"self",
".",
"linear_query",
"(",
"query",
")",
",",
"self",
".",
"linear_keys",
"(",
"query",
")",
",",
"self",
".",
"linear_values",
"(",
"query",
")",
"key",
"=",
"shape",
"(",
"key",
")",
"value",
"=",
"shape",
"(",
"value",
")",
"if",
"layer_cache",
"is",
"not",
"None",
":",
"device",
"=",
"key",
".",
"device",
"if",
"layer_cache",
"[",
"\"self_keys\"",
"]",
"is",
"not",
"None",
":",
"key",
"=",
"torch",
".",
"cat",
"(",
"(",
"layer_cache",
"[",
"\"self_keys\"",
"]",
".",
"to",
"(",
"device",
")",
",",
"key",
")",
",",
"dim",
"=",
"2",
")",
"if",
"layer_cache",
"[",
"\"self_values\"",
"]",
"is",
"not",
"None",
":",
"value",
"=",
"torch",
".",
"cat",
"(",
"(",
"layer_cache",
"[",
"\"self_values\"",
"]",
".",
"to",
"(",
"device",
")",
",",
"value",
")",
",",
"dim",
"=",
"2",
")",
"layer_cache",
"[",
"\"self_keys\"",
"]",
"=",
"key",
"layer_cache",
"[",
"\"self_values\"",
"]",
"=",
"value",
"elif",
"type",
"==",
"\"context\"",
":",
"query",
"=",
"self",
".",
"linear_query",
"(",
"query",
")",
"if",
"layer_cache",
"is",
"not",
"None",
":",
"if",
"layer_cache",
"[",
"\"memory_keys\"",
"]",
"is",
"None",
":",
"key",
",",
"value",
"=",
"self",
".",
"linear_keys",
"(",
"key",
")",
",",
"self",
".",
"linear_values",
"(",
"value",
")",
"key",
"=",
"shape",
"(",
"key",
")",
"value",
"=",
"shape",
"(",
"value",
")",
"else",
":",
"key",
",",
"value",
"=",
"layer_cache",
"[",
"\"memory_keys\"",
"]",
",",
"layer_cache",
"[",
"\"memory_values\"",
"]",
"layer_cache",
"[",
"\"memory_keys\"",
"]",
"=",
"key",
"layer_cache",
"[",
"\"memory_values\"",
"]",
"=",
"value",
"else",
":",
"key",
",",
"value",
"=",
"self",
".",
"linear_keys",
"(",
"key",
")",
",",
"self",
".",
"linear_values",
"(",
"value",
")",
"key",
"=",
"shape",
"(",
"key",
")",
"value",
"=",
"shape",
"(",
"value",
")",
"else",
":",
"key",
"=",
"self",
".",
"linear_keys",
"(",
"key",
")",
"value",
"=",
"self",
".",
"linear_values",
"(",
"value",
")",
"query",
"=",
"self",
".",
"linear_query",
"(",
"query",
")",
"key",
"=",
"shape",
"(",
"key",
")",
"value",
"=",
"shape",
"(",
"value",
")",
"query",
"=",
"shape",
"(",
"query",
")",
"key_len",
"=",
"key",
".",
"size",
"(",
"2",
")",
"query_len",
"=",
"query",
".",
"size",
"(",
"2",
")",
"# 2) Calculate and scale scores.",
"query",
"=",
"query",
"/",
"math",
".",
"sqrt",
"(",
"dim_per_head",
")",
"scores",
"=",
"torch",
".",
"matmul",
"(",
"query",
",",
"key",
".",
"transpose",
"(",
"2",
",",
"3",
")",
")",
"if",
"mask",
"is",
"not",
"None",
":",
"mask",
"=",
"mask",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand_as",
"(",
"scores",
")",
"scores",
"=",
"scores",
".",
"masked_fill",
"(",
"mask",
",",
"-",
"1e18",
")",
"# 3) Apply attention dropout and compute context vectors.",
"attn",
"=",
"self",
".",
"softmax",
"(",
"scores",
")",
"drop_attn",
"=",
"self",
".",
"dropout",
"(",
"attn",
")",
"context",
"=",
"unshape",
"(",
"torch",
".",
"matmul",
"(",
"drop_attn",
",",
"value",
")",
")",
"output",
"=",
"self",
".",
"final_linear",
"(",
"context",
")",
"# CHECK",
"# batch_, q_len_, d_ = output.size()",
"# aeq(q_len, q_len_)",
"# aeq(batch, batch_)",
"# aeq(d, d_)",
"# Return one attn",
"top_attn",
"=",
"attn",
".",
"view",
"(",
"batch_size",
",",
"head_count",
",",
"query_len",
",",
"key_len",
")",
"[",
":",
",",
"0",
",",
":",
",",
":",
"]",
".",
"contiguous",
"(",
")",
"return",
"output",
",",
"top_attn"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/multi_headed_attn.py#L69-L201 |
|
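The inner shape()/unshape() closures do all of the head bookkeeping. Replaying them standalone, with illustrative sizes, shows they are exact inverses:

```python
import torch

batch_size, seq_len, head_count, dim_per_head = 2, 5, 8, 64
x = torch.randn(batch_size, seq_len, head_count * dim_per_head)

# shape(): split the model dim into heads -> [batch, heads, len, dim_head]
shaped = x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2)
assert shaped.shape == (batch_size, head_count, seq_len, dim_per_head)

# unshape(): merge the heads back -> [batch, len, heads * dim_head]
merged = shaped.transpose(1, 2).contiguous() \
               .view(batch_size, -1, head_count * dim_per_head)
assert torch.equal(merged, x)
```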
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/position_ffn.py | python | PositionwiseFeedForward.forward | (self, x) | return output + x | Layer definition.
Args:
x: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ] | Layer definition. | [
"Layer",
"definition",
"."
] | def forward(self, x):
"""
Layer definition.
Args:
x: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x | [
"def",
"forward",
"(",
"self",
",",
"x",
")",
":",
"inter",
"=",
"self",
".",
"dropout_1",
"(",
"self",
".",
"relu",
"(",
"self",
".",
"w_1",
"(",
"self",
".",
"layer_norm",
"(",
"x",
")",
")",
")",
")",
"output",
"=",
"self",
".",
"dropout_2",
"(",
"self",
".",
"w_2",
"(",
"inter",
")",
")",
"return",
"output",
"+",
"x"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/position_ffn.py#L29-L42 |
|
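The same pre-norm residual wiring, unrolled outside the module with placeholder hyper-parameters, for tracing the tensor shapes:

```python
import torch
import torch.nn as nn

d_model, d_ff = 16, 64                        # placeholder sizes
layer_norm = nn.LayerNorm(d_model, eps=1e-6)
w_1, w_2 = nn.Linear(d_model, d_ff), nn.Linear(d_ff, d_model)
relu, drop = nn.ReLU(), nn.Dropout(0.1)

x = torch.randn(2, 5, d_model)                # [batch, input_len, d_model]
inter = drop(relu(w_1(layer_norm(x))))        # inner expansion to d_ff
out = drop(w_2(inter)) + x                    # projection + residual
assert out.shape == x.shape
```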
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/sparse_activations.py | python | threshold_and_support | (z, dim=0) | return tau_z, k_z | z: any dimension
dim: dimension along which to apply the sparsemax | z: any dimension
dim: dimension along which to apply the sparsemax | [
"z",
":",
"any",
"dimension",
"dim",
":",
"dimension",
"along",
"which",
"to",
"apply",
"the",
"sparsemax"
] | def threshold_and_support(z, dim=0):
"""
z: any dimension
dim: dimension along which to apply the sparsemax
"""
sorted_z, _ = torch.sort(z, descending=True, dim=dim)
z_sum = sorted_z.cumsum(dim) - 1 # sort of a misnomer
k = torch.arange(1, sorted_z.size(dim) + 1, device=z.device).float().view(
torch.Size([-1] + [1] * (z.dim() - 1))
).transpose(0, dim)
support = k * sorted_z > z_sum
k_z_indices = support.sum(dim=dim).unsqueeze(dim)
k_z = k_z_indices.float()
tau_z = z_sum.gather(dim, k_z_indices - 1) / k_z
return tau_z, k_z | [
"def",
"threshold_and_support",
"(",
"z",
",",
"dim",
"=",
"0",
")",
":",
"sorted_z",
",",
"_",
"=",
"torch",
".",
"sort",
"(",
"z",
",",
"descending",
"=",
"True",
",",
"dim",
"=",
"dim",
")",
"z_sum",
"=",
"sorted_z",
".",
"cumsum",
"(",
"dim",
")",
"-",
"1",
"# sort of a misnomer",
"k",
"=",
"torch",
".",
"arange",
"(",
"1",
",",
"sorted_z",
".",
"size",
"(",
"dim",
")",
"+",
"1",
",",
"device",
"=",
"z",
".",
"device",
")",
".",
"float",
"(",
")",
".",
"view",
"(",
"torch",
".",
"Size",
"(",
"[",
"-",
"1",
"]",
"+",
"[",
"1",
"]",
"*",
"(",
"z",
".",
"dim",
"(",
")",
"-",
"1",
")",
")",
")",
".",
"transpose",
"(",
"0",
",",
"dim",
")",
"support",
"=",
"k",
"*",
"sorted_z",
">",
"z_sum",
"k_z_indices",
"=",
"support",
".",
"sum",
"(",
"dim",
"=",
"dim",
")",
".",
"unsqueeze",
"(",
"dim",
")",
"k_z",
"=",
"k_z_indices",
".",
"float",
"(",
")",
"tau_z",
"=",
"z_sum",
".",
"gather",
"(",
"dim",
",",
"k_z_indices",
"-",
"1",
")",
"/",
"k_z",
"return",
"tau_z",
",",
"k_z"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/sparse_activations.py#L11-L26 |
|
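A hand-checkable run: for z = [1.2, 1.0, 0.1] the sorted cumulative sums give a support of size 2 and threshold tau = 0.6, so the resulting sparsemax keeps exactly two coordinates:

```python
import torch

z = torch.tensor([1.2, 1.0, 0.1]).unsqueeze(1)   # column vector, dim=0
tau_z, k_z = threshold_and_support(z, dim=0)
print(tau_z.item(), k_z.item())                  # ~0.6 and 2.0
p = torch.clamp(z - tau_z, min=0)                # the sparsemax output
print(p.squeeze())                               # tensor([0.6, 0.4, 0.0])
```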
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/sparse_activations.py | python | SparsemaxFunction.forward | (ctx, input, dim=0) | return output | input (FloatTensor): any shape
returns (FloatTensor): same shape with sparsemax computed on given dim | input (FloatTensor): any shape
returns (FloatTensor): same shape with sparsemax computed on given dim | [
"input",
"(",
"FloatTensor",
")",
":",
"any",
"shape",
"returns",
"(",
"FloatTensor",
")",
":",
"same",
"shape",
"with",
"sparsemax",
"computed",
"on",
"given",
"dim"
] | def forward(ctx, input, dim=0):
"""
input (FloatTensor): any shape
returns (FloatTensor): same shape with sparsemax computed on given dim
"""
ctx.dim = dim
tau_z, k_z = threshold_and_support(input, dim=dim)
output = torch.clamp(input - tau_z, min=0)
ctx.save_for_backward(k_z, output)
return output | [
"def",
"forward",
"(",
"ctx",
",",
"input",
",",
"dim",
"=",
"0",
")",
":",
"ctx",
".",
"dim",
"=",
"dim",
"tau_z",
",",
"k_z",
"=",
"threshold_and_support",
"(",
"input",
",",
"dim",
"=",
"dim",
")",
"output",
"=",
"torch",
".",
"clamp",
"(",
"input",
"-",
"tau_z",
",",
"min",
"=",
"0",
")",
"ctx",
".",
"save_for_backward",
"(",
"k_z",
",",
"output",
")",
"return",
"output"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/sparse_activations.py#L32-L41 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/global_attention.py | python | GlobalAttention.score | (self, h_t, h_s) | Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]` | Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]` | [
"Args",
":",
"h_t",
"(",
"FloatTensor",
")",
":",
"sequence",
"of",
"queries",
"[",
"batch",
"x",
"tgt_len",
"x",
"dim",
"]",
"h_s",
"(",
"FloatTensor",
")",
":",
"sequence",
"of",
"sources",
"[",
"batch",
"x",
"src_len",
"x",
"dim",
"]"
] | def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# (batch, t_len, s_len, d)
wquh = torch.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) | [
"def",
"score",
"(",
"self",
",",
"h_t",
",",
"h_s",
")",
":",
"# Check input sizes",
"src_batch",
",",
"src_len",
",",
"src_dim",
"=",
"h_s",
".",
"size",
"(",
")",
"tgt_batch",
",",
"tgt_len",
",",
"tgt_dim",
"=",
"h_t",
".",
"size",
"(",
")",
"aeq",
"(",
"src_batch",
",",
"tgt_batch",
")",
"aeq",
"(",
"src_dim",
",",
"tgt_dim",
")",
"aeq",
"(",
"self",
".",
"dim",
",",
"src_dim",
")",
"if",
"self",
".",
"attn_type",
"in",
"[",
"\"general\"",
",",
"\"dot\"",
"]",
":",
"if",
"self",
".",
"attn_type",
"==",
"\"general\"",
":",
"h_t_",
"=",
"h_t",
".",
"view",
"(",
"tgt_batch",
"*",
"tgt_len",
",",
"tgt_dim",
")",
"h_t_",
"=",
"self",
".",
"linear_in",
"(",
"h_t_",
")",
"h_t",
"=",
"h_t_",
".",
"view",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"tgt_dim",
")",
"h_s_",
"=",
"h_s",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)",
"return",
"torch",
".",
"bmm",
"(",
"h_t",
",",
"h_s_",
")",
"else",
":",
"dim",
"=",
"self",
".",
"dim",
"wq",
"=",
"self",
".",
"linear_query",
"(",
"h_t",
".",
"view",
"(",
"-",
"1",
",",
"dim",
")",
")",
"wq",
"=",
"wq",
".",
"view",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"1",
",",
"dim",
")",
"wq",
"=",
"wq",
".",
"expand",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"src_len",
",",
"dim",
")",
"uh",
"=",
"self",
".",
"linear_context",
"(",
"h_s",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
",",
"dim",
")",
")",
"uh",
"=",
"uh",
".",
"view",
"(",
"src_batch",
",",
"1",
",",
"src_len",
",",
"dim",
")",
"uh",
"=",
"uh",
".",
"expand",
"(",
"src_batch",
",",
"tgt_len",
",",
"src_len",
",",
"dim",
")",
"# (batch, t_len, s_len, d)",
"wquh",
"=",
"torch",
".",
"tanh",
"(",
"wq",
"+",
"uh",
")",
"return",
"self",
".",
"v",
"(",
"wquh",
".",
"view",
"(",
"-",
"1",
",",
"dim",
")",
")",
".",
"view",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"src_len",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/global_attention.py#L95-L136 |
||
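A minimal sketch of the "dot" scoring path from GlobalAttention.score above, using plain torch tensors; the batch/length/dimension sizes are illustrative assumptions, not values taken from this dataset.

import torch

batch, tgt_len, src_len, dim = 2, 3, 5, 4
h_t = torch.randn(batch, tgt_len, dim)   # decoder queries
h_s = torch.randn(batch, src_len, dim)   # encoder states

# (batch, tgt_len, dim) x (batch, dim, src_len) -> (batch, tgt_len, src_len)
scores = torch.bmm(h_t, h_s.transpose(1, 2))
assert scores.shape == (batch, tgt_len, src_len)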
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/global_attention.py | python | GlobalAttention.forward | (self, source, memory_bank, memory_lengths=None, coverage=None) | return attn_h, align_vectors | Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]` | [] | def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
# one step input
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
batch, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, source_l_ = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank += self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
# compute attention scores, as in Luong et al.
align = self.score(source, memory_bank)
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1) # Make it broadcastable.
align.masked_fill_(1 - mask, -float('inf'))
# Softmax or sparsemax to normalize attention weights
if self.attn_func == "softmax":
align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)
else:
align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, memory_bank)
# concatenate
concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ["general", "dot"]:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, source_l_ = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
# Check output sizes
target_l_, batch_, dim_ = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
target_l_, batch_, source_l_ = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
return attn_h, align_vectors | [
"def",
"forward",
"(",
"self",
",",
"source",
",",
"memory_bank",
",",
"memory_lengths",
"=",
"None",
",",
"coverage",
"=",
"None",
")",
":",
"# one step input",
"if",
"source",
".",
"dim",
"(",
")",
"==",
"2",
":",
"one_step",
"=",
"True",
"source",
"=",
"source",
".",
"unsqueeze",
"(",
"1",
")",
"else",
":",
"one_step",
"=",
"False",
"batch",
",",
"source_l",
",",
"dim",
"=",
"memory_bank",
".",
"size",
"(",
")",
"batch_",
",",
"target_l",
",",
"dim_",
"=",
"source",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"dim",
",",
"dim_",
")",
"aeq",
"(",
"self",
".",
"dim",
",",
"dim",
")",
"if",
"coverage",
"is",
"not",
"None",
":",
"batch_",
",",
"source_l_",
"=",
"coverage",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"source_l",
",",
"source_l_",
")",
"if",
"coverage",
"is",
"not",
"None",
":",
"cover",
"=",
"coverage",
".",
"view",
"(",
"-",
"1",
")",
".",
"unsqueeze",
"(",
"1",
")",
"memory_bank",
"+=",
"self",
".",
"linear_cover",
"(",
"cover",
")",
".",
"view_as",
"(",
"memory_bank",
")",
"memory_bank",
"=",
"torch",
".",
"tanh",
"(",
"memory_bank",
")",
"# compute attention scores, as in Luong et al.",
"align",
"=",
"self",
".",
"score",
"(",
"source",
",",
"memory_bank",
")",
"if",
"memory_lengths",
"is",
"not",
"None",
":",
"mask",
"=",
"sequence_mask",
"(",
"memory_lengths",
",",
"max_len",
"=",
"align",
".",
"size",
"(",
"-",
"1",
")",
")",
"mask",
"=",
"mask",
".",
"unsqueeze",
"(",
"1",
")",
"# Make it broadcastable.",
"align",
".",
"masked_fill_",
"(",
"1",
"-",
"mask",
",",
"-",
"float",
"(",
"'inf'",
")",
")",
"# Softmax or sparsemax to normalize attention weights",
"if",
"self",
".",
"attn_func",
"==",
"\"softmax\"",
":",
"align_vectors",
"=",
"F",
".",
"softmax",
"(",
"align",
".",
"view",
"(",
"batch",
"*",
"target_l",
",",
"source_l",
")",
",",
"-",
"1",
")",
"else",
":",
"align_vectors",
"=",
"sparsemax",
"(",
"align",
".",
"view",
"(",
"batch",
"*",
"target_l",
",",
"source_l",
")",
",",
"-",
"1",
")",
"align_vectors",
"=",
"align_vectors",
".",
"view",
"(",
"batch",
",",
"target_l",
",",
"source_l",
")",
"# each context vector c_t is the weighted average",
"# over all the source hidden states",
"c",
"=",
"torch",
".",
"bmm",
"(",
"align_vectors",
",",
"memory_bank",
")",
"# concatenate",
"concat_c",
"=",
"torch",
".",
"cat",
"(",
"[",
"c",
",",
"source",
"]",
",",
"2",
")",
".",
"view",
"(",
"batch",
"*",
"target_l",
",",
"dim",
"*",
"2",
")",
"attn_h",
"=",
"self",
".",
"linear_out",
"(",
"concat_c",
")",
".",
"view",
"(",
"batch",
",",
"target_l",
",",
"dim",
")",
"if",
"self",
".",
"attn_type",
"in",
"[",
"\"general\"",
",",
"\"dot\"",
"]",
":",
"attn_h",
"=",
"torch",
".",
"tanh",
"(",
"attn_h",
")",
"if",
"one_step",
":",
"attn_h",
"=",
"attn_h",
".",
"squeeze",
"(",
"1",
")",
"align_vectors",
"=",
"align_vectors",
".",
"squeeze",
"(",
"1",
")",
"# Check output sizes",
"batch_",
",",
"dim_",
"=",
"attn_h",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"dim",
",",
"dim_",
")",
"batch_",
",",
"source_l_",
"=",
"align_vectors",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"source_l",
",",
"source_l_",
")",
"else",
":",
"attn_h",
"=",
"attn_h",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"align_vectors",
"=",
"align_vectors",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# Check output sizes",
"target_l_",
",",
"batch_",
",",
"dim_",
"=",
"attn_h",
".",
"size",
"(",
")",
"aeq",
"(",
"target_l",
",",
"target_l_",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"dim",
",",
"dim_",
")",
"target_l_",
",",
"batch_",
",",
"source_l_",
"=",
"align_vectors",
".",
"size",
"(",
")",
"aeq",
"(",
"target_l",
",",
"target_l_",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"source_l",
",",
"source_l_",
")",
"return",
"attn_h",
",",
"align_vectors"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/global_attention.py#L138-L227 |
||
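A short sketch of the context computation inside GlobalAttention.forward above: scores are normalized over source positions and used to average the memory bank. Shapes are assumed for illustration.

import torch
import torch.nn.functional as F

batch, tgt_len, src_len, dim = 2, 3, 5, 4
scores = torch.randn(batch, tgt_len, src_len)    # raw attention scores
memory_bank = torch.randn(batch, src_len, dim)   # encoder states

align = F.softmax(scores, dim=-1)        # attention distribution per query
context = torch.bmm(align, memory_bank)  # weighted average of sources
assert context.shape == (batch, tgt_len, dim)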
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/gate.py | python | context_gate_factory | (gate_type, embeddings_size, decoder_size,
attention_size, output_size) | return gate_types[gate_type](embeddings_size, decoder_size, attention_size,
output_size) | Returns the correct ContextGate class | Returns the correct ContextGate class | [
"Returns",
"the",
"correct",
"ContextGate",
"class"
] | def context_gate_factory(gate_type, embeddings_size, decoder_size,
attention_size, output_size):
"""Returns the correct ContextGate class"""
gate_types = {'source': SourceContextGate,
'target': TargetContextGate,
'both': BothContextGate}
assert gate_type in gate_types, "Not valid ContextGate type: {0}".format(
gate_type)
return gate_types[gate_type](embeddings_size, decoder_size, attention_size,
output_size) | [
"def",
"context_gate_factory",
"(",
"gate_type",
",",
"embeddings_size",
",",
"decoder_size",
",",
"attention_size",
",",
"output_size",
")",
":",
"gate_types",
"=",
"{",
"'source'",
":",
"SourceContextGate",
",",
"'target'",
":",
"TargetContextGate",
",",
"'both'",
":",
"BothContextGate",
"}",
"assert",
"gate_type",
"in",
"gate_types",
",",
"\"Not valid ContextGate type: {0}\"",
".",
"format",
"(",
"gate_type",
")",
"return",
"gate_types",
"[",
"gate_type",
"]",
"(",
"embeddings_size",
",",
"decoder_size",
",",
"attention_size",
",",
"output_size",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/gate.py#L6-L17 |
|
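The factory above is plain dictionary dispatch. A stripped-down sketch of the same pattern with placeholder classes (SourceGate and TargetGate are hypothetical names, not part of the library):

class SourceGate:   # placeholder standing in for SourceContextGate
    pass

class TargetGate:   # placeholder standing in for TargetContextGate
    pass

def gate_factory(gate_type):
    gate_types = {"source": SourceGate, "target": TargetGate}
    assert gate_type in gate_types, "Not a valid gate type: %s" % gate_type
    return gate_types[gate_type]()

gate = gate_factory("source")  # -> SourceGate instance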
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/average_attn.py | python | AverageAttention.cumulative_average_mask | (self, batch_size, inputs_len) | return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) | Builds the mask to compute the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(`FloatTensor`):
* A Tensor of shape `[batch_size x input_len x input_len]` | Builds the mask to compute the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Figure 3 | [
"Builds",
"the",
"mask",
"to",
"compute",
"the",
"cumulative",
"average",
"as",
"described",
"in",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1805",
".",
"00631",
"--",
"Figure",
"3"
] | def cumulative_average_mask(self, batch_size, inputs_len):
"""
Builds the mask to compute the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(`FloatTensor`):
* A Tensor of shape `[batch_size x input_len x input_len]`
"""
triangle = torch.tril(torch.ones(inputs_len, inputs_len))
weights = torch.ones(1, inputs_len) / torch.arange(
1, inputs_len + 1, dtype=torch.float)
mask = triangle * weights.transpose(0, 1)
return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) | [
"def",
"cumulative_average_mask",
"(",
"self",
",",
"batch_size",
",",
"inputs_len",
")",
":",
"triangle",
"=",
"torch",
".",
"tril",
"(",
"torch",
".",
"ones",
"(",
"inputs_len",
",",
"inputs_len",
")",
")",
"weights",
"=",
"torch",
".",
"ones",
"(",
"1",
",",
"inputs_len",
")",
"/",
"torch",
".",
"arange",
"(",
"1",
",",
"inputs_len",
"+",
"1",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
"mask",
"=",
"triangle",
"*",
"weights",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"return",
"mask",
".",
"unsqueeze",
"(",
"0",
")",
".",
"expand",
"(",
"batch_size",
",",
"inputs_len",
",",
"inputs_len",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/average_attn.py#L31-L51 |
|
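A worked example of the cumulative-average mask above for inputs_len = 3: row t carries weight 1/(t+1) on positions 0..t, so multiplying the mask by a sequence yields its running means.

import torch

inputs_len = 3
triangle = torch.tril(torch.ones(inputs_len, inputs_len))
weights = torch.ones(1, inputs_len) / torch.arange(1, inputs_len + 1, dtype=torch.float)
mask = triangle * weights.transpose(0, 1)
print(mask)
# tensor([[1.0000, 0.0000, 0.0000],
#         [0.5000, 0.5000, 0.0000],
#         [0.3333, 0.3333, 0.3333]])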
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/average_attn.py | python | AverageAttention.cumulative_average | (self, inputs, mask_or_step,
layer_cache=None, step=None) | Computes the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6)
Args:
inputs (`FloatTensor`): sequence to average
`[batch_size x input_len x dimension]`
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
cache: a dictionary containing the cumulative average
of the previous step. | Computes the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6) | [
"Computes",
"the",
"cumulative",
"average",
"as",
"described",
"in",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1805",
".",
"00631",
"--",
"Equations",
"(",
"1",
")",
"(",
"5",
")",
"(",
"6",
")"
] | def cumulative_average(self, inputs, mask_or_step,
layer_cache=None, step=None):
"""
Computes the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6)
Args:
inputs (`FloatTensor`): sequence to average
`[batch_size x input_len x dimension]`
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
cache: a dictionary containing the cumulative average
of the previous step.
"""
if layer_cache is not None:
step = mask_or_step
device = inputs.device
average_attention = (inputs + step *
layer_cache["prev_g"].to(device)) / (step + 1)
layer_cache["prev_g"] = average_attention
return average_attention
else:
mask = mask_or_step
return torch.matmul(mask, inputs) | [
"def",
"cumulative_average",
"(",
"self",
",",
"inputs",
",",
"mask_or_step",
",",
"layer_cache",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"if",
"layer_cache",
"is",
"not",
"None",
":",
"step",
"=",
"mask_or_step",
"device",
"=",
"inputs",
".",
"device",
"average_attention",
"=",
"(",
"inputs",
"+",
"step",
"*",
"layer_cache",
"[",
"\"prev_g\"",
"]",
".",
"to",
"(",
"device",
")",
")",
"/",
"(",
"step",
"+",
"1",
")",
"layer_cache",
"[",
"\"prev_g\"",
"]",
"=",
"average_attention",
"return",
"average_attention",
"else",
":",
"mask",
"=",
"mask_or_step",
"return",
"torch",
".",
"matmul",
"(",
"mask",
",",
"inputs",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/average_attn.py#L53-L78 |
||
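A sketch of the cached branch above during step-by-step decoding: the running mean g_t = (x_t + t * g_{t-1}) / (t + 1) is updated one position at a time. Sizes are assumptions for illustration.

import torch

dim = 4
prev_g = torch.zeros(1, 1, dim)      # cached average before step 0
for step in range(3):
    x = torch.randn(1, 1, dim)       # input at the current step
    prev_g = (x + step * prev_g) / (step + 1)
# prev_g now equals the mean of the three step inputs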
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/average_attn.py | python | AverageAttention.forward | (self, inputs, mask=None, layer_cache=None, step=None) | return gating_outputs, average_outputs | Args:
inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`
Returns:
(`FloatTensor`, `FloatTensor`):
* gating_outputs `[batch_size x 1 x model_dim]`
* average_outputs average attention `[batch_size x 1 x model_dim]` | Args:
inputs (`FloatTensor`): `[batch_size x input_len x model_dim]` | [
"Args",
":",
"inputs",
"(",
"FloatTensor",
")",
":",
"[",
"batch_size",
"x",
"input_len",
"x",
"model_dim",
"]"
] | def forward(self, inputs, mask=None, layer_cache=None, step=None):
"""
Args:
inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`
Returns:
(`FloatTensor`, `FloatTensor`):
* gating_outputs `[batch_size x 1 x model_dim]`
* average_outputs average attention `[batch_size x 1 x model_dim]`
"""
batch_size = inputs.size(0)
inputs_len = inputs.size(1)
device = inputs.device
average_outputs = self.cumulative_average(
inputs, self.cumulative_average_mask(batch_size,
inputs_len).to(device).float()
if layer_cache is None else step, layer_cache=layer_cache)
average_outputs = self.average_layer(average_outputs)
gating_outputs = self.gating_layer(torch.cat((inputs,
average_outputs), -1))
input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
gating_outputs = torch.sigmoid(input_gate) * inputs + \
torch.sigmoid(forget_gate) * average_outputs
return gating_outputs, average_outputs | [
"def",
"forward",
"(",
"self",
",",
"inputs",
",",
"mask",
"=",
"None",
",",
"layer_cache",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"batch_size",
"=",
"inputs",
".",
"size",
"(",
"0",
")",
"inputs_len",
"=",
"inputs",
".",
"size",
"(",
"1",
")",
"device",
"=",
"inputs",
".",
"device",
"average_outputs",
"=",
"self",
".",
"cumulative_average",
"(",
"inputs",
",",
"self",
".",
"cumulative_average_mask",
"(",
"batch_size",
",",
"inputs_len",
")",
".",
"to",
"(",
"device",
")",
".",
"float",
"(",
")",
"if",
"layer_cache",
"is",
"None",
"else",
"step",
",",
"layer_cache",
"=",
"layer_cache",
")",
"average_outputs",
"=",
"self",
".",
"average_layer",
"(",
"average_outputs",
")",
"gating_outputs",
"=",
"self",
".",
"gating_layer",
"(",
"torch",
".",
"cat",
"(",
"(",
"inputs",
",",
"average_outputs",
")",
",",
"-",
"1",
")",
")",
"input_gate",
",",
"forget_gate",
"=",
"torch",
".",
"chunk",
"(",
"gating_outputs",
",",
"2",
",",
"dim",
"=",
"2",
")",
"gating_outputs",
"=",
"torch",
".",
"sigmoid",
"(",
"input_gate",
")",
"*",
"inputs",
"+",
"torch",
".",
"sigmoid",
"(",
"forget_gate",
")",
"*",
"average_outputs",
"return",
"gating_outputs",
",",
"average_outputs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/average_attn.py#L80-L106 |
|
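A sketch of the gating step at the end of forward() above: the projection is split into input and forget gates that mix the raw inputs with the averaged representation. The tensors here are random stand-ins.

import torch

x = torch.randn(2, 5, 8)        # inputs
avg = torch.randn(2, 5, 8)      # averaged outputs (assumed given)
gates = torch.randn(2, 5, 16)   # stand-in for the gating_layer output
i_gate, f_gate = torch.chunk(gates, 2, dim=2)
out = torch.sigmoid(i_gate) * x + torch.sigmoid(f_gate) * avg
assert out.shape == x.shape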
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/copy_generator.py | python | CopyGenerator.forward | (self, hidden, attn, src_map) | return torch.cat([out_prob, copy_prob], 1) | Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`
attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`
src_map (`FloatTensor`):
A sparse indicator matrix mapping each source word to
its index in the "extended" vocab containing.
`[src_len, batch, extra_words]` | Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words. | [
"Compute",
"a",
"distribution",
"over",
"the",
"target",
"dictionary",
"extended",
"by",
"the",
"dynamic",
"dictionary",
"implied",
"by",
"compying",
"source",
"words",
"."
] | def forward(self, hidden, attn, src_map):
"""
Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`
attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`
src_map (`FloatTensor`):
A sparse indicator matrix mapping each source word to
its index in the "extended" vocab containing.
`[src_len, batch, extra_words]`
"""
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')
prob = self.softmax(logits)
# Probability of copying p(z=1) batch.
p_copy = self.sigmoid(self.linear_copy(hidden))
# Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - p_copy.expand_as(prob))
mul_attn = torch.mul(attn, p_copy.expand_as(attn))
copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)
.transpose(0, 1),
src_map.transpose(0, 1)).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
return torch.cat([out_prob, copy_prob], 1) | [
"def",
"forward",
"(",
"self",
",",
"hidden",
",",
"attn",
",",
"src_map",
")",
":",
"# CHECKS",
"batch_by_tlen",
",",
"_",
"=",
"hidden",
".",
"size",
"(",
")",
"batch_by_tlen_",
",",
"slen",
"=",
"attn",
".",
"size",
"(",
")",
"slen_",
",",
"batch",
",",
"cvocab",
"=",
"src_map",
".",
"size",
"(",
")",
"aeq",
"(",
"batch_by_tlen",
",",
"batch_by_tlen_",
")",
"aeq",
"(",
"slen",
",",
"slen_",
")",
"# Original probabilities.",
"logits",
"=",
"self",
".",
"linear",
"(",
"hidden",
")",
"logits",
"[",
":",
",",
"self",
".",
"tgt_dict",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"]",
"=",
"-",
"float",
"(",
"'inf'",
")",
"prob",
"=",
"self",
".",
"softmax",
"(",
"logits",
")",
"# Probability of copying p(z=1) batch.",
"p_copy",
"=",
"self",
".",
"sigmoid",
"(",
"self",
".",
"linear_copy",
"(",
"hidden",
")",
")",
"# Probibility of not copying: p_{word}(w) * (1 - p(z))",
"out_prob",
"=",
"torch",
".",
"mul",
"(",
"prob",
",",
"1",
"-",
"p_copy",
".",
"expand_as",
"(",
"prob",
")",
")",
"mul_attn",
"=",
"torch",
".",
"mul",
"(",
"attn",
",",
"p_copy",
".",
"expand_as",
"(",
"attn",
")",
")",
"copy_prob",
"=",
"torch",
".",
"bmm",
"(",
"mul_attn",
".",
"view",
"(",
"-",
"1",
",",
"batch",
",",
"slen",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"src_map",
".",
"transpose",
"(",
"0",
",",
"1",
")",
")",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"copy_prob",
"=",
"copy_prob",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"-",
"1",
",",
"cvocab",
")",
"return",
"torch",
".",
"cat",
"(",
"[",
"out_prob",
",",
"copy_prob",
"]",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/copy_generator.py#L71-L106 |
|
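A sketch of the copy/generate mixture computed above: the vocabulary distribution is scaled by (1 - p_copy) and the attention weights by p_copy, so the combined mass still sums to one. Toy sizes assumed.

import torch

vocab, src_len = 10, 6
prob = torch.softmax(torch.randn(3, vocab), dim=-1)    # p_vocab per example
attn = torch.softmax(torch.randn(3, src_len), dim=-1)  # copy attention
p_copy = torch.sigmoid(torch.randn(3, 1))              # p(copy) per example

out_prob = prob * (1 - p_copy)   # generation mass
copy_prob = attn * p_copy        # copy mass over source positions
total = out_prob.sum(-1) + copy_prob.sum(-1)
assert torch.allclose(total, torch.ones(3))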
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/copy_generator.py | python | CopyGeneratorLossCompute._make_shard_state | (self, batch, output, range_, attns) | return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
} | See base class for args description. | See base class for args description. | [
"See",
"base",
"class",
"for",
"args",
"description",
"."
] | def _make_shard_state(self, batch, output, range_, attns):
""" See base class for args description. """
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
} | [
"def",
"_make_shard_state",
"(",
"self",
",",
"batch",
",",
"output",
",",
"range_",
",",
"attns",
")",
":",
"if",
"getattr",
"(",
"batch",
",",
"\"alignment\"",
",",
"None",
")",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"\"using -copy_attn you need to pass in \"",
"\"-dynamic_dict during preprocess stage.\"",
")",
"return",
"{",
"\"output\"",
":",
"output",
",",
"\"target\"",
":",
"batch",
".",
"tgt",
"[",
"range_",
"[",
"0",
"]",
"+",
"1",
":",
"range_",
"[",
"1",
"]",
"]",
",",
"\"copy_attn\"",
":",
"attns",
".",
"get",
"(",
"\"copy\"",
")",
",",
"\"align\"",
":",
"batch",
".",
"alignment",
"[",
"range_",
"[",
"0",
"]",
"+",
"1",
":",
"range_",
"[",
"1",
"]",
"]",
"}"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/copy_generator.py#L163-L174 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/copy_generator.py | python | CopyGeneratorLossCompute._compute_loss | (self, batch, output, target, copy_attn, align) | return loss, stats | Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
output: the predicted output from the model.
target: the validation target to compare the output with.
copy_attn: the copy attention value.
align: the align info. | Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
output: the predicted output from the model.
target: the validation target to compare the output with.
copy_attn: the copy attention value.
align: the align info. | [
"Compute",
"the",
"loss",
".",
"The",
"args",
"must",
"match",
"self",
".",
"_make_shard_state",
"()",
".",
"Args",
":",
"batch",
":",
"the",
"current",
"batch",
".",
"output",
":",
"the",
"predict",
"output",
"from",
"the",
"model",
".",
"target",
":",
"the",
"validate",
"target",
"to",
"compare",
"output",
"with",
".",
"copy_attn",
":",
"the",
"copy",
"attention",
"value",
".",
"align",
":",
"the",
"align",
"info",
"."
] | def _compute_loss(self, batch, output, target, copy_attn, align):
"""
Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
output: the predicted output from the model.
target: the validation target to compare the output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(self._bottle(output),
self._bottle(copy_attn),
batch.src_map)
loss = self.criterion(scores, align, target)
scores_data = scores.data.clone()
scores_data = inputters.TextDataset.collapse_copy_scores(
self._unbottle(scores_data, batch.batch_size),
batch, self.tgt_vocab, batch.dataset.src_vocabs)
scores_data = self._bottle(scores_data)
# Correct target copy token instead of <unk>
# tgt[i] = align[i] + len(tgt_vocab)
# for i such that tgt[i] == 0 and align[i] != 0
target_data = target.data.clone()
correct_mask = target_data.eq(0) * align.data.ne(0)
correct_copy = (align.data + len(self.tgt_vocab)) * correct_mask.long()
target_data = target_data + correct_copy
# Compute sum of perplexities for stats
loss_data = loss.sum().data.clone()
stats = self._stats(loss_data, scores_data, target_data)
if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
# Compute Sequence Lengths
pad_ix = batch.dataset.fields['tgt'].vocab.stoi[inputters.PAD_WORD]
tgt_lens = batch.tgt.ne(pad_ix).float().sum(0)
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:
loss = loss.sum()
return loss, stats | [
"def",
"_compute_loss",
"(",
"self",
",",
"batch",
",",
"output",
",",
"target",
",",
"copy_attn",
",",
"align",
")",
":",
"target",
"=",
"target",
".",
"view",
"(",
"-",
"1",
")",
"align",
"=",
"align",
".",
"view",
"(",
"-",
"1",
")",
"scores",
"=",
"self",
".",
"generator",
"(",
"self",
".",
"_bottle",
"(",
"output",
")",
",",
"self",
".",
"_bottle",
"(",
"copy_attn",
")",
",",
"batch",
".",
"src_map",
")",
"loss",
"=",
"self",
".",
"criterion",
"(",
"scores",
",",
"align",
",",
"target",
")",
"scores_data",
"=",
"scores",
".",
"data",
".",
"clone",
"(",
")",
"scores_data",
"=",
"inputters",
".",
"TextDataset",
".",
"collapse_copy_scores",
"(",
"self",
".",
"_unbottle",
"(",
"scores_data",
",",
"batch",
".",
"batch_size",
")",
",",
"batch",
",",
"self",
".",
"tgt_vocab",
",",
"batch",
".",
"dataset",
".",
"src_vocabs",
")",
"scores_data",
"=",
"self",
".",
"_bottle",
"(",
"scores_data",
")",
"# Correct target copy token instead of <unk>",
"# tgt[i] = align[i] + len(tgt_vocab)",
"# for i such that tgt[i] == 0 and align[i] != 0",
"target_data",
"=",
"target",
".",
"data",
".",
"clone",
"(",
")",
"correct_mask",
"=",
"target_data",
".",
"eq",
"(",
"0",
")",
"*",
"align",
".",
"data",
".",
"ne",
"(",
"0",
")",
"correct_copy",
"=",
"(",
"align",
".",
"data",
"+",
"len",
"(",
"self",
".",
"tgt_vocab",
")",
")",
"*",
"correct_mask",
".",
"long",
"(",
")",
"target_data",
"=",
"target_data",
"+",
"correct_copy",
"# Compute sum of perplexities for stats",
"loss_data",
"=",
"loss",
".",
"sum",
"(",
")",
".",
"data",
".",
"clone",
"(",
")",
"stats",
"=",
"self",
".",
"_stats",
"(",
"loss_data",
",",
"scores_data",
",",
"target_data",
")",
"if",
"self",
".",
"normalize_by_length",
":",
"# Compute Loss as NLL divided by seq length",
"# Compute Sequence Lengths",
"pad_ix",
"=",
"batch",
".",
"dataset",
".",
"fields",
"[",
"'tgt'",
"]",
".",
"vocab",
".",
"stoi",
"[",
"inputters",
".",
"PAD_WORD",
"]",
"tgt_lens",
"=",
"batch",
".",
"tgt",
".",
"ne",
"(",
"pad_ix",
")",
".",
"float",
"(",
")",
".",
"sum",
"(",
"0",
")",
"# Compute Total Loss per sequence in batch",
"loss",
"=",
"loss",
".",
"view",
"(",
"-",
"1",
",",
"batch",
".",
"batch_size",
")",
".",
"sum",
"(",
"0",
")",
"# Divide by length of each sequence and sum",
"loss",
"=",
"torch",
".",
"div",
"(",
"loss",
",",
"tgt_lens",
")",
".",
"sum",
"(",
")",
"else",
":",
"loss",
"=",
"loss",
".",
"sum",
"(",
")",
"return",
"loss",
",",
"stats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/copy_generator.py#L176-L222 |
|
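A sketch of the normalize_by_length branch above: per-token losses are summed per sequence and divided by each target length before the final reduction. Lengths here are assumed values.

import torch

tgt_len, batch = 7, 3
loss = torch.rand(tgt_len * batch)               # per-token NLL values
tgt_lens = torch.tensor([7., 5., 6.])            # assumed sequence lengths
per_seq = loss.view(-1, batch).sum(0)            # total loss per sequence
normalized = torch.div(per_seq, tgt_lens).sum()  # length-normalized total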
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/embeddings.py | python | Embeddings.word_lut | (self) | return self.make_embedding[0][0] | word look-up table | word look-up table | [
"word",
"look",
"-",
"up",
"table"
] | def word_lut(self):
""" word look-up table """
return self.make_embedding[0][0] | [
"def",
"word_lut",
"(",
"self",
")",
":",
"return",
"self",
".",
"make_embedding",
"[",
"0",
"]",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/embeddings.py#L160-L162 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/embeddings.py | python | Embeddings.emb_luts | (self) | return self.make_embedding[0] | embedding look-up table | embedding look-up table | [
"embedding",
"look",
"-",
"up",
"table"
] | def emb_luts(self):
""" embedding look-up table """
return self.make_embedding[0] | [
"def",
"emb_luts",
"(",
"self",
")",
":",
"return",
"self",
".",
"make_embedding",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/embeddings.py#L165-L167 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/embeddings.py | python | Embeddings.load_pretrained_vectors | (self, emb_file, fixed) | Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
fixed (bool) : if true, embeddings are not updated | Load in pretrained embeddings. | [
"Load",
"in",
"pretrained",
"embeddings",
"."
] | def load_pretrained_vectors(self, emb_file, fixed):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
fixed (bool) : if true, embeddings are not updated
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data \
.copy_(pretrained[:, :self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
if fixed:
self.word_lut.weight.requires_grad = False | [
"def",
"load_pretrained_vectors",
"(",
"self",
",",
"emb_file",
",",
"fixed",
")",
":",
"if",
"emb_file",
":",
"pretrained",
"=",
"torch",
".",
"load",
"(",
"emb_file",
")",
"pretrained_vec_size",
"=",
"pretrained",
".",
"size",
"(",
"1",
")",
"if",
"self",
".",
"word_vec_size",
">",
"pretrained_vec_size",
":",
"self",
".",
"word_lut",
".",
"weight",
".",
"data",
"[",
":",
",",
":",
"pretrained_vec_size",
"]",
"=",
"pretrained",
"elif",
"self",
".",
"word_vec_size",
"<",
"pretrained_vec_size",
":",
"self",
".",
"word_lut",
".",
"weight",
".",
"data",
".",
"copy_",
"(",
"pretrained",
"[",
":",
",",
":",
"self",
".",
"word_vec_size",
"]",
")",
"else",
":",
"self",
".",
"word_lut",
".",
"weight",
".",
"data",
".",
"copy_",
"(",
"pretrained",
")",
"if",
"fixed",
":",
"self",
".",
"word_lut",
".",
"weight",
".",
"requires_grad",
"=",
"False"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/embeddings.py#L169-L187 |
||
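A sketch of the size-mismatch handling above: a pretrained matrix is copied into an embedding table that may be wider or narrower than it. Dimensions are toy assumptions.

import torch

vocab, word_vec_size = 20, 8
weight = torch.zeros(vocab, word_vec_size)        # model embedding table
pretrained = torch.randn(vocab, 6)                # smaller pretrained dim

if word_vec_size > pretrained.size(1):
    weight[:, :pretrained.size(1)] = pretrained   # fill the prefix
elif word_vec_size < pretrained.size(1):
    weight.copy_(pretrained[:, :word_vec_size])   # truncate pretrained
else:
    weight.copy_(pretrained)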
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/embeddings.py | python | Embeddings.forward | (self, source, step=None) | return source | Computes the embeddings for words and features.
Args:
source (`LongTensor`): index tensor `[len x batch x nfeat]`
Return:
`FloatTensor`: word embeddings `[len x batch x embedding_size]` | Computes the embeddings for words and features. | [
"Computes",
"the",
"embeddings",
"for",
"words",
"and",
"features",
"."
] | def forward(self, source, step=None):
"""
Computes the embeddings for words and features.
Args:
source (`LongTensor`): index tensor `[len x batch x nfeat]`
Return:
`FloatTensor`: word embeddings `[len x batch x embedding_size]`
"""
if self.position_encoding:
for i, module in enumerate(self.make_embedding._modules.values()):
if i == len(self.make_embedding._modules.values()) - 1:
source = module(source, step=step)
else:
source = module(source)
else:
source = self.make_embedding(source)
return source | [
"def",
"forward",
"(",
"self",
",",
"source",
",",
"step",
"=",
"None",
")",
":",
"if",
"self",
".",
"position_encoding",
":",
"for",
"i",
",",
"module",
"in",
"enumerate",
"(",
"self",
".",
"make_embedding",
".",
"_modules",
".",
"values",
"(",
")",
")",
":",
"if",
"i",
"==",
"len",
"(",
"self",
".",
"make_embedding",
".",
"_modules",
".",
"values",
"(",
")",
")",
"-",
"1",
":",
"source",
"=",
"module",
"(",
"source",
",",
"step",
"=",
"step",
")",
"else",
":",
"source",
"=",
"module",
"(",
"source",
")",
"else",
":",
"source",
"=",
"self",
".",
"make_embedding",
"(",
"source",
")",
"return",
"source"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/embeddings.py#L189-L207 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/sparse_losses.py | python | SparsemaxLossFunction.forward | (ctx, input, target) | return torch.clamp(x / 2 - z_k + 0.5, min=0.0) | input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes | input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes | [
"input",
"(",
"FloatTensor",
")",
":",
"n",
"x",
"num_classes",
"target",
"(",
"LongTensor",
")",
":",
"n",
"the",
"indices",
"of",
"the",
"target",
"classes"
] | def forward(ctx, input, target):
"""
input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes
"""
input_batch, classes = input.size()
target_batch = target.size(0)
aeq(input_batch, target_batch)
z_k = input.gather(1, target.unsqueeze(1)).squeeze()
tau_z, support_size = threshold_and_support(input, dim=1)
support = input > tau_z
x = torch.where(
support, input**2 - tau_z**2,
torch.tensor(0.0, device=input.device)
).sum(dim=1)
ctx.save_for_backward(input, target, tau_z)
# clamping necessary because of numerical errors: loss should be lower
# bounded by zero, but negative values near zero are possible without
# the clamp
return torch.clamp(x / 2 - z_k + 0.5, min=0.0) | [
"def",
"forward",
"(",
"ctx",
",",
"input",
",",
"target",
")",
":",
"input_batch",
",",
"classes",
"=",
"input",
".",
"size",
"(",
")",
"target_batch",
"=",
"target",
".",
"size",
"(",
"0",
")",
"aeq",
"(",
"input_batch",
",",
"target_batch",
")",
"z_k",
"=",
"input",
".",
"gather",
"(",
"1",
",",
"target",
".",
"unsqueeze",
"(",
"1",
")",
")",
".",
"squeeze",
"(",
")",
"tau_z",
",",
"support_size",
"=",
"threshold_and_support",
"(",
"input",
",",
"dim",
"=",
"1",
")",
"support",
"=",
"input",
">",
"tau_z",
"x",
"=",
"torch",
".",
"where",
"(",
"support",
",",
"input",
"**",
"2",
"-",
"tau_z",
"**",
"2",
",",
"torch",
".",
"tensor",
"(",
"0.0",
",",
"device",
"=",
"input",
".",
"device",
")",
")",
".",
"sum",
"(",
"dim",
"=",
"1",
")",
"ctx",
".",
"save_for_backward",
"(",
"input",
",",
"target",
",",
"tau_z",
")",
"# clamping necessary because of numerical errors: loss should be lower",
"# bounded by zero, but negative values near zero are possible without",
"# the clamp",
"return",
"torch",
".",
"clamp",
"(",
"x",
"/",
"2",
"-",
"z_k",
"+",
"0.5",
",",
"min",
"=",
"0.0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/sparse_losses.py#L11-L31 |
|
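A numeric sketch of the loss above for a single example, finding the support threshold tau by the standard sort-based rule rather than the library's threshold_and_support helper (illustrative only):

import torch

z = torch.tensor([2.0, 1.0, 0.1])   # scores for one example
k = 0                               # target class index

z_sorted, _ = torch.sort(z, descending=True)
cssv = z_sorted.cumsum(0) - 1.0
rho = (z_sorted * torch.arange(1, z.numel() + 1) > cssv).sum()
tau = cssv[rho - 1] / rho.float()

support = z > tau
loss = ((z[support] ** 2 - tau ** 2).sum() / 2 - z[k] + 0.5).clamp(min=0.0)
# here sparsemax puts all mass on class 0, so the loss is 0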
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py | python | seq_linear | (linear, x) | return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) | linear transform for 3-d tensor | linear transform for 3-d tensor | [
"linear",
"transform",
"for",
"3",
"-",
"d",
"tensor"
] | def seq_linear(linear, x):
""" linear transform for 3-d tensor """
batch, hidden_size, length, _ = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view(
batch * length, hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) | [
"def",
"seq_linear",
"(",
"linear",
",",
"x",
")",
":",
"batch",
",",
"hidden_size",
",",
"length",
",",
"_",
"=",
"x",
".",
"size",
"(",
")",
"h",
"=",
"linear",
"(",
"torch",
".",
"transpose",
"(",
"x",
",",
"1",
",",
"2",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"batch",
"*",
"length",
",",
"hidden_size",
")",
")",
"return",
"torch",
".",
"transpose",
"(",
"h",
".",
"view",
"(",
"batch",
",",
"length",
",",
"hidden_size",
",",
"1",
")",
",",
"1",
",",
"2",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py#L11-L16 |
|
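A sketch of the shape round-trip in seq_linear above: a Linear layer applied across a conv-style (batch, hidden, length, 1) tensor and reshaped back. Sizes are assumptions.

import torch
import torch.nn as nn

batch, hidden, length = 2, 8, 5
linear = nn.Linear(hidden, hidden)
x = torch.randn(batch, hidden, length, 1)

h = linear(x.transpose(1, 2).contiguous().view(batch * length, hidden))
out = h.view(batch, length, hidden, 1).transpose(1, 2)
assert out.shape == x.shape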
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py | python | ConvMultiStepAttention.apply_mask | (self, mask) | Apply mask | Apply mask | [
"Apply",
"mask"
] | def apply_mask(self, mask):
""" Apply mask """
self.mask = mask | [
"def",
"apply_mask",
"(",
"self",
",",
"mask",
")",
":",
"self",
".",
"mask",
"=",
"mask"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py#L34-L36 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py | python | ConvMultiStepAttention.forward | (self, base_target_emb, input_from_dec, encoder_out_top,
encoder_out_combine) | return context_output, attn | Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode | Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode | [
"Args",
":",
"base_target_emb",
":",
"target",
"emb",
"tensor",
"input",
":",
"output",
"of",
"decode",
"conv",
"encoder_out_t",
":",
"the",
"key",
"matrix",
"for",
"calculation",
"of",
"attetion",
"weight",
"which",
"is",
"the",
"top",
"output",
"of",
"encode",
"conv",
"encoder_out_combine",
":",
"the",
"value",
"matrix",
"for",
"the",
"attention",
"-",
"weighted",
"sum",
"which",
"is",
"the",
"combination",
"of",
"base",
"emb",
"and",
"top",
"output",
"of",
"encode"
] | def forward(self, base_target_emb, input_from_dec, encoder_out_top,
encoder_out_combine):
"""
Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode
"""
# checks
# batch, channel, height, width = base_target_emb.size()
batch, _, height, _ = base_target_emb.size()
# batch_, channel_, height_, width_ = input_from_dec.size()
batch_, _, height_, _ = input_from_dec.size()
aeq(batch, batch_)
aeq(height, height_)
# enc_batch, enc_channel, enc_height = encoder_out_top.size()
enc_batch, _, enc_height = encoder_out_top.size()
# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
enc_batch_, _, enc_height_ = encoder_out_combine.size()
aeq(enc_batch, enc_batch_)
aeq(enc_height, enc_height_)
preatt = seq_linear(self.linear_in, input_from_dec)
target = (base_target_emb + preatt) * SCALE_WEIGHT
target = torch.squeeze(target, 3)
target = torch.transpose(target, 1, 2)
pre_attn = torch.bmm(target, encoder_out_top)
if self.mask is not None:
pre_attn.data.masked_fill_(self.mask, -float('inf'))
pre_attn = pre_attn.transpose(0, 2)
attn = F.softmax(pre_attn, dim=-1)
attn = attn.transpose(0, 2).contiguous()
context_output = torch.bmm(
attn, torch.transpose(encoder_out_combine, 1, 2))
context_output = torch.transpose(
torch.unsqueeze(context_output, 3), 1, 2)
return context_output, attn | [
"def",
"forward",
"(",
"self",
",",
"base_target_emb",
",",
"input_from_dec",
",",
"encoder_out_top",
",",
"encoder_out_combine",
")",
":",
"# checks",
"# batch, channel, height, width = base_target_emb.size()",
"batch",
",",
"_",
",",
"height",
",",
"_",
"=",
"base_target_emb",
".",
"size",
"(",
")",
"# batch_, channel_, height_, width_ = input_from_dec.size()",
"batch_",
",",
"_",
",",
"height_",
",",
"_",
"=",
"input_from_dec",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"height",
",",
"height_",
")",
"# enc_batch, enc_channel, enc_height = encoder_out_top.size()",
"enc_batch",
",",
"_",
",",
"enc_height",
"=",
"encoder_out_top",
".",
"size",
"(",
")",
"# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()",
"enc_batch_",
",",
"_",
",",
"enc_height_",
"=",
"encoder_out_combine",
".",
"size",
"(",
")",
"aeq",
"(",
"enc_batch",
",",
"enc_batch_",
")",
"aeq",
"(",
"enc_height",
",",
"enc_height_",
")",
"preatt",
"=",
"seq_linear",
"(",
"self",
".",
"linear_in",
",",
"input_from_dec",
")",
"target",
"=",
"(",
"base_target_emb",
"+",
"preatt",
")",
"*",
"SCALE_WEIGHT",
"target",
"=",
"torch",
".",
"squeeze",
"(",
"target",
",",
"3",
")",
"target",
"=",
"torch",
".",
"transpose",
"(",
"target",
",",
"1",
",",
"2",
")",
"pre_attn",
"=",
"torch",
".",
"bmm",
"(",
"target",
",",
"encoder_out_top",
")",
"if",
"self",
".",
"mask",
"is",
"not",
"None",
":",
"pre_attn",
".",
"data",
".",
"masked_fill_",
"(",
"self",
".",
"mask",
",",
"-",
"float",
"(",
"'inf'",
")",
")",
"pre_attn",
"=",
"pre_attn",
".",
"transpose",
"(",
"0",
",",
"2",
")",
"attn",
"=",
"F",
".",
"softmax",
"(",
"pre_attn",
",",
"dim",
"=",
"-",
"1",
")",
"attn",
"=",
"attn",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"contiguous",
"(",
")",
"context_output",
"=",
"torch",
".",
"bmm",
"(",
"attn",
",",
"torch",
".",
"transpose",
"(",
"encoder_out_combine",
",",
"1",
",",
"2",
")",
")",
"context_output",
"=",
"torch",
".",
"transpose",
"(",
"torch",
".",
"unsqueeze",
"(",
"context_output",
",",
"3",
")",
",",
"1",
",",
"2",
")",
"return",
"context_output",
",",
"attn"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/conv_multi_step_attention.py#L38-L83 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/weight_norm.py | python | get_var_maybe_avg | (namespace, var_name, training, polyak_decay) | utility for retrieving polyak averaged params
Update average | utility for retrieving polyak averaged params
Update average | [
"utility",
"for",
"retrieving",
"polyak",
"averaged",
"params",
"Update",
"average"
] | def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
""" utility for retrieving polyak averaged params
Update average
"""
v = getattr(namespace, var_name)
v_avg = getattr(namespace, var_name + '_avg')
v_avg -= (1 - polyak_decay) * (v_avg - v.data)
if training:
return v
else:
return v_avg | [
"def",
"get_var_maybe_avg",
"(",
"namespace",
",",
"var_name",
",",
"training",
",",
"polyak_decay",
")",
":",
"v",
"=",
"getattr",
"(",
"namespace",
",",
"var_name",
")",
"v_avg",
"=",
"getattr",
"(",
"namespace",
",",
"var_name",
"+",
"'_avg'",
")",
"v_avg",
"-=",
"(",
"1",
"-",
"polyak_decay",
")",
"*",
"(",
"v_avg",
"-",
"v",
".",
"data",
")",
"if",
"training",
":",
"return",
"v",
"else",
":",
"return",
"v_avg"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/weight_norm.py#L8-L19 |
||
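A sketch of the Polyak (exponential moving) average maintained above: the in-place update v_avg -= (1 - decay) * (v_avg - v) is the same as decay * v_avg + (1 - decay) * v.

import torch

decay = 0.999
v = torch.randn(4)        # current parameter value
v_avg = v.clone()         # running average
for _ in range(10):
    v = v + 0.01 * torch.randn(4)       # pretend training update
    v_avg -= (1 - decay) * (v_avg - v)  # Polyak update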
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/modules/weight_norm.py | python | get_vars_maybe_avg | (namespace, var_names, training, polyak_decay) | return vars | utility for retrieving polyak averaged params | utility for retrieving polyak averaged params | [
"utility",
"for",
"retrieving",
"polyak",
"averaged",
"params"
] | def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
""" utility for retrieving polyak averaged params """
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(
namespace, vn, training, polyak_decay))
return vars | [
"def",
"get_vars_maybe_avg",
"(",
"namespace",
",",
"var_names",
",",
"training",
",",
"polyak_decay",
")",
":",
"vars",
"=",
"[",
"]",
"for",
"vn",
"in",
"var_names",
":",
"vars",
".",
"append",
"(",
"get_var_maybe_avg",
"(",
"namespace",
",",
"vn",
",",
"training",
",",
"polyak_decay",
")",
")",
"return",
"vars"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/modules/weight_norm.py#L22-L28 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase.__reduce_ex__ | (self, proto) | return super(DatasetBase, self).__reduce_ex__() | This is a hack. Something is broken with torch pickle. | This is a hack. Something is broken with torch pickle. | [
"This",
"is",
"a",
"hack",
".",
"Something",
"is",
"broken",
"with",
"torch",
"pickle",
"."
] | def __reduce_ex__(self, proto):
"This is a hack. Something is broken with torch pickle."
return super(DatasetBase, self).__reduce_ex__() | [
"def",
"__reduce_ex__",
"(",
"self",
",",
"proto",
")",
":",
"return",
"super",
"(",
"DatasetBase",
",",
"self",
")",
".",
"__reduce_ex__",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L37-L39 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase.load_fields | (self, vocab_dict) | Load fields from vocab.pt, and set the `fields` attribute.
Args:
vocab_dict (dict): a dict of loaded vocab from vocab.pt file. | Load fields from vocab.pt, and set the `fields` attribute. | [
"Load",
"fields",
"from",
"vocab",
".",
"pt",
"and",
"set",
"the",
"fields",
"attribute",
"."
] | def load_fields(self, vocab_dict):
""" Load fields from vocab.pt, and set the `fields` attribute.
Args:
vocab_dict (dict): a dict of loaded vocab from vocab.pt file.
"""
fields = onmt.inputters.inputter.load_fields_from_vocab(
vocab_dict.items(), self.data_type)
self.fields = dict([(k, f) for (k, f) in fields.items()
if k in self.examples[0].__dict__]) | [
"def",
"load_fields",
"(",
"self",
",",
"vocab_dict",
")",
":",
"fields",
"=",
"onmt",
".",
"inputters",
".",
"inputter",
".",
"load_fields_from_vocab",
"(",
"vocab_dict",
".",
"items",
"(",
")",
",",
"self",
".",
"data_type",
")",
"self",
".",
"fields",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"f",
")",
"for",
"(",
"k",
",",
"f",
")",
"in",
"fields",
".",
"items",
"(",
")",
"if",
"k",
"in",
"self",
".",
"examples",
"[",
"0",
"]",
".",
"__dict__",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L41-L50 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase.extract_text_features | (tokens) | return tuple(words), features, n_feats - 1 | Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features. | Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features. | [
"Args",
":",
"tokens",
":",
"A",
"list",
"of",
"tokens",
"where",
"each",
"token",
"consists",
"of",
"a",
"word",
"optionally",
"followed",
"by",
"u",
"│",
"-",
"delimited",
"features",
".",
"Returns",
":",
"A",
"sequence",
"of",
"words",
"a",
"sequence",
"of",
"features",
"and",
"num",
"of",
"features",
"."
] | def extract_text_features(tokens):
"""
Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features.
"""
if not tokens:
return [], [], -1
specials = [PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD]
words = []
features = []
n_feats = None
for token in tokens:
split_token = token.split(u"│")
assert all([special != split_token[0] for special in specials]), \
"Dataset cannot contain Special Tokens"
if split_token[0]:
words += [split_token[0]]
features += [split_token[1:]]
if n_feats is None:
n_feats = len(split_token)
else:
assert len(split_token) == n_feats, \
"all words must have the same number of features"
features = list(zip(*features))
return tuple(words), features, n_feats - 1 | [
"def",
"extract_text_features",
"(",
"tokens",
")",
":",
"if",
"not",
"tokens",
":",
"return",
"[",
"]",
",",
"[",
"]",
",",
"-",
"1",
"specials",
"=",
"[",
"PAD_WORD",
",",
"UNK_WORD",
",",
"BOS_WORD",
",",
"EOS_WORD",
"]",
"words",
"=",
"[",
"]",
"features",
"=",
"[",
"]",
"n_feats",
"=",
"None",
"for",
"token",
"in",
"tokens",
":",
"split_token",
"=",
"token",
".",
"split",
"(",
"u\"│\")",
"",
"assert",
"all",
"(",
"[",
"special",
"!=",
"split_token",
"[",
"0",
"]",
"for",
"special",
"in",
"specials",
"]",
")",
",",
"\"Dataset cannot contain Special Tokens\"",
"if",
"split_token",
"[",
"0",
"]",
":",
"words",
"+=",
"[",
"split_token",
"[",
"0",
"]",
"]",
"features",
"+=",
"[",
"split_token",
"[",
"1",
":",
"]",
"]",
"if",
"n_feats",
"is",
"None",
":",
"n_feats",
"=",
"len",
"(",
"split_token",
")",
"else",
":",
"assert",
"len",
"(",
"split_token",
")",
"==",
"n_feats",
",",
"\"all words must have the same number of features\"",
"features",
"=",
"list",
"(",
"zip",
"(",
"*",
"features",
")",
")",
"return",
"tuple",
"(",
"words",
")",
",",
"features",
",",
"n_feats",
"-",
"1"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L53-L83 |
|
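An example of the token layout the method above expects: each token is a word optionally followed by "│"-separated feature values.

token = u"runs│VERB│present"
split_token = token.split(u"│")
word, feats = split_token[0], split_token[1:]
# word == "runs", feats == ["VERB", "present"]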
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase._join_dicts | (self, *args) | return dict(chain(*[d.items() for d in args])) | Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys. | Args:
dictionaries with disjoint keys. | [
"Args",
":",
"dictionaries",
"with",
"disjoint",
"keys",
"."
] | def _join_dicts(self, *args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args])) | [
"def",
"_join_dicts",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"dict",
"(",
"chain",
"(",
"*",
"[",
"d",
".",
"items",
"(",
")",
"for",
"d",
"in",
"args",
"]",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L87-L95 |
|
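A tiny example of the chain-based dict merge used above, assuming disjoint keys:

from itertools import chain

dicts = ({"a": 1}, {"b": 2})
merged = dict(chain(*[d.items() for d in dicts]))
# merged == {"a": 1, "b": 2}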
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase._peek | (self, seq) | return first, chain([first], seq) | Args:
seq: an iterator.
Returns:
the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator. | Args:
seq: an iterator. | [
"Args",
":",
"seq",
":",
"an",
"iterator",
"."
] | def _peek(self, seq):
"""
Args:
seq: an iterator.
Returns:
the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator.
"""
first = next(seq)
return first, chain([first], seq) | [
"def",
"_peek",
"(",
"self",
",",
"seq",
")",
":",
"first",
"=",
"next",
"(",
"seq",
")",
"return",
"first",
",",
"chain",
"(",
"[",
"first",
"]",
",",
"seq",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L97-L108 |
|
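An example of the peek-and-rechain trick above: the first item is read, then chained back so the full sequence can still be consumed.

from itertools import chain

seq = iter([1, 2, 3])
first = next(seq)
seq = chain([first], seq)
# first == 1 and list(seq) == [1, 2, 3]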
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py | python | DatasetBase._construct_example_fromlist | (self, data, fields) | return ex | Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`.
Returns:
the created `Example` object. | Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`. | [
"Args",
":",
"data",
":",
"the",
"data",
"to",
"be",
"set",
"as",
"the",
"value",
"of",
"the",
"attributes",
"of",
"the",
"to",
"-",
"be",
"-",
"created",
"Example",
"associating",
"with",
"respective",
"Field",
"objects",
"with",
"same",
"key",
".",
"fields",
":",
"a",
"dict",
"of",
"torchtext",
".",
"data",
".",
"Field",
"objects",
".",
"The",
"keys",
"are",
"attributes",
"of",
"the",
"to",
"-",
"be",
"-",
"created",
"Example",
"."
] | def _construct_example_fromlist(self, data, fields):
"""
Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`.
Returns:
the created `Example` object.
"""
ex = torchtext.data.Example()
for (name, field), val in zip(fields, data):
if field is not None:
setattr(ex, name, field.preprocess(val))
else:
setattr(ex, name, val)
return ex | [
"def",
"_construct_example_fromlist",
"(",
"self",
",",
"data",
",",
"fields",
")",
":",
"ex",
"=",
"torchtext",
".",
"data",
".",
"Example",
"(",
")",
"for",
"(",
"name",
",",
"field",
")",
",",
"val",
"in",
"zip",
"(",
"fields",
",",
"data",
")",
":",
"if",
"field",
"is",
"not",
"None",
":",
"setattr",
"(",
"ex",
",",
"name",
",",
"field",
".",
"preprocess",
"(",
"val",
")",
")",
"else",
":",
"setattr",
"(",
"ex",
",",
"name",
",",
"val",
")",
"return",
"ex"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/dataset_base.py#L110-L128 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.sort_key | (self, ex) | return len(ex.src) | Sort using length of source sentences. | Sort using length of source sentences. | [
"Sort",
"using",
"length",
"of",
"source",
"sentences",
"."
] | def sort_key(self, ex):
""" Sort using length of source sentences. """
# Default to a balanced sort, prioritizing tgt len match.
# TODO: make this configurable.
if hasattr(ex, "tgt"):
return len(ex.src), len(ex.tgt)
return len(ex.src) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"# Default to a balanced sort, prioritizing tgt len match.",
"# TODO: make this configurable.",
"if",
"hasattr",
"(",
"ex",
",",
"\"tgt\"",
")",
":",
"return",
"len",
"(",
"ex",
".",
"src",
")",
",",
"len",
"(",
"ex",
".",
"tgt",
")",
"return",
"len",
"(",
"ex",
".",
"src",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L97-L103 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.collapse_copy_scores | (scores, batch, tgt_vocab, src_vocabs) | return scores | Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous. | Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous. | [
"Given",
"scores",
"from",
"an",
"expanded",
"dictionary",
"corresponeding",
"to",
"a",
"batch",
"sums",
"together",
"copies",
"with",
"a",
"dictionary",
"word",
"when",
"it",
"is",
"ambigious",
"."
] | def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):
"""
Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
blank = []
fill = []
index = batch.indices.data[b]
src_vocab = src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
scores[:, b].index_add_(1, fill,
scores[:, b].index_select(1, blank))
scores[:, b].index_fill_(1, blank, 1e-10)
return scores | [
"def",
"collapse_copy_scores",
"(",
"scores",
",",
"batch",
",",
"tgt_vocab",
",",
"src_vocabs",
")",
":",
"offset",
"=",
"len",
"(",
"tgt_vocab",
")",
"for",
"b",
"in",
"range",
"(",
"batch",
".",
"batch_size",
")",
":",
"blank",
"=",
"[",
"]",
"fill",
"=",
"[",
"]",
"index",
"=",
"batch",
".",
"indices",
".",
"data",
"[",
"b",
"]",
"src_vocab",
"=",
"src_vocabs",
"[",
"index",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"src_vocab",
")",
")",
":",
"sw",
"=",
"src_vocab",
".",
"itos",
"[",
"i",
"]",
"ti",
"=",
"tgt_vocab",
".",
"stoi",
"[",
"sw",
"]",
"if",
"ti",
"!=",
"0",
":",
"blank",
".",
"append",
"(",
"offset",
"+",
"i",
")",
"fill",
".",
"append",
"(",
"ti",
")",
"if",
"blank",
":",
"blank",
"=",
"torch",
".",
"Tensor",
"(",
"blank",
")",
".",
"type_as",
"(",
"batch",
".",
"indices",
".",
"data",
")",
"fill",
"=",
"torch",
".",
"Tensor",
"(",
"fill",
")",
".",
"type_as",
"(",
"batch",
".",
"indices",
".",
"data",
")",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_add_",
"(",
"1",
",",
"fill",
",",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_select",
"(",
"1",
",",
"blank",
")",
")",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_fill_",
"(",
"1",
",",
"blank",
",",
"1e-10",
")",
"return",
"scores"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L106-L130 |
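Usage note: a self-contained toy walk-through of the merging step above, assuming the same extended-vocabulary layout (target vocabulary in the first len(tgt_vocab) columns, the per-example source vocabulary starting at `offset`); both vocabularies here are made up.

import torch

tgt_itos = ["<unk>", "the", "cat"]        # extended columns 0..2
src_itos = ["<unk>", "cat", "zyx"]        # copy columns 3..5 (offset = 3)
tgt_stoi = {w: i for i, w in enumerate(tgt_itos)}

offset = len(tgt_itos)
scores = torch.full((1, 1, offset + len(src_itos)), 0.1)  # (step, batch, vocab)

blank, fill = [], []
for i in range(1, len(src_itos)):          # skip <unk> at index 0
    ti = tgt_stoi.get(src_itos[i], 0)
    if ti != 0:                            # word exists in both vocabs
        blank.append(offset + i)           # copy column to merge away
        fill.append(ti)                    # target column absorbing it

b, f = torch.tensor(blank), torch.tensor(fill)
scores[:, 0].index_add_(1, f, scores[:, 0].index_select(1, b))
scores[:, 0].index_fill_(1, b, 1e-10)
print(scores)  # the copy mass for "cat" now sits on target column 2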
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.make_text_examples_nfeats_tpl | (text_iter, text_path, truncate, side) | return (examples_iter, num_feats) | Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path (str): location of a src or tgt file, or None.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Returns:
(example_dict iterator, num_feats) tuple. | Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path (str): location of a src or tgt file, or None.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt". | [
"Args",
":",
"text_iter",
"(",
"iterator",
")",
":",
"an",
"iterator",
"(",
"or",
"None",
")",
"that",
"we",
"can",
"loop",
"over",
"to",
"read",
"examples",
".",
"It",
"may",
"be",
"an",
"openned",
"file",
"a",
"string",
"list",
"etc",
"...",
"text_path",
"(",
"str",
")",
":",
"path",
"to",
"file",
"or",
"None",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"or",
"tgt",
"file",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"sequence",
"length",
"(",
"0",
"for",
"unlimited",
")",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"."
] | def make_text_examples_nfeats_tpl(text_iter, text_path, truncate, side):
"""
Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path (str): location of a src or tgt file, or None.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Returns:
(example_dict iterator, num_feats) tuple.
"""
assert side in ['src', 'tgt']
if text_iter is None:
if text_path is not None:
text_iter = TextDataset.make_text_iterator_from_file(text_path)
else:
return (None, 0)
# All examples have same number of features, so we peek first one
# to get the num_feats.
examples_nfeats_iter = \
TextDataset.make_examples(text_iter, truncate, side)
first_ex = next(examples_nfeats_iter)
num_feats = first_ex[1]
# Chain back the first element - we only want to peek it.
examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)
examples_iter = (ex for ex, nfeats in examples_nfeats_iter)
return (examples_iter, num_feats) | [
"def",
"make_text_examples_nfeats_tpl",
"(",
"text_iter",
",",
"text_path",
",",
"truncate",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"if",
"text_iter",
"is",
"None",
":",
"if",
"text_path",
"is",
"not",
"None",
":",
"text_iter",
"=",
"TextDataset",
".",
"make_text_iterator_from_file",
"(",
"text_path",
")",
"else",
":",
"return",
"(",
"None",
",",
"0",
")",
"# All examples have same number of features, so we peek first one",
"# to get the num_feats.",
"examples_nfeats_iter",
"=",
"TextDataset",
".",
"make_examples",
"(",
"text_iter",
",",
"truncate",
",",
"side",
")",
"first_ex",
"=",
"next",
"(",
"examples_nfeats_iter",
")",
"num_feats",
"=",
"first_ex",
"[",
"1",
"]",
"# Chain back the first element - we only want to peek it.",
"examples_nfeats_iter",
"=",
"chain",
"(",
"[",
"first_ex",
"]",
",",
"examples_nfeats_iter",
")",
"examples_iter",
"=",
"(",
"ex",
"for",
"ex",
",",
"nfeats",
"in",
"examples_nfeats_iter",
")",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L133-L167 |
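Usage note: the core idiom above is peeking the first element of a generator and chaining it back; a minimal sketch with a made-up generator.

from itertools import chain

def gen():
    for n in (3, 1, 4):
        yield {"src": ["tok"] * n}, 0   # (example_dict, nfeats) pairs

it = gen()
first = next(it)            # peek to learn the feature count
num_feats = first[1]
it = chain([first], it)     # push the peeked element back in front
print(num_feats, [ex["src"] for ex, _ in it])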
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.make_examples | (text_iter, truncate, side) | Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Yields:
(example_dict, n_feats) tuples for each line. | Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt". | [
"Args",
":",
"text_iter",
"(",
"iterator",
")",
":",
"iterator",
"of",
"text",
"sequences",
"truncate",
"(",
"int",
")",
":",
"maximum",
"sequence",
"length",
"(",
"0",
"for",
"unlimited",
")",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"."
] | def make_examples(text_iter, truncate, side):
"""
Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Yields:
(example_dict, n_feats) tuples for each line.
"""
for i, line in enumerate(text_iter):
line = line.strip().split()
if truncate:
line = line[:truncate]
words, feats, n_feats = \
TextDataset.extract_text_features(line)
example_dict = {side: words, "indices": i}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
yield example_dict, n_feats | [
"def",
"make_examples",
"(",
"text_iter",
",",
"truncate",
",",
"side",
")",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"text_iter",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"truncate",
":",
"line",
"=",
"line",
"[",
":",
"truncate",
"]",
"words",
",",
"feats",
",",
"n_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"line",
")",
"example_dict",
"=",
"{",
"side",
":",
"words",
",",
"\"indices\"",
":",
"i",
"}",
"if",
"feats",
":",
"prefix",
"=",
"side",
"+",
"\"_feat_\"",
"example_dict",
".",
"update",
"(",
"(",
"prefix",
"+",
"str",
"(",
"j",
")",
",",
"f",
")",
"for",
"j",
",",
"f",
"in",
"enumerate",
"(",
"feats",
")",
")",
"yield",
"example_dict",
",",
"n_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L170-L193 |
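Usage note: a sketch of driving such an example generator from an in-memory list of lines; `extract_text_features` is stood in for by a plain whitespace split (the real helper also parses per-token features).

def toy_make_examples(text_iter, truncate, side):
    for i, line in enumerate(text_iter):
        words = line.strip().split()
        if truncate:
            words = words[:truncate]
        yield {side: words, "indices": i}, 0

lines = ["the cat sat", "a very long source sentence here"]
for ex, n_feats in toy_make_examples(lines, truncate=4, side="src"):
    print(ex["indices"], ex["src"], n_feats)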
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
"(",
"int",
")",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
"(",
"int",
")",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
fields["src"] = torchtext.data.Field(
pad_token=PAD_WORD,
include_lengths=True)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
#pdb.set_trace()
src_size = max([t.size(0) for t in data])
src_vocab_size = int(max([t.max() for t in data])) + 1
try:
alignment = torch.zeros(src_size, len(data), src_vocab_size)
except RuntimeError:
# Print the offending sizes for debugging, then re-raise.
print(src_size)
print(len(data))
print(src_vocab_size)
raise
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
#pdb.set_trace()
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
",",
"include_lengths",
"=",
"True",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"#pdb.set_trace()",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"int",
"(",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
")",
"+",
"1",
"try",
":",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"except",
":",
"print",
"(",
"src_size",
")",
"print",
"(",
"len",
"(",
"data",
")",
")",
"print",
"(",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"#pdb.set_trace()",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L202-L274 |
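Usage note: a toy demonstration of the `make_src` postprocessing defined above, showing how per-example `src_map` index vectors become one one-hot (src_len, batch, extended_vocab) tensor.

import torch

data = [torch.tensor([1, 2, 1]), torch.tensor([3, 1])]   # two examples
src_size = max(t.size(0) for t in data)                  # 3
src_vocab_size = int(max(t.max() for t in data)) + 1     # 4

alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
    for j, t in enumerate(sent):
        alignment[j, i, t] = 1

print(alignment.shape)         # torch.Size([3, 2, 4])
print(alignment[:, 1].sum(0))  # tensor([0., 1., 0., 1.]) for example 1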
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | TextDataset.get_num_features | (corpus_file, side) | return num_feats | Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same. | [
"Peek",
"one",
"line",
"and",
"get",
"number",
"of",
"features",
"of",
"it",
".",
"(",
"All",
"lines",
"must",
"have",
"same",
"number",
"of",
"features",
")",
".",
"For",
"text",
"corpus",
"both",
"sides",
"are",
"in",
"text",
"form",
"thus",
"it",
"works",
"the",
"same",
"."
] | def get_num_features(corpus_file, side):
"""
Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = TextDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L277-L295 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.__init__ | (self, corpus_path, line_truncate, side, shard_size,
assoc_iter=None) | Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with. | Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with. | [
"Args",
":",
"corpus_path",
":",
"the",
"corpus",
"file",
"path",
".",
"line_truncate",
":",
"the",
"maximum",
"length",
"of",
"a",
"line",
"to",
"read",
".",
"0",
"for",
"unlimited",
".",
"side",
":",
"src",
"or",
"tgt",
".",
"shard_size",
":",
"the",
"shard",
"size",
"0",
"means",
"not",
"sharding",
"the",
"file",
".",
"assoc_iter",
":",
"if",
"not",
"None",
"it",
"is",
"the",
"associate",
"iterator",
"that",
"this",
"iterator",
"should",
"align",
"its",
"step",
"with",
"."
] | def __init__(self, corpus_path, line_truncate, side, shard_size,
assoc_iter=None):
"""
Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with.
"""
try:
# The codecs module seems to have bugs with seek()/tell(),
# so we use io.open().
self.corpus = io.open(corpus_path, "r", encoding="utf-8")
except IOError:
sys.stderr.write("Failed to open corpus file: %s" % corpus_path)
sys.exit(1)
self.line_truncate = line_truncate
self.side = side
self.shard_size = shard_size
self.assoc_iter = assoc_iter
self.last_pos = 0
self.line_index = -1
self.eof = False | [
"def",
"__init__",
"(",
"self",
",",
"corpus_path",
",",
"line_truncate",
",",
"side",
",",
"shard_size",
",",
"assoc_iter",
"=",
"None",
")",
":",
"try",
":",
"# The codecs module seems to have bugs with seek()/tell(),",
"# so we use io.open().",
"self",
".",
"corpus",
"=",
"io",
".",
"open",
"(",
"corpus_path",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"except",
"IOError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Failed to open corpus file: %s\"",
"%",
"corpus_path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"self",
".",
"line_truncate",
"=",
"line_truncate",
"self",
".",
"side",
"=",
"side",
"self",
".",
"shard_size",
"=",
"shard_size",
"self",
".",
"assoc_iter",
"=",
"assoc_iter",
"self",
".",
"last_pos",
"=",
"0",
"self",
".",
"line_index",
"=",
"-",
"1",
"self",
".",
"eof",
"=",
"False"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L326-L352 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.__iter__ | (self) | Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`. | Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`. | [
"Iterator",
"of",
"(",
"example_dict",
"nfeats",
")",
".",
"On",
"each",
"call",
"it",
"iterates",
"over",
"as",
"many",
"(",
"example_dict",
"nfeats",
")",
"tuples",
"until",
"this",
"shard",
"s",
"size",
"equals",
"to",
"or",
"approximates",
"self",
".",
"shard_size",
"."
] | def __iter__(self):
"""
Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`.
"""
iteration_index = -1
if self.assoc_iter is not None:
# We have an associate iterator; just yield tuples
# until we run in parallel with it.
while self.line_index < self.assoc_iter.line_index:
line = self.corpus.readline()
if line == '':
raise AssertionError(
"Two corpuses must have same number of lines!")
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
if self.assoc_iter.eof:
self.eof = True
self.corpus.close()
else:
# Yield tuples until this shard's size reaches the threshold.
self.corpus.seek(self.last_pos)
while True:
if self.shard_size != 0 and self.line_index % 64 == 0:
# This check is time-consuming on Py2 (but
# quite fast on Py3). So we don't bother
# to check every line; instead we check every 64
# lines. Thus we do not divide exactly at
# `shard_size`, but the difference is small.
cur_pos = self.corpus.tell()
if cur_pos >= self.last_pos + self.shard_size:
self.last_pos = cur_pos
return
line = self.corpus.readline()
if line == '':
self.eof = True
self.corpus.close()
return
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index) | [
"def",
"__iter__",
"(",
"self",
")",
":",
"iteration_index",
"=",
"-",
"1",
"if",
"self",
".",
"assoc_iter",
"is",
"not",
"None",
":",
"# We have associate iterator, just yields tuples",
"# util we run parallel with it.",
"while",
"self",
".",
"line_index",
"<",
"self",
".",
"assoc_iter",
".",
"line_index",
":",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"raise",
"AssertionError",
"(",
"\"Two corpuses must have same number of lines!\"",
")",
"self",
".",
"line_index",
"+=",
"1",
"iteration_index",
"+=",
"1",
"yield",
"self",
".",
"_example_dict_iter",
"(",
"line",
",",
"iteration_index",
")",
"if",
"self",
".",
"assoc_iter",
".",
"eof",
":",
"self",
".",
"eof",
"=",
"True",
"self",
".",
"corpus",
".",
"close",
"(",
")",
"else",
":",
"# Yield tuples util this shard's size reaches the threshold.",
"self",
".",
"corpus",
".",
"seek",
"(",
"self",
".",
"last_pos",
")",
"while",
"True",
":",
"if",
"self",
".",
"shard_size",
"!=",
"0",
"and",
"self",
".",
"line_index",
"%",
"64",
"==",
"0",
":",
"# This part of check is time consuming on Py2 (but",
"# it is quite fast on Py3, weird!). So we don't bother",
"# to check for very line. Instead we chekc every 64",
"# lines. Thus we are not dividing exactly per",
"# `shard_size`, but it is not too much difference.",
"cur_pos",
"=",
"self",
".",
"corpus",
".",
"tell",
"(",
")",
"if",
"cur_pos",
">=",
"self",
".",
"last_pos",
"+",
"self",
".",
"shard_size",
":",
"self",
".",
"last_pos",
"=",
"cur_pos",
"return",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"self",
".",
"eof",
"=",
"True",
"self",
".",
"corpus",
".",
"close",
"(",
")",
"return",
"self",
".",
"line_index",
"+=",
"1",
"iteration_index",
"+=",
"1",
"yield",
"self",
".",
"_example_dict_iter",
"(",
"line",
",",
"iteration_index",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L354-L400 |
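Usage note: a simplified, self-contained sketch of the byte-offset sharding scheme above: remember where a shard ended with tell() and resume with seek(); here the boundary check runs every 4 lines instead of every 64.

import io, tempfile

path = tempfile.mkstemp()[1]
with io.open(path, "w", encoding="utf-8") as f:
    f.write("".join("line %d\n" % i for i in range(10)))

def read_shard(path, start, shard_size):
    with io.open(path, "r", encoding="utf-8") as f:
        f.seek(start)
        n = 0
        while True:
            if n % 4 == 0 and f.tell() >= start + shard_size:
                return f.tell(), False       # shard full, not yet EOF
            line = f.readline()
            if line == "":
                return f.tell(), True        # EOF
            print(line.strip())
            n += 1

pos, eof = 0, False
while not eof:
    pos, eof = read_shard(path, pos, shard_size=30)
    print("-- shard boundary --")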
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.hit_end | (self) | return self.eof | ? | ? | [
"?"
] | def hit_end(self):
""" ? """
return self.eof | [
"def",
"hit_end",
"(",
"self",
")",
":",
"return",
"self",
".",
"eof"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L402-L404 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.num_feats | (self) | return self.n_feats | We peek the first line and seek back to
the beginning of the file. | We peek the first line and seek back to
the beginning of the file. | [
"We",
"peek",
"the",
"first",
"line",
"and",
"seek",
"back",
"to",
"the",
"beginning",
"of",
"the",
"file",
"."
] | def num_feats(self):
"""
We peek the first line and seek back to
the beginning of the file.
"""
saved_pos = self.corpus.tell()
line = self.corpus.readline().split()
if self.line_truncate:
line = line[:self.line_truncate]
_, _, self.n_feats = TextDataset.extract_text_features(line)
self.corpus.seek(saved_pos)
return self.n_feats | [
"def",
"num_feats",
"(",
"self",
")",
":",
"saved_pos",
"=",
"self",
".",
"corpus",
".",
"tell",
"(",
")",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"if",
"self",
".",
"line_truncate",
":",
"line",
"=",
"line",
"[",
":",
"self",
".",
"line_truncate",
"]",
"_",
",",
"_",
",",
"self",
".",
"n_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"line",
")",
"self",
".",
"corpus",
".",
"seek",
"(",
"saved_pos",
")",
"return",
"self",
".",
"n_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/text_dataset.py#L407-L421 |
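Usage note: the save/peek/restore idiom used above, shown on a plain text file: read one line to inspect it, then seek back so later iteration is unaffected.

import io, tempfile

path = tempfile.mkstemp()[1]
with io.open(path, "w", encoding="utf-8") as f:
    f.write("first line\nsecond line\n")

f = io.open(path, "r", encoding="utf-8")
saved_pos = f.tell()
peeked = f.readline().split()   # peek the first line
f.seek(saved_pos)               # rewind: nothing was consumed
print(peeked, "|", f.readline().strip())
# ['first', 'line'] | first line
f.close()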
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.sort_key | (self, ex) | return (ex.src.size(2), ex.src.size(1)) | Sort using the size of the image: (width, height). | Sort using the size of the image: (width, height). | [
"Sort",
"using",
"the",
"size",
"of",
"the",
"image",
":",
"(",
"width",
"height",
")",
"."
] | def sort_key(self, ex):
""" Sort using the size of the image: (width, height)."""
return (ex.src.size(2), ex.src.size(1)) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"return",
"(",
"ex",
".",
"src",
".",
"size",
"(",
"2",
")",
",",
"ex",
".",
"src",
".",
"size",
"(",
"1",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L80-L82 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.make_image_examples_nfeats_tpl | (img_iter, img_path, img_dir,
image_channel_size=3) | return (examples_iter, num_feats) | Note: one of img_iter and img_path must be not None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images
Returns:
(example_dict iterator, num_feats) tuple | Note: one of img_iter and img_path must not be None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images | [
"Note",
":",
"one",
"of",
"img_iter",
"and",
"img_path",
"must",
"be",
"not",
"None",
"Args",
":",
"img_iter",
"(",
"iterator",
")",
":",
"an",
"iterator",
"that",
"yields",
"pairs",
"(",
"img",
"filename",
")",
"(",
"or",
"None",
")",
"img_path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"image",
"paths",
"(",
"or",
"None",
")",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"images"
] | def make_image_examples_nfeats_tpl(img_iter, img_path, img_dir,
image_channel_size=3):
"""
Note: one of img_iter and img_path must not be None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images
Returns:
(example_dict iterator, num_feats) tuple
"""
if img_iter is None:
if img_path is not None:
img_iter = ImageDataset. \
make_img_iterator_from_file(img_path,
img_dir,
image_channel_size)
else:
raise ValueError("""One of 'img_iter' and 'img_path'
must be not None""")
examples_iter = ImageDataset.make_examples(img_iter, img_dir, 'src')
num_feats = 0 # Source side(img) has no features.
return (examples_iter, num_feats) | [
"def",
"make_image_examples_nfeats_tpl",
"(",
"img_iter",
",",
"img_path",
",",
"img_dir",
",",
"image_channel_size",
"=",
"3",
")",
":",
"if",
"img_iter",
"is",
"None",
":",
"if",
"img_path",
"is",
"not",
"None",
":",
"img_iter",
"=",
"ImageDataset",
".",
"make_img_iterator_from_file",
"(",
"img_path",
",",
"img_dir",
",",
"image_channel_size",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\"",
")",
"examples_iter",
"=",
"ImageDataset",
".",
"make_examples",
"(",
"img_iter",
",",
"img_dir",
",",
"'src'",
")",
"num_feats",
"=",
"0",
"# Source side(img) has no features.",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L85-L111 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.make_examples | (img_iter, src_dir, side, truncate=None) | Args:
img_iter (iterator): an iterator yielding (img, filename) pairs
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited)
Yields:
a dictionary containing image data, path and index for each line. | Args:
img_iter (iterator): an iterator yielding (img, filename) pairs
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited) | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"image",
"paths",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"images",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"truncate",
":",
"maximum",
"img",
"size",
"((",
"0",
"0",
")",
"or",
"None",
"for",
"unlimited",
")"
] | def make_examples(img_iter, src_dir, side, truncate=None):
"""
Args:
img_iter (iterator): an iterator yielding (img, filename) pairs
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited)
Yields:
a dictionary containing image data, path and index for each line.
"""
assert (src_dir is not None) and os.path.exists(src_dir), \
'src_dir must be a valid directory if data_type is img'
for index, (img, filename) in enumerate(img_iter):
if truncate and truncate != (0, 0):
if not (img.size(1) <= truncate[0]
and img.size(2) <= truncate[1]):
continue
example_dict = {side: img,
side + '_path': filename,
'indices': index}
yield example_dict | [
"def",
"make_examples",
"(",
"img_iter",
",",
"src_dir",
",",
"side",
",",
"truncate",
"=",
"None",
")",
":",
"assert",
"(",
"src_dir",
"is",
"not",
"None",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"src_dir",
")",
",",
"'src_dir must be a valid directory if data_type is img'",
"for",
"index",
",",
"(",
"img",
",",
"filename",
")",
"in",
"enumerate",
"(",
"img_iter",
")",
":",
"if",
"truncate",
"and",
"truncate",
"!=",
"(",
"0",
",",
"0",
")",
":",
"if",
"not",
"(",
"img",
".",
"size",
"(",
"1",
")",
"<=",
"truncate",
"[",
"0",
"]",
"and",
"img",
".",
"size",
"(",
"2",
")",
"<=",
"truncate",
"[",
"1",
"]",
")",
":",
"continue",
"example_dict",
"=",
"{",
"side",
":",
"img",
",",
"side",
"+",
"'_path'",
":",
"filename",
",",
"'indices'",
":",
"index",
"}",
"yield",
"example_dict"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L114-L137 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.make_img_iterator_from_file | (path, src_dir, image_channel_size=3) | Args:
path(str):
src_dir(str):
Yields:
img: and image tensor
filename(str): the image filename | Args:
path(str):
src_dir(str): | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"src_dir",
"(",
"str",
")",
":"
] | def make_img_iterator_from_file(path, src_dir, image_channel_size=3):
"""
Args:
path(str):
src_dir(str):
Yields:
img: and image tensor
filename(str): the image filename
"""
from PIL import Image
from torchvision import transforms
with codecs.open(path, "r", "utf-8") as corpus_file:
for line in corpus_file:
filename = line.strip()
img_path = os.path.join(src_dir, filename)
if not os.path.exists(img_path):
img_path = line
assert os.path.exists(img_path), \
'img path %s not found' % (line.strip())
if (image_channel_size == 1):
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
yield img, filename | [
"def",
"make_img_iterator_from_file",
"(",
"path",
",",
"src_dir",
",",
"image_channel_size",
"=",
"3",
")",
":",
"from",
"PIL",
"import",
"Image",
"from",
"torchvision",
"import",
"transforms",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"corpus_file",
":",
"for",
"line",
"in",
"corpus_file",
":",
"filename",
"=",
"line",
".",
"strip",
"(",
")",
"img_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"img_path",
")",
":",
"img_path",
"=",
"line",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"img_path",
")",
",",
"'img path %s not found'",
"%",
"(",
"line",
".",
"strip",
"(",
")",
")",
"if",
"(",
"image_channel_size",
"==",
"1",
")",
":",
"img",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
"(",
"Image",
".",
"fromarray",
"(",
"cv2",
".",
"imread",
"(",
"img_path",
",",
"0",
")",
")",
")",
"else",
":",
"img",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
"(",
"Image",
".",
"open",
"(",
"img_path",
")",
")",
"yield",
"img",
",",
"filename"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L140-L169 |
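Usage note: a sketch of the two loading paths above on a synthetic image (cv2 is replaced by a NumPy array here); `transforms.ToTensor` yields a (C, H, W) float tensor in [0, 1].

import numpy as np
from PIL import Image
from torchvision import transforms

arr = (np.random.rand(32, 48) * 255).astype("uint8")            # H=32, W=48

gray = transforms.ToTensor()(Image.fromarray(arr))              # 1 channel
rgb = transforms.ToTensor()(Image.fromarray(arr).convert("RGB"))
print(gray.shape, rgb.shape)
# torch.Size([1, 32, 48]) torch.Size([3, 32, 48])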
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
def make_img(data, vocab):
""" ? """
c = data[0].size(0)
h = max([t.size(1) for t in data])
w = max([t.size(2) for t in data])
imgs = torch.zeros(len(data), c, h, w).fill_(1)
for i, img in enumerate(data):
imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
return imgs
fields["src"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_img, sequential=False)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"def",
"make_img",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"c",
"=",
"data",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"h",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"1",
")",
"for",
"t",
"in",
"data",
"]",
")",
"w",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"2",
")",
"for",
"t",
"in",
"data",
"]",
")",
"imgs",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"data",
")",
",",
"c",
",",
"h",
",",
"w",
")",
".",
"fill_",
"(",
"1",
")",
"for",
"i",
",",
"img",
"in",
"enumerate",
"(",
"data",
")",
":",
"imgs",
"[",
"i",
",",
":",
",",
"0",
":",
"img",
".",
"size",
"(",
"1",
")",
",",
"0",
":",
"img",
".",
"size",
"(",
"2",
")",
"]",
"=",
"img",
"return",
"imgs",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_img",
",",
"sequential",
"=",
"False",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
"+",
"1",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L172-L243 |
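Usage note: a toy demonstration of the `make_img` postprocessing above: variable-sized image tensors are padded into one (batch, c, H, W) tensor with fill value 1 (white background) rather than 0.

import torch

data = [torch.zeros(3, 10, 20), torch.zeros(3, 8, 32)]
c = data[0].size(0)
h = max(t.size(1) for t in data)
w = max(t.size(2) for t in data)

imgs = torch.zeros(len(data), c, h, w).fill_(1)
for i, img in enumerate(data):
    imgs[i, :, :img.size(1), :img.size(2)] = img

print(imgs.shape)         # torch.Size([2, 3, 10, 32])
print(imgs[0, 0, 0, 25])  # tensor(1.) -> padding, not image content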
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py | python | ImageDataset.get_num_features | (corpus_file, side) | return num_feats | For image corpus, source side is in form of image, thus
no feature; while target side is in form of text, thus
we can extract its text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | For image corpus, source side is in form of image, thus
no feature; while target side is in form of text, thus
we can extract its text features. | [
"For",
"image",
"corpus",
"source",
"side",
"is",
"in",
"form",
"of",
"image",
"thus",
"no",
"feature",
";",
"while",
"target",
"side",
"is",
"in",
"form",
"of",
"text",
"thus",
"we",
"can",
"extract",
"its",
"text",
"features",
"."
] | def get_num_features(corpus_file, side):
"""
For image corpus, source side is in form of image, thus
no feature; while target side is in form of text, thus
we can extract its text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
if side == 'src':
num_feats = 0
else:
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = ImageDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"if",
"side",
"==",
"'src'",
":",
"num_feats",
"=",
"0",
"else",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"ImageDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/image_dataset.py#L246-L266 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py | python | AudioDataset.sort_key | (self, ex) | return ex.src.size(1) | Sort using duration time of the sound spectrogram. | Sort using duration time of the sound spectrogram. | [
"Sort",
"using",
"duration",
"time",
"of",
"the",
"sound",
"spectrogram",
"."
] | def sort_key(self, ex):
""" Sort using duration time of the sound spectrogram. """
return ex.src.size(1) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"return",
"ex",
".",
"src",
".",
"size",
"(",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py#L90-L92 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py | python | AudioDataset.make_audio_examples_nfeats_tpl | (path, audio_dir,
sample_rate, window_size,
window_stride, window,
normalize_audio, truncate=None) | return (examples_iter, num_feats) | Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited).
Returns:
(example_dict iterator, num_feats) tuple | Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited). | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"audio",
"paths",
".",
"audio_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"audio",
"files",
".",
"sample_rate",
"(",
"int",
")",
":",
"sample_rate",
".",
"window_size",
"(",
"float",
")",
":",
"window",
"size",
"for",
"spectrogram",
"in",
"seconds",
".",
"window_stride",
"(",
"float",
")",
":",
"window",
"stride",
"for",
"spectrogram",
"in",
"seconds",
".",
"window",
"(",
"str",
")",
":",
"window",
"type",
"for",
"spectrogram",
"generation",
".",
"normalize_audio",
"(",
"bool",
")",
":",
"subtract",
"spectrogram",
"by",
"mean",
"and",
"divide",
"by",
"std",
"or",
"not",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"audio",
"length",
"(",
"0",
"or",
"None",
"for",
"unlimited",
")",
"."
] | def make_audio_examples_nfeats_tpl(path, audio_dir,
sample_rate, window_size,
window_stride, window,
normalize_audio, truncate=None):
"""
Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited).
Returns:
(example_dict iterator, num_feats) tuple
"""
examples_iter = AudioDataset.read_audio_file(
path, audio_dir, "src", sample_rate,
window_size, window_stride, window,
normalize_audio, truncate)
num_feats = 0 # Source side(audio) has no features.
return (examples_iter, num_feats) | [
"def",
"make_audio_examples_nfeats_tpl",
"(",
"path",
",",
"audio_dir",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
"=",
"None",
")",
":",
"examples_iter",
"=",
"AudioDataset",
".",
"read_audio_file",
"(",
"path",
",",
"audio_dir",
",",
"\"src\"",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
")",
"num_feats",
"=",
"0",
"# Source side(audio) has no features.",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py#L95-L120 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py | python | AudioDataset.read_audio_file | (path, src_dir, side, sample_rate, window_size,
window_stride, window, normalize_audio,
truncate=None) | Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited).
Yields:
a dictionary containing audio data for each line. | Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited). | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"audio",
"paths",
".",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"audio",
"files",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
".",
"sample_rate",
"(",
"int",
")",
":",
"sample_rate",
".",
"window_size",
"(",
"float",
")",
":",
"window",
"size",
"for",
"spectrogram",
"in",
"seconds",
".",
"window_stride",
"(",
"float",
")",
":",
"window",
"stride",
"for",
"spectrogram",
"in",
"seconds",
".",
"window",
"(",
"str",
")",
":",
"window",
"type",
"for",
"spectrogram",
"generation",
".",
"normalize_audio",
"(",
"bool",
")",
":",
"subtract",
"spectrogram",
"by",
"mean",
"and",
"divide",
"by",
"std",
"or",
"not",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"audio",
"length",
"(",
"0",
"or",
"None",
"for",
"unlimited",
")",
"."
] | def read_audio_file(path, src_dir, side, sample_rate, window_size,
window_stride, window, normalize_audio,
truncate=None):
"""
Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram
mean and divide by the std.
truncate (int): maximum audio length (0 or None for unlimited).
Yields:
a dictionary containing audio data for each line.
"""
assert (src_dir is not None) and os.path.exists(src_dir),\
"src_dir must be a valid directory if data_type is audio"
import torchaudio
import librosa
import numpy as np
with codecs.open(path, "r", "utf-8") as corpus_file:
index = 0
for line in corpus_file:
audio_path = os.path.join(src_dir, line.strip())
if not os.path.exists(audio_path):
audio_path = line
assert os.path.exists(audio_path), \
'audio path %s not found' % (line.strip())
sound, sample_rate_ = torchaudio.load(audio_path)
if truncate and truncate > 0:
if sound.size(0) > truncate:
continue
assert sample_rate_ == sample_rate, \
'Sample rate of %s != -sample_rate (%d vs %d)' \
% (audio_path, sample_rate_, sample_rate)
sound = sound.numpy()
if len(sound.shape) > 1:
if sound.shape[1] == 1:
sound = sound.squeeze()
else:
sound = sound.mean(axis=1) # average multiple channels
n_fft = int(sample_rate * window_size)
win_length = n_fft
hop_length = int(sample_rate * window_stride)
# STFT
d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window)
spect, _ = librosa.magphase(d)
spect = np.log1p(spect)
spect = torch.FloatTensor(spect)
if normalize_audio:
mean = spect.mean()
std = spect.std()
spect.add_(-mean)
spect.div_(std)
example_dict = {side: spect,
side + '_path': line.strip(),
'indices': index}
index += 1
yield example_dict | [
"def",
"read_audio_file",
"(",
"path",
",",
"src_dir",
",",
"side",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
"=",
"None",
")",
":",
"assert",
"(",
"src_dir",
"is",
"not",
"None",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"src_dir",
")",
",",
"\"src_dir must be a valid directory if data_type is audio\"",
"import",
"torchaudio",
"import",
"librosa",
"import",
"numpy",
"as",
"np",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"corpus_file",
":",
"index",
"=",
"0",
"for",
"line",
"in",
"corpus_file",
":",
"audio_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"line",
".",
"strip",
"(",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"audio_path",
")",
":",
"audio_path",
"=",
"line",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"audio_path",
")",
",",
"'audio path %s not found'",
"%",
"(",
"line",
".",
"strip",
"(",
")",
")",
"sound",
",",
"sample_rate",
"=",
"torchaudio",
".",
"load",
"(",
"audio_path",
")",
"if",
"truncate",
"and",
"truncate",
">",
"0",
":",
"if",
"sound",
".",
"size",
"(",
"0",
")",
">",
"truncate",
":",
"continue",
"assert",
"sample_rate",
"==",
"sample_rate",
",",
"'Sample rate of %s != -sample_rate (%d vs %d)'",
"%",
"(",
"audio_path",
",",
"sample_rate",
",",
"sample_rate",
")",
"sound",
"=",
"sound",
".",
"numpy",
"(",
")",
"if",
"len",
"(",
"sound",
".",
"shape",
")",
">",
"1",
":",
"if",
"sound",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"sound",
"=",
"sound",
".",
"squeeze",
"(",
")",
"else",
":",
"sound",
"=",
"sound",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"# average multiple channels",
"n_fft",
"=",
"int",
"(",
"sample_rate",
"*",
"window_size",
")",
"win_length",
"=",
"n_fft",
"hop_length",
"=",
"int",
"(",
"sample_rate",
"*",
"window_stride",
")",
"# STFT",
"d",
"=",
"librosa",
".",
"stft",
"(",
"sound",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
")",
"spect",
",",
"_",
"=",
"librosa",
".",
"magphase",
"(",
"d",
")",
"spect",
"=",
"np",
".",
"log1p",
"(",
"spect",
")",
"spect",
"=",
"torch",
".",
"FloatTensor",
"(",
"spect",
")",
"if",
"normalize_audio",
":",
"mean",
"=",
"spect",
".",
"mean",
"(",
")",
"std",
"=",
"spect",
".",
"std",
"(",
")",
"spect",
".",
"add_",
"(",
"-",
"mean",
")",
"spect",
".",
"div_",
"(",
"std",
")",
"example_dict",
"=",
"{",
"side",
":",
"spect",
",",
"side",
"+",
"'_path'",
":",
"line",
".",
"strip",
"(",
")",
",",
"'indices'",
":",
"index",
"}",
"index",
"+=",
"1",
"yield",
"example_dict"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py#L123-L195 |
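Usage note: a sketch of the spectrogram pipeline above on synthetic audio; the window type is illustrative, shapes follow (1 + n_fft // 2, n_frames), and the final two lines mirror the normalize_audio branch.

import numpy as np
import librosa

sample_rate, window_size, window_stride = 16000, 0.02, 0.01
sound = np.random.randn(sample_rate).astype(np.float32)   # 1 s of noise

n_fft = int(sample_rate * window_size)                    # 320
hop_length = int(sample_rate * window_stride)             # 160

d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                 win_length=n_fft, window="hamming")
spect, _ = librosa.magphase(d)                 # magnitude only
spect = np.log1p(spect)                        # compress dynamic range
spect = (spect - spect.mean()) / spect.std()   # normalize_audio=True branch
print(spect.shape)                             # (161, 101)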
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py | python | AudioDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
def make_audio(data, vocab):
""" ? """
nfft = data[0].size(0)
t = max([t.size(1) for t in data])
sounds = torch.zeros(len(data), 1, nfft, t)
for i, spect in enumerate(data):
sounds[i, :, :, 0:spect.size(1)] = spect
return sounds
fields["src"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_audio, sequential=False)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"def",
"make_audio",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"nfft",
"=",
"data",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"t",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"1",
")",
"for",
"t",
"in",
"data",
"]",
")",
"sounds",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"data",
")",
",",
"1",
",",
"nfft",
",",
"t",
")",
"for",
"i",
",",
"spect",
"in",
"enumerate",
"(",
"data",
")",
":",
"sounds",
"[",
"i",
",",
":",
",",
":",
",",
"0",
":",
"spect",
".",
"size",
"(",
"1",
")",
"]",
"=",
"spect",
"return",
"sounds",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_audio",
",",
"sequential",
"=",
"False",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
"+",
"1",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/OpenNMT-py-baselines/onmt/inputters/audio_dataset.py#L198-L268 |