| Column | Type |
| --- | --- |
| nwo | string (6-76 chars) |
| sha | string (40 chars) |
| path | string (5-118 chars) |
| language | string (1 distinct value) |
| identifier | string (1-89 chars) |
| parameters | string (2-5.4k chars) |
| argument_list | string (1 distinct value) |
| return_statement | string (0-51.1k chars) |
| docstring | string (1-17.6k chars) |
| docstring_summary | string (0-7.02k chars) |
| docstring_tokens | sequence |
| function | string (30-51.1k chars) |
| function_tokens | sequence |
| url | string (85-218 chars) |
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgrBase._report_training | (self, *args, **kwargs) | To be overridden | To be overridden | def _report_training(self, *args, **kwargs):
""" To be overridden """
    raise NotImplementedError() | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L77-L79 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgrBase.report_step | (self, lr, step, train_stats=None, valid_stats=None) | Report stats of a step
Args:
train_stats(Statistics): training stats
valid_stats(Statistics): validation stats
    lr(float): current learning rate | Report stats of a step | def report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
Report stats of a step
Args:
train_stats(Statistics): training stats
valid_stats(Statistics): validation stats
lr(float): current learning rate
"""
self._report_step(
            lr, step, train_stats=train_stats, valid_stats=valid_stats) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L81-L91 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgr.__init__ | (self, report_every, start_time=-1., tensorboard_writer=None) | A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None | A report manager that writes statistics on standard output as well as
    (optionally) TensorBoard | def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
        self.tensorboard_writer = tensorboard_writer | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L98-L109 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgr._report_training | (self, step, num_steps, learning_rate,
report_stats) | return report_stats | See base class method `ReportMgrBase.report_training`. | See base class method `ReportMgrBase.report_training`. | def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
# Log the progress using the number of batches on the x-axis.
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
self.progress_step)
report_stats = onmt.utils.Statistics()
        return report_stats | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L116-L131 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/utils/report_manager.py | python | ReportMgr._report_step | (self, lr, step, train_stats=None, valid_stats=None) | See base class method `ReportMgrBase.report_step`. | See base class method `ReportMgrBase.report_step`. | def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train perplexity: %g' % train_stats.ppl())
self.log('Train accuracy: %g' % train_stats.accuracy())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation perplexity: %g' % valid_stats.ppl())
self.log('Validation accuracy: %g' % valid_stats.accuracy())
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
                                       step) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/utils/report_manager.py#L133-L153 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/multi_headed_attn.py | python | MultiHeadedAttention.forward | (self, key, value, query, mask=None,
layer_cache=None, type=None) | return output, top_attn | Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
       * one of the attention vectors `[batch, query_len, key_len]` | Compute the context vector and the attention vectors. | def forward(self, key, value, query, mask=None,
layer_cache=None, type=None):
"""
Compute the context vector and the attention vectors.
Args:
key (`FloatTensor`): set of `key_len`
key vectors `[batch, key_len, dim]`
value (`FloatTensor`): set of `key_len`
value vectors `[batch, key_len, dim]`
query (`FloatTensor`): set of `query_len`
query vectors `[batch, query_len, dim]`
mask: binary mask indicating which keys have
non-zero attention `[batch, query_len, key_len]`
Returns:
(`FloatTensor`, `FloatTensor`) :
* output context vectors `[batch, query_len, dim]`
* one of the attention vectors `[batch, query_len, key_len]`
"""
# CHECKS
# batch, k_len, d = key.size()
# batch_, k_len_, d_ = value.size()
# aeq(batch, batch_)
# aeq(k_len, k_len_)
# aeq(d, d_)
# batch_, q_len, d_ = query.size()
# aeq(batch, batch_)
# aeq(d, d_)
# aeq(self.model_dim % 8, 0)
# if mask is not None:
# batch_, q_len_, k_len_ = mask.size()
# aeq(batch_, batch)
# aeq(k_len_, k_len)
        #     aeq(q_len_, q_len)
# END CHECKS
batch_size = key.size(0)
dim_per_head = self.dim_per_head
head_count = self.head_count
key_len = key.size(1)
query_len = query.size(1)
def shape(x):
""" projection """
return x.view(batch_size, -1, head_count, dim_per_head) \
.transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous() \
.view(batch_size, -1, head_count * dim_per_head)
# 1) Project key, value, and query.
if layer_cache is not None:
if type == "self":
query, key, value = self.linear_query(query),\
self.linear_keys(query),\
self.linear_values(query)
key = shape(key)
value = shape(value)
if layer_cache is not None:
device = key.device
if layer_cache["self_keys"] is not None:
key = torch.cat(
(layer_cache["self_keys"].to(device), key),
dim=2)
if layer_cache["self_values"] is not None:
value = torch.cat(
(layer_cache["self_values"].to(device), value),
dim=2)
layer_cache["self_keys"] = key
layer_cache["self_values"] = value
elif type == "context":
query = self.linear_query(query)
if layer_cache is not None:
if layer_cache["memory_keys"] is None:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key, value = layer_cache["memory_keys"],\
layer_cache["memory_values"]
layer_cache["memory_keys"] = key
layer_cache["memory_values"] = value
else:
key, value = self.linear_keys(key),\
self.linear_values(value)
key = shape(key)
value = shape(value)
else:
key = self.linear_keys(key)
value = self.linear_values(value)
query = self.linear_query(query)
key = shape(key)
value = shape(value)
query = shape(query)
key_len = key.size(2)
query_len = query.size(2)
# 2) Calculate and scale scores.
query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(scores)
scores = scores.masked_fill(mask, -1e18)
# 3) Apply attention dropout and compute context vectors.
attn = self.softmax(scores)
drop_attn = self.dropout(attn)
context = unshape(torch.matmul(drop_attn, value))
output = self.final_linear(context)
# CHECK
# batch_, q_len_, d_ = output.size()
# aeq(q_len, q_len_)
# aeq(batch, batch_)
# aeq(d, d_)
# Return one attn
top_attn = attn \
.view(batch_size, head_count,
query_len, key_len)[:, 0, :, :] \
.contiguous()
        return output, top_attn | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/multi_headed_attn.py#L69-L201 |
|
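A minimal shape walk-through of the head split, scaling, and merge performed by `MultiHeadedAttention.forward` above. The sizes are illustrative assumptions (8 heads of 64 dims), not values taken from this repo's configs:

```python
import math
import torch

# Illustrative sizes, not from the Multi-News configs.
batch, head_count, dim_per_head, key_len, query_len = 2, 8, 64, 10, 1
dim = head_count * dim_per_head

def shape(x):
    # [batch, len, dim] -> [batch, heads, len, dim_per_head]
    return x.view(batch, -1, head_count, dim_per_head).transpose(1, 2)

query = shape(torch.randn(batch, query_len, dim)) / math.sqrt(dim_per_head)
key = shape(torch.randn(batch, key_len, dim))
value = shape(torch.randn(batch, key_len, dim))

scores = torch.matmul(query, key.transpose(2, 3))  # [2, 8, 1, 10]
attn = torch.softmax(scores, dim=-1)               # normalized over key_len
context = torch.matmul(attn, value)                # [2, 8, 1, 64]
output = context.transpose(1, 2).contiguous().view(batch, -1, dim)
print(output.shape)                                # torch.Size([2, 1, 512])
```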
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/position_ffn.py | python | PositionwiseFeedForward.forward | (self, x) | return output + x | Layer definition.
Args:
    x: [ batch_size, input_len, model_dim ]
Returns:
    output: [ batch_size, input_len, model_dim ] | Layer definition. | def forward(self, x):
"""
Layer definition.
Args:
        x: [ batch_size, input_len, model_dim ]
Returns:
output: [ batch_size, input_len, model_dim ]
"""
inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
        return output + x | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/position_ffn.py#L29-L42 |
|
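The `forward` above relies on `w_1`, `w_2`, `layer_norm`, and two dropout layers defined in `__init__`. A minimal self-contained sketch of such a module; the hidden size and dropout rate are assumed defaults, not this repo's settings:

```python
import torch.nn as nn

class PositionwiseFeedForwardSketch(nn.Module):
    """Pre-LayerNorm position-wise FFN with a residual connection."""

    def __init__(self, model_dim=512, hidden_dim=2048, dropout=0.1):
        super().__init__()
        self.w_1 = nn.Linear(model_dim, hidden_dim)
        self.w_2 = nn.Linear(hidden_dim, model_dim)
        self.layer_norm = nn.LayerNorm(model_dim)
        self.dropout_1 = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        inter = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        return self.dropout_2(self.w_2(inter)) + x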
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/sparse_activations.py | python | threshold_and_support | (z, dim=0) | return tau_z, k_z | z: any dimension
dim: dimension along which to apply the sparsemax | z: any dimension
    dim: dimension along which to apply the sparsemax | def threshold_and_support(z, dim=0):
"""
z: any dimension
dim: dimension along which to apply the sparsemax
"""
sorted_z, _ = torch.sort(z, descending=True, dim=dim)
z_sum = sorted_z.cumsum(dim) - 1 # sort of a misnomer
k = torch.arange(1, sorted_z.size(dim) + 1, device=z.device).float().view(
torch.Size([-1] + [1] * (z.dim() - 1))
).transpose(0, dim)
support = k * sorted_z > z_sum
k_z_indices = support.sum(dim=dim).unsqueeze(dim)
k_z = k_z_indices.float()
tau_z = z_sum.gather(dim, k_z_indices - 1) / k_z
    return tau_z, k_z | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/sparse_activations.py#L11-L26 |
|
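A quick numeric check of `threshold_and_support` (assuming the function above is in scope): the returned threshold `tau_z` zeroes everything outside the support, which is how sparsemax produces exact zeros:

```python
import torch

z = torch.tensor([[1.0, 0.8, -1.0]])
tau_z, k_z = threshold_and_support(z, dim=1)
p = torch.clamp(z - tau_z, min=0)  # the sparsemax projection
print(k_z)  # tensor([[2.]])  -> two classes in the support
print(p)    # tensor([[0.6000, 0.4000, 0.0000]])  sums to 1, exact zero outside
```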
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/sparse_activations.py | python | SparsemaxFunction.forward | (ctx, input, dim=0) | return output | input (FloatTensor): any shape
returns (FloatTensor): same shape with sparsemax computed on given dim | input (FloatTensor): any shape
    returns (FloatTensor): same shape with sparsemax computed on given dim | def forward(ctx, input, dim=0):
"""
input (FloatTensor): any shape
returns (FloatTensor): same shape with sparsemax computed on given dim
"""
ctx.dim = dim
tau_z, k_z = threshold_and_support(input, dim=dim)
output = torch.clamp(input - tau_z, min=0)
ctx.save_for_backward(k_z, output)
        return output | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/sparse_activations.py#L32-L41 |
|
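As a usage sketch, a `torch.autograd.Function` is normally invoked through `.apply`; the alias below is an assumption for illustration (the module presumably exposes its own wrapper, and gradient use would also require the matching `backward`):

```python
import torch

sparsemax = SparsemaxFunction.apply  # assumed alias, for illustration only

logits = torch.randn(4, 10)
probs = sparsemax(logits, 1)
print(probs.sum(dim=1))    # every row sums to 1
print((probs == 0).sum())  # usually > 0: exact zeros, unlike softmax
```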
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/global_attention.py | python | GlobalAttention.mmr_score | (self, inputs, output) | :param inputs: input sentence matrix
:param output: output sentence (vector)
:return: MMR scores | def mmr_score(self, inputs, output):
    '''
    :param inputs: input sentence matrix
    :param output: output sentence (vector)
    :return: MMR scores
    ''' | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/global_attention.py#L99-L105 |
|||
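Only the signature and docstring of `mmr_score` survive in this snapshot. For orientation, standard Maximal Marginal Relevance scores each candidate by relevance minus redundancy against already-selected items; the sketch below is the textbook formulation with assumed cosine similarities, not this repo's implementation:

```python
import torch
import torch.nn.functional as F

def mmr_scores_sketch(inputs, output, selected, lambda_=0.5):
    """inputs: [n, dim] candidates; output: [dim] query; selected: [k, dim]."""
    relevance = F.cosine_similarity(inputs, output.unsqueeze(0), dim=1)
    if selected.numel() == 0:
        return lambda_ * relevance
    # Redundancy: max similarity to anything already selected.
    redundancy = F.cosine_similarity(
        inputs.unsqueeze(1), selected.unsqueeze(0), dim=2).max(dim=1).values
    return lambda_ * relevance - (1 - lambda_) * redundancy
```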
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/global_attention.py | python | GlobalAttention.score | (self, h_t, h_s) | Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]` | Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
    h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]` | def score(self, h_t, h_s):
"""
Args:
h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`
h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`
Returns:
:obj:`FloatTensor`:
raw attention scores (unnormalized) for each src index
`[batch x tgt_len x src_len]`
"""
# target length is 1 (tgt_len)
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            # print('tgt_len, src_len...', tgt_len, src_len)  tgt_len is 1; src_len varies
            return torch.bmm(h_t, h_s_)  # batch matrix-matrix product
else: # normal attention
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# (batch, t_len, s_len, d)
wquh = torch.tanh(wq + uh)
            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/global_attention.py#L109-L151 |
||
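A shape walk-through of the `"dot"` branch above, with illustrative sizes (the comments in the source suggest `tgt_len` is 1 during decoding):

```python
import torch

batch, tgt_len, src_len, dim = 16, 1, 400, 512  # illustrative sizes
h_t = torch.randn(batch, tgt_len, dim)  # queries
h_s = torch.randn(batch, src_len, dim)  # memory bank
align = torch.bmm(h_t, h_s.transpose(1, 2))
print(align.shape)  # torch.Size([16, 1, 400]): one raw score per source position
```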
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/global_attention.py | python | GlobalAttention.forward | (self, source, memory_bank, memory_lengths=None, coverage=None) | return attn_h, align_vectors | Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
       * Attention distributions for each query
         `[tgt_len x batch x src_len]` | def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
          * Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
# print ('Source..',source.size())
# print ('memory_bank..',memory_bank.size())
# Source..torch.Size([16, 512])
# memory_bank..torch.Size([16, 400, 512])
# one step input
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
batch, source_l, dim = memory_bank.size()
batch_, target_l, dim_ = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, source_l_ = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank += self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
# compute attention scores, as in Luong et al.
align = self.score(source, memory_bank)
if memory_lengths is not None:
            # Mask out attention to padded source positions.
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1) # Make it broadcastable.
align.masked_fill_(1 - mask, -float('inf'))
# Softmax or sparsemax to normalize attention weights
if self.attn_func == "softmax":
align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)
else:
align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, memory_bank)
# concatenate
concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ["general", "dot"]:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, source_l_ = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
# Check output sizes
target_l_, batch_, dim_ = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
target_l_, batch_, source_l_ = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
# print ('Atten Hidden...',attn_h.size()) # torch.Size([16, 512])
# print ('Align...',align_vectors.size()) # torch.Size([16, 400])
        return attn_h, align_vectors | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/global_attention.py#L153-L250 |
||
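The `memory_lengths` branch hides padded source positions before normalization. A self-contained sketch of that masking, with a local stand-in for onmt's `sequence_mask` and `~mask` as the modern-PyTorch equivalent of the source's `1 - mask` on byte masks:

```python
import torch
import torch.nn.functional as F

def sequence_mask(lengths, max_len):
    # True inside each sequence, False on padding.
    return torch.arange(max_len).unsqueeze(0) < lengths.unsqueeze(1)

align = torch.randn(3, 1, 5)              # [batch, tgt_len, src_len]
lengths = torch.tensor([5, 3, 2])
mask = sequence_mask(lengths, align.size(-1)).unsqueeze(1)  # broadcastable
align = align.masked_fill(~mask, -float('inf'))
weights = F.softmax(align, dim=-1)
print(weights[1, 0])  # last two entries are exactly 0
```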
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/gate.py | python | context_gate_factory | (gate_type, embeddings_size, decoder_size,
attention_size, output_size) | return gate_types[gate_type](embeddings_size, decoder_size, attention_size,
output_size) | Returns the correct ContextGate class | Returns the correct ContextGate class | def context_gate_factory(gate_type, embeddings_size, decoder_size,
attention_size, output_size):
"""Returns the correct ContextGate class"""
gate_types = {'source': SourceContextGate,
'target': TargetContextGate,
'both': BothContextGate}
    assert gate_type in gate_types, "Invalid ContextGate type: {0}".format(
        gate_type)
return gate_types[gate_type](embeddings_size, decoder_size, attention_size,
                                 output_size) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/gate.py#L6-L17 |
|
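A brief usage sketch of the factory (the gate classes come from the same module, so this assumes that module context; all sizes are arbitrary example values):

```python
# Hypothetical call; sizes are illustrative, not from this repo's configs.
context_gate = context_gate_factory(
    gate_type='both',
    embeddings_size=500, decoder_size=500,
    attention_size=500, output_size=500)
```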
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/average_attn.py | python | AverageAttention.cumulative_average_mask | (self, batch_size, inputs_len) | return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) | Builds the mask to compute the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(`FloatTensor`):
* A Tensor of shape `[batch_size x input_len x input_len]` | Builds the mask to compute the cumulative average as described in
    https://arxiv.org/abs/1805.00631 -- Figure 3 | def cumulative_average_mask(self, batch_size, inputs_len):
"""
Builds the mask to compute the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Figure 3
Args:
batch_size (int): batch size
inputs_len (int): length of the inputs
Returns:
(`FloatTensor`):
* A Tensor of shape `[batch_size x input_len x input_len]`
"""
triangle = torch.tril(torch.ones(inputs_len, inputs_len))
weights = torch.ones(1, inputs_len) / torch.arange(
1, inputs_len + 1, dtype=torch.float)
mask = triangle * weights.transpose(0, 1)
        return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/average_attn.py#L31-L51 |
|
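What the mask looks like for `inputs_len = 3`: row `t` holds the weights of a running mean over positions `0..t`, so `torch.matmul(mask, inputs)` yields all cumulative averages in one shot:

```python
import torch

triangle = torch.tril(torch.ones(3, 3))
weights = torch.ones(1, 3) / torch.arange(1, 4, dtype=torch.float)
mask = triangle * weights.transpose(0, 1)
print(mask)
# tensor([[1.0000, 0.0000, 0.0000],
#         [0.5000, 0.5000, 0.0000],
#         [0.3333, 0.3333, 0.3333]])
```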
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/average_attn.py | python | AverageAttention.cumulative_average | (self, inputs, mask_or_step,
layer_cache=None, step=None) | Computes the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6)
Args:
inputs (`FloatTensor`): sequence to average
`[batch_size x input_len x dimension]`
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
    layer_cache: a dictionary containing the cumulative average
of the previous step. | Computes the cumulative average as described in
    https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6) | def cumulative_average(self, inputs, mask_or_step,
layer_cache=None, step=None):
"""
Computes the cumulative average as described in
https://arxiv.org/abs/1805.00631 -- Equations (1) (5) (6)
Args:
inputs (`FloatTensor`): sequence to average
`[batch_size x input_len x dimension]`
mask_or_step: if cache is set, this is assumed
to be the current step of the
dynamic decoding. Otherwise, it is the mask matrix
used to compute the cumulative average.
        layer_cache: a dictionary containing the cumulative average
of the previous step.
"""
if layer_cache is not None:
step = mask_or_step
device = inputs.device
average_attention = (inputs + step *
layer_cache["prev_g"].to(device)) / (step + 1)
layer_cache["prev_g"] = average_attention
return average_attention
else:
mask = mask_or_step
            return torch.matmul(mask, inputs) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/average_attn.py#L53-L78 |
||
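The decoding-time recurrence in the cached branch is just an incremental running mean, matching the training-time mask above; a quick check:

```python
import torch

x = torch.randn(4, 10)  # 4 decoding steps, toy feature dim
g = torch.zeros(10)
for step in range(4):
    g = (x[step] + step * g) / (step + 1)  # the cached-branch update
print(torch.allclose(g, x.mean(dim=0)))   # True
```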
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/average_attn.py | python | AverageAttention.forward | (self, inputs, mask=None, layer_cache=None, step=None) | return gating_outputs, average_outputs | Args:
inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`
Returns:
(`FloatTensor`, `FloatTensor`):
* gating_outputs `[batch_size x 1 x model_dim]`
* average_outputs average attention `[batch_size x 1 x model_dim]` | Args:
    inputs (`FloatTensor`): `[batch_size x input_len x model_dim]` | def forward(self, inputs, mask=None, layer_cache=None, step=None):
"""
Args:
inputs (`FloatTensor`): `[batch_size x input_len x model_dim]`
Returns:
(`FloatTensor`, `FloatTensor`):
* gating_outputs `[batch_size x 1 x model_dim]`
* average_outputs average attention `[batch_size x 1 x model_dim]`
"""
batch_size = inputs.size(0)
inputs_len = inputs.size(1)
device = inputs.device
average_outputs = self.cumulative_average(
inputs, self.cumulative_average_mask(batch_size,
inputs_len).to(device).float()
if layer_cache is None else step, layer_cache=layer_cache)
average_outputs = self.average_layer(average_outputs)
gating_outputs = self.gating_layer(torch.cat((inputs,
average_outputs), -1))
input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
gating_outputs = torch.sigmoid(input_gate) * inputs + \
torch.sigmoid(forget_gate) * average_outputs
        return gating_outputs, average_outputs | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/average_attn.py#L80-L106 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/copy_generator.py | python | CopyGenerator.forward | (self, hidden, attn, src_map) | return torch.cat([out_prob, copy_prob], 1) | Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words.
Args:
    hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`
    attn (`FloatTensor`): attention over the source for each output `[batch*tlen, src_len]`
    src_map (`FloatTensor`):
        A sparse indicator matrix mapping each source word to
        its index in the "extended" vocabulary.
        `[src_len, batch, extra_words]` | Compute a distribution over the target dictionary
extended by the dynamic dictionary implied by copying
source words. | def forward(self, hidden, attn, src_map):
"""
Compute a distribution over the target dictionary
    extended by the dynamic dictionary implied by copying
    source words.
    Args:
        hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`
        attn (`FloatTensor`): attention over the source for each output `[batch*tlen, src_len]`
        src_map (`FloatTensor`):
            A sparse indicator matrix mapping each source word to
            its index in the "extended" vocabulary.
`[src_len, batch, extra_words]`
"""
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')
prob = self.softmax(logits)
# Probability of copying p(z=1) batch.
p_copy = self.sigmoid(self.linear_copy(hidden))
    # Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - p_copy.expand_as(prob))
mul_attn = torch.mul(attn, p_copy.expand_as(attn))
copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)
.transpose(0, 1),
src_map.transpose(0, 1)).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
    return torch.cat([out_prob, copy_prob], 1) | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/copy_generator.py#L71-L106 |
|
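A toy numeric check of the mixture computed above: the generator mass is scaled by `1 - p_copy`, the attention mass by `p_copy` and scattered into extended-vocab slots via `src_map`, so the concatenation remains a proper distribution. All sizes and the source-to-slot mapping are made up for illustration:

```python
import torch

batch_by_tlen, vocab, slen, batch, extra = 1, 6, 4, 1, 3
prob = torch.softmax(torch.randn(batch_by_tlen, vocab), dim=-1)
attn = torch.softmax(torch.randn(batch_by_tlen, slen), dim=-1)
p_copy = torch.sigmoid(torch.randn(batch_by_tlen, 1))
src_map = torch.zeros(slen, batch, extra)
src_map[torch.arange(slen), 0, torch.tensor([0, 1, 1, 2])] = 1.0  # toy mapping

out_prob = prob * (1 - p_copy)
mul_attn = attn * p_copy
copy_prob = torch.bmm(mul_attn.view(-1, batch, slen).transpose(0, 1),
                      src_map.transpose(0, 1)).transpose(0, 1)
final = torch.cat([out_prob, copy_prob.contiguous().view(-1, extra)], 1)
print(final.sum())  # ~1.0: still a proper distribution
```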
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/copy_generator.py | python | CopyGeneratorLossCompute._make_shard_state | (self, batch, output, range_, attns) | return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
        } | See base class for args description. | See base class for args description. | def _make_shard_state(self, batch, output, range_, attns):
""" See base class for args description. """
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
        } | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/copy_generator.py#L163-L174 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/copy_generator.py | python | CopyGeneratorLossCompute._compute_loss | (self, batch, output, target, copy_attn, align) | return loss, stats | Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
    output: the predicted output from the model.
    target: the validation target to compare the output with.
copy_attn: the copy attention value.
align: the align info. | Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
    output: the predicted output from the model.
    target: the validation target to compare the output with.
copy_attn: the copy attention value.
    align: the align info. | def _compute_loss(self, batch, output, target, copy_attn, align):
"""
Compute the loss. The args must match self._make_shard_state().
Args:
batch: the current batch.
        output: the predicted output from the model.
        target: the validation target to compare the output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(self._bottle(output),
self._bottle(copy_attn),
batch.src_map)
loss = self.criterion(scores, align, target)
scores_data = scores.data.clone()
scores_data = inputters.TextDataset.collapse_copy_scores(
self._unbottle(scores_data, batch.batch_size),
batch, self.tgt_vocab, batch.dataset.src_vocabs)
scores_data = self._bottle(scores_data)
# Correct target copy token instead of <unk>
# tgt[i] = align[i] + len(tgt_vocab)
# for i such that tgt[i] == 0 and align[i] != 0
target_data = target.data.clone()
correct_mask = target_data.eq(0) * align.data.ne(0)
correct_copy = (align.data + len(self.tgt_vocab)) * correct_mask.long()
target_data = target_data + correct_copy
# Compute sum of perplexities for stats
loss_data = loss.sum().data.clone()
stats = self._stats(loss_data, scores_data, target_data)
if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
# Compute Sequence Lengths
pad_ix = batch.dataset.fields['tgt'].vocab.stoi[inputters.PAD_WORD]
tgt_lens = batch.tgt.ne(pad_ix).float().sum(0)
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:
loss = loss.sum()
    return loss, stats | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/copy_generator.py#L176-L222 |
|
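The "correct target copy token" trick above retargets gold `<unk>` positions (id 0) that have an alignment to the copied source word's slot after the fixed vocabulary. A toy demo with made-up ids:

```python
import torch

tgt_vocab_len = 50000                  # illustrative vocabulary size
target = torch.tensor([45, 0, 0, 7])   # two <unk> positions
align = torch.tensor([0, 12, 0, 0])    # only the first <unk> is aligned
correct_mask = target.eq(0) * align.ne(0)
correct_copy = (align + tgt_vocab_len) * correct_mask.long()
print(target + correct_copy)  # tensor([   45, 50012,     0,     7])
```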
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/embeddings.py | python | Embeddings.word_lut | (self) | return self.make_embedding[0][0] | word look-up table | word look-up table | def word_lut(self):
""" word look-up table """
    return self.make_embedding[0][0] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/embeddings.py#L160-L162 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/embeddings.py | python | Embeddings.emb_luts | (self) | return self.make_embedding[0] | embedding look-up table | embedding look-up table | def emb_luts(self):
""" embedding look-up table """
    return self.make_embedding[0] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/embeddings.py#L165-L167 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/embeddings.py | python | Embeddings.load_pretrained_vectors | (self, emb_file, fixed) | Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
    fixed (bool) : if true, embeddings are not updated | Load in pretrained embeddings. | def load_pretrained_vectors(self, emb_file, fixed):
"""Load in pretrained embeddings.
Args:
emb_file (str) : path to torch serialized embeddings
fixed (bool) : if true, embeddings are not updated
"""
if emb_file:
pretrained = torch.load(emb_file)
pretrained_vec_size = pretrained.size(1)
if self.word_vec_size > pretrained_vec_size:
self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained
elif self.word_vec_size < pretrained_vec_size:
self.word_lut.weight.data \
.copy_(pretrained[:, :self.word_vec_size])
else:
self.word_lut.weight.data.copy_(pretrained)
if fixed:
            self.word_lut.weight.requires_grad = False | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/embeddings.py#L169-L187 |
||
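The branching in load_pretrained_vectors above handles dimension mismatches between the checkpoint and the model. A minimal self-contained sketch of the same logic, with hypothetical sizes standing in for the contents of `emb_file`:
import torch
import torch.nn as nn
emb = nn.Embedding(100, 300)               # model expects 300-dim vectors
pretrained = torch.randn(100, 500)         # stand-in for torch.load(emb_file)
if emb.weight.size(1) < pretrained.size(1):
    # keep only the leading word_vec_size dimensions, as in the elif branch
    emb.weight.data.copy_(pretrained[:, :emb.weight.size(1)])
emb.weight.requires_grad = False           # the `fixed` case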
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/embeddings.py | python | Embeddings.forward | (self, source, step=None) | return source | Computes the embeddings for words and features.
Args:
source (`LongTensor`): index tensor `[len x batch x nfeat]`
Return:
`FloatTensor`: word embeddings `[len x batch x embedding_size]` | Computes the embeddings for words and features. | [
"Computes",
"the",
"embeddings",
"for",
"words",
"and",
"features",
"."
] | def forward(self, source, step=None):
"""
Computes the embeddings for words and features.
Args:
source (`LongTensor`): index tensor `[len x batch x nfeat]`
Return:
`FloatTensor`: word embeddings `[len x batch x embedding_size]`
"""
if self.position_encoding:
for i, module in enumerate(self.make_embedding._modules.values()):
if i == len(self.make_embedding._modules.values()) - 1:
source = module(source, step=step)
else:
source = module(source)
else:
source = self.make_embedding(source)
return source | [
"def",
"forward",
"(",
"self",
",",
"source",
",",
"step",
"=",
"None",
")",
":",
"if",
"self",
".",
"position_encoding",
":",
"for",
"i",
",",
"module",
"in",
"enumerate",
"(",
"self",
".",
"make_embedding",
".",
"_modules",
".",
"values",
"(",
")",
")",
":",
"if",
"i",
"==",
"len",
"(",
"self",
".",
"make_embedding",
".",
"_modules",
".",
"values",
"(",
")",
")",
"-",
"1",
":",
"source",
"=",
"module",
"(",
"source",
",",
"step",
"=",
"step",
")",
"else",
":",
"source",
"=",
"module",
"(",
"source",
")",
"else",
":",
"source",
"=",
"self",
".",
"make_embedding",
"(",
"source",
")",
"return",
"source"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/embeddings.py#L189-L207 |
|
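The loop in Embeddings.forward above exists so that only the final module of `make_embedding` (the positional encoding) receives the decoding `step`. A hedged sketch of the same dispatch pattern, with a toy stand-in module:
import torch
import torch.nn as nn
class StepAware(nn.Module):
    # hypothetical stand-in for a positional-encoding module
    def forward(self, x, step=None):
        return x if step is None else x + step
seq = nn.Sequential(nn.Linear(4, 4), StepAware())
x = torch.randn(2, 4)
mods = list(seq._modules.values())
for i, m in enumerate(mods):
    x = m(x, step=3) if i == len(mods) - 1 else m(x)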
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/sparse_losses.py | python | SparsemaxLossFunction.forward | (ctx, input, target) | return torch.clamp(x / 2 - z_k + 0.5, min=0.0) | input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes | input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes | [
"input",
"(",
"FloatTensor",
")",
":",
"n",
"x",
"num_classes",
"target",
"(",
"LongTensor",
")",
":",
"n",
"the",
"indices",
"of",
"the",
"target",
"classes"
] | def forward(ctx, input, target):
"""
input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes
"""
input_batch, classes = input.size()
target_batch = target.size(0)
aeq(input_batch, target_batch)
z_k = input.gather(1, target.unsqueeze(1)).squeeze()
tau_z, support_size = threshold_and_support(input, dim=1)
support = input > tau_z
x = torch.where(
support, input**2 - tau_z**2,
torch.tensor(0.0, device=input.device)
).sum(dim=1)
ctx.save_for_backward(input, target, tau_z)
# clamping necessary because of numerical errors: loss should be lower
# bounded by zero, but negative values near zero are possible without
# the clamp
return torch.clamp(x / 2 - z_k + 0.5, min=0.0) | [
"def",
"forward",
"(",
"ctx",
",",
"input",
",",
"target",
")",
":",
"input_batch",
",",
"classes",
"=",
"input",
".",
"size",
"(",
")",
"target_batch",
"=",
"target",
".",
"size",
"(",
"0",
")",
"aeq",
"(",
"input_batch",
",",
"target_batch",
")",
"z_k",
"=",
"input",
".",
"gather",
"(",
"1",
",",
"target",
".",
"unsqueeze",
"(",
"1",
")",
")",
".",
"squeeze",
"(",
")",
"tau_z",
",",
"support_size",
"=",
"threshold_and_support",
"(",
"input",
",",
"dim",
"=",
"1",
")",
"support",
"=",
"input",
">",
"tau_z",
"x",
"=",
"torch",
".",
"where",
"(",
"support",
",",
"input",
"**",
"2",
"-",
"tau_z",
"**",
"2",
",",
"torch",
".",
"tensor",
"(",
"0.0",
",",
"device",
"=",
"input",
".",
"device",
")",
")",
".",
"sum",
"(",
"dim",
"=",
"1",
")",
"ctx",
".",
"save_for_backward",
"(",
"input",
",",
"target",
",",
"tau_z",
")",
"# clamping necessary because of numerical errors: loss should be lower",
"# bounded by zero, but negative values near zero are possible without",
"# the clamp",
"return",
"torch",
".",
"clamp",
"(",
"x",
"/",
"2",
"-",
"z_k",
"+",
"0.5",
",",
"min",
"=",
"0.0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/sparse_losses.py#L11-L31 |
|
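Reading the arithmetic back out of the forward pass above: with support set S(z) = {j : z_j > tau(z)} and z_k the score of the gold class, the clamped return value is
    L(z, k) = (1/2) * sum_{j in S(z)} (z_j^2 - tau(z)^2) - z_k + 1/2
which matches the sparsemax loss commonly attributed to Martins & Astudillo (2016); the clamp only absorbs small negative rounding errors.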
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/conv_multi_step_attention.py | python | seq_linear | (linear, x) | return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) | linear transform for 3-d tensor | linear transform for 3-d tensor | [
"linear",
"transform",
"for",
"3",
"-",
"d",
"tensor"
] | def seq_linear(linear, x):
""" linear transform for 3-d tensor """
batch, hidden_size, length, _ = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view(
batch * length, hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) | [
"def",
"seq_linear",
"(",
"linear",
",",
"x",
")",
":",
"batch",
",",
"hidden_size",
",",
"length",
",",
"_",
"=",
"x",
".",
"size",
"(",
")",
"h",
"=",
"linear",
"(",
"torch",
".",
"transpose",
"(",
"x",
",",
"1",
",",
"2",
")",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"batch",
"*",
"length",
",",
"hidden_size",
")",
")",
"return",
"torch",
".",
"transpose",
"(",
"h",
".",
"view",
"(",
"batch",
",",
"length",
",",
"hidden_size",
",",
"1",
")",
",",
"1",
",",
"2",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/conv_multi_step_attention.py#L11-L16 |
|
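A quick shape check of the seq_linear transform above (a self-contained sketch; note the function assumes the trailing dimension is 1):
import torch
import torch.nn as nn
batch, hidden, length = 2, 8, 5
x = torch.randn(batch, hidden, length, 1)
linear = nn.Linear(hidden, hidden)
h = linear(x.transpose(1, 2).contiguous().view(batch * length, hidden))
out = h.view(batch, length, hidden, 1).transpose(1, 2)
assert out.shape == (batch, hidden, length, 1)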
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/conv_multi_step_attention.py | python | ConvMultiStepAttention.apply_mask | (self, mask) | Apply mask | Apply mask | [
"Apply",
"mask"
] | def apply_mask(self, mask):
""" Apply mask """
self.mask = mask | [
"def",
"apply_mask",
"(",
"self",
",",
"mask",
")",
":",
"self",
".",
"mask",
"=",
"mask"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/conv_multi_step_attention.py#L34-L36 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/conv_multi_step_attention.py | python | ConvMultiStepAttention.forward | (self, base_target_emb, input_from_dec, encoder_out_top,
encoder_out_combine) | return context_output, attn | Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode | Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode | [
"Args",
":",
"base_target_emb",
":",
"target",
"emb",
"tensor",
"input",
":",
"output",
"of",
"decode",
"conv",
"encoder_out_t",
":",
"the",
"key",
"matrix",
"for",
"calculation",
"of",
"attetion",
"weight",
"which",
"is",
"the",
"top",
"output",
"of",
"encode",
"conv",
"encoder_out_combine",
":",
"the",
"value",
"matrix",
"for",
"the",
"attention",
"-",
"weighted",
"sum",
"which",
"is",
"the",
"combination",
"of",
"base",
"emb",
"and",
"top",
"output",
"of",
"encode"
] | def forward(self, base_target_emb, input_from_dec, encoder_out_top,
encoder_out_combine):
"""
Args:
base_target_emb: target emb tensor
input: output of decode conv
encoder_out_t: the key matrix for calculation of attention weight,
which is the top output of encode conv
encoder_out_combine:
the value matrix for the attention-weighted sum,
which is the combination of base emb and top output of encode
"""
# checks
# batch, channel, height, width = base_target_emb.size()
batch, _, height, _ = base_target_emb.size()
# batch_, channel_, height_, width_ = input_from_dec.size()
batch_, _, height_, _ = input_from_dec.size()
aeq(batch, batch_)
aeq(height, height_)
# enc_batch, enc_channel, enc_height = encoder_out_top.size()
enc_batch, _, enc_height = encoder_out_top.size()
# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
enc_batch_, _, enc_height_ = encoder_out_combine.size()
aeq(enc_batch, enc_batch_)
aeq(enc_height, enc_height_)
preatt = seq_linear(self.linear_in, input_from_dec)
target = (base_target_emb + preatt) * SCALE_WEIGHT
target = torch.squeeze(target, 3)
target = torch.transpose(target, 1, 2)
pre_attn = torch.bmm(target, encoder_out_top)
if self.mask is not None:
pre_attn.data.masked_fill_(self.mask, -float('inf'))
pre_attn = pre_attn.transpose(0, 2)
attn = F.softmax(pre_attn, dim=-1)
attn = attn.transpose(0, 2).contiguous()
context_output = torch.bmm(
attn, torch.transpose(encoder_out_combine, 1, 2))
context_output = torch.transpose(
torch.unsqueeze(context_output, 3), 1, 2)
return context_output, attn | [
"def",
"forward",
"(",
"self",
",",
"base_target_emb",
",",
"input_from_dec",
",",
"encoder_out_top",
",",
"encoder_out_combine",
")",
":",
"# checks",
"# batch, channel, height, width = base_target_emb.size()",
"batch",
",",
"_",
",",
"height",
",",
"_",
"=",
"base_target_emb",
".",
"size",
"(",
")",
"# batch_, channel_, height_, width_ = input_from_dec.size()",
"batch_",
",",
"_",
",",
"height_",
",",
"_",
"=",
"input_from_dec",
".",
"size",
"(",
")",
"aeq",
"(",
"batch",
",",
"batch_",
")",
"aeq",
"(",
"height",
",",
"height_",
")",
"# enc_batch, enc_channel, enc_height = encoder_out_top.size()",
"enc_batch",
",",
"_",
",",
"enc_height",
"=",
"encoder_out_top",
".",
"size",
"(",
")",
"# enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()",
"enc_batch_",
",",
"_",
",",
"enc_height_",
"=",
"encoder_out_combine",
".",
"size",
"(",
")",
"aeq",
"(",
"enc_batch",
",",
"enc_batch_",
")",
"aeq",
"(",
"enc_height",
",",
"enc_height_",
")",
"preatt",
"=",
"seq_linear",
"(",
"self",
".",
"linear_in",
",",
"input_from_dec",
")",
"target",
"=",
"(",
"base_target_emb",
"+",
"preatt",
")",
"*",
"SCALE_WEIGHT",
"target",
"=",
"torch",
".",
"squeeze",
"(",
"target",
",",
"3",
")",
"target",
"=",
"torch",
".",
"transpose",
"(",
"target",
",",
"1",
",",
"2",
")",
"pre_attn",
"=",
"torch",
".",
"bmm",
"(",
"target",
",",
"encoder_out_top",
")",
"if",
"self",
".",
"mask",
"is",
"not",
"None",
":",
"pre_attn",
".",
"data",
".",
"masked_fill_",
"(",
"self",
".",
"mask",
",",
"-",
"float",
"(",
"'inf'",
")",
")",
"pre_attn",
"=",
"pre_attn",
".",
"transpose",
"(",
"0",
",",
"2",
")",
"attn",
"=",
"F",
".",
"softmax",
"(",
"pre_attn",
",",
"dim",
"=",
"-",
"1",
")",
"attn",
"=",
"attn",
".",
"transpose",
"(",
"0",
",",
"2",
")",
".",
"contiguous",
"(",
")",
"context_output",
"=",
"torch",
".",
"bmm",
"(",
"attn",
",",
"torch",
".",
"transpose",
"(",
"encoder_out_combine",
",",
"1",
",",
"2",
")",
")",
"context_output",
"=",
"torch",
".",
"transpose",
"(",
"torch",
".",
"unsqueeze",
"(",
"context_output",
",",
"3",
")",
",",
"1",
",",
"2",
")",
"return",
"context_output",
",",
"attn"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/conv_multi_step_attention.py#L38-L83 |
|
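The core of the forward pass above is a pair of batched matrix products. A toy walkthrough with hypothetical sizes (queries against keys, then attention weights against values):
import torch
import torch.nn.functional as F
batch, channels, tgt_len, src_len = 2, 8, 5, 7
target = torch.randn(batch, tgt_len, channels)      # queries after seq_linear
keys = torch.randn(batch, channels, src_len)        # encoder_out_top
values = torch.randn(batch, channels, src_len)      # encoder_out_combine
attn = F.softmax(torch.bmm(target, keys), dim=-1)   # (batch, tgt_len, src_len)
context = torch.bmm(attn, values.transpose(1, 2))   # (batch, tgt_len, channels)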
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/weight_norm.py | python | get_var_maybe_avg | (namespace, var_name, training, polyak_decay) | utility for retrieving polyak averaged params
Updates the average | utility for retrieving polyak averaged params
Updates the average | [
"utility",
"for",
"retrieving",
"polyak",
"averaged",
"params",
"Update",
"average"
] | def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
""" utility for retrieving polyak averaged params
Updates the average
"""
v = getattr(namespace, var_name)
v_avg = getattr(namespace, var_name + '_avg')
v_avg -= (1 - polyak_decay) * (v_avg - v.data)
if training:
return v
else:
return v_avg | [
"def",
"get_var_maybe_avg",
"(",
"namespace",
",",
"var_name",
",",
"training",
",",
"polyak_decay",
")",
":",
"v",
"=",
"getattr",
"(",
"namespace",
",",
"var_name",
")",
"v_avg",
"=",
"getattr",
"(",
"namespace",
",",
"var_name",
"+",
"'_avg'",
")",
"v_avg",
"-=",
"(",
"1",
"-",
"polyak_decay",
")",
"*",
"(",
"v_avg",
"-",
"v",
".",
"data",
")",
"if",
"training",
":",
"return",
"v",
"else",
":",
"return",
"v_avg"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/weight_norm.py#L8-L19 |
||
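The in-place update above is the standard exponential moving average rewritten: v_avg <- polyak_decay * v_avg + (1 - polyak_decay) * v. A one-step numerical check of that equivalence:
import torch
decay = 0.9995
v = torch.randn(4)
v_avg = torch.zeros(4)
v_avg -= (1 - decay) * (v_avg - v)          # same form as the function above
assert torch.allclose(v_avg, (1 - decay) * v)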
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/modules/weight_norm.py | python | get_vars_maybe_avg | (namespace, var_names, training, polyak_decay) | return vars | utility for retrieving polyak averaged params | utility for retrieving polyak averaged params | [
"utility",
"for",
"retrieving",
"polyak",
"averaged",
"params"
] | def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
""" utility for retrieving polyak averaged params """
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(
namespace, vn, training, polyak_decay))
return vars | [
"def",
"get_vars_maybe_avg",
"(",
"namespace",
",",
"var_names",
",",
"training",
",",
"polyak_decay",
")",
":",
"vars",
"=",
"[",
"]",
"for",
"vn",
"in",
"var_names",
":",
"vars",
".",
"append",
"(",
"get_var_maybe_avg",
"(",
"namespace",
",",
"vn",
",",
"training",
",",
"polyak_decay",
")",
")",
"return",
"vars"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/modules/weight_norm.py#L22-L28 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase.__reduce_ex__ | (self, proto) | return super(DatasetBase, self).__reduce_ex__() | This is a hack. Something is broken with torch pickle. | This is a hack. Something is broken with torch pickle. | [
"This",
"is",
"a",
"hack",
".",
"Something",
"is",
"broken",
"with",
"torch",
"pickle",
"."
] | def __reduce_ex__(self, proto):
"This is a hack. Something is broken with torch pickle."
return super(DatasetBase, self).__reduce_ex__() | [
"def",
"__reduce_ex__",
"(",
"self",
",",
"proto",
")",
":",
"return",
"super",
"(",
"DatasetBase",
",",
"self",
")",
".",
"__reduce_ex__",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L38-L40 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase.load_fields | (self, vocab_dict) | Load fields from vocab.pt, and set the `fields` attribute.
Args:
vocab_dict (dict): a dict of loaded vocab from vocab.pt file. | Load fields from vocab.pt, and set the `fields` attribute. | [
"Load",
"fields",
"from",
"vocab",
".",
"pt",
"and",
"set",
"the",
"fields",
"attribute",
"."
] | def load_fields(self, vocab_dict):
""" Load fields from vocab.pt, and set the `fields` attribute.
Args:
vocab_dict (dict): a dict of loaded vocab from vocab.pt file.
"""
fields = onmt.inputters.inputter.load_fields_from_vocab(
vocab_dict.items(), self.data_type)
self.fields = dict([(k, f) for (k, f) in fields.items()
if k in self.examples[0].__dict__]) | [
"def",
"load_fields",
"(",
"self",
",",
"vocab_dict",
")",
":",
"fields",
"=",
"onmt",
".",
"inputters",
".",
"inputter",
".",
"load_fields_from_vocab",
"(",
"vocab_dict",
".",
"items",
"(",
")",
",",
"self",
".",
"data_type",
")",
"self",
".",
"fields",
"=",
"dict",
"(",
"[",
"(",
"k",
",",
"f",
")",
"for",
"(",
"k",
",",
"f",
")",
"in",
"fields",
".",
"items",
"(",
")",
"if",
"k",
"in",
"self",
".",
"examples",
"[",
"0",
"]",
".",
"__dict__",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L42-L51 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase.extract_text_features | (tokens) | return tuple(words), features, n_feats - 1 | Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features. | Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features. | [
"Args",
":",
"tokens",
":",
"A",
"list",
"of",
"tokens",
"where",
"each",
"token",
"consists",
"of",
"a",
"word",
"optionally",
"followed",
"by",
"u",
"│",
"-",
"delimited",
"features",
".",
"Returns",
":",
"A",
"sequence",
"of",
"words",
"a",
"sequence",
"of",
"features",
"and",
"num",
"of",
"features",
"."
] | def extract_text_features(tokens):
"""
Args:
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
Returns:
A sequence of words, a sequence of features, and num of features.
"""
if not tokens:
return [], [], -1
specials = [PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD]
words = []
features = []
n_feats = None
#TODO We stop here
for token in tokens:
split_token = token.split(u"│")
assert all([special != split_token[0] for special in specials]), \
"Dataset cannot contain Special Tokens"
if split_token[0]:
words += [split_token[0]]
features += [split_token[1:]]
if n_feats is None:
n_feats = len(split_token)
else:
assert len(split_token) == n_feats, \
"all words must have the same number of features"
features = list(zip(*features))
return tuple(words), features, n_feats - 1 | [
"def",
"extract_text_features",
"(",
"tokens",
")",
":",
"if",
"not",
"tokens",
":",
"return",
"[",
"]",
",",
"[",
"]",
",",
"-",
"1",
"specials",
"=",
"[",
"PAD_WORD",
",",
"UNK_WORD",
",",
"BOS_WORD",
",",
"EOS_WORD",
"]",
"words",
"=",
"[",
"]",
"features",
"=",
"[",
"]",
"n_feats",
"=",
"None",
"#TODO We stop here",
"for",
"token",
"in",
"tokens",
":",
"split_token",
"=",
"token",
".",
"split",
"(",
"u\"│\")",
"",
"assert",
"all",
"(",
"[",
"special",
"!=",
"split_token",
"[",
"0",
"]",
"for",
"special",
"in",
"specials",
"]",
")",
",",
"\"Dataset cannot contain Special Tokens\"",
"if",
"split_token",
"[",
"0",
"]",
":",
"words",
"+=",
"[",
"split_token",
"[",
"0",
"]",
"]",
"features",
"+=",
"[",
"split_token",
"[",
"1",
":",
"]",
"]",
"if",
"n_feats",
"is",
"None",
":",
"n_feats",
"=",
"len",
"(",
"split_token",
")",
"else",
":",
"assert",
"len",
"(",
"split_token",
")",
"==",
"n_feats",
",",
"\"all words must have the same number of features\"",
"features",
"=",
"list",
"(",
"zip",
"(",
"*",
"features",
")",
")",
"return",
"tuple",
"(",
"words",
")",
",",
"features",
",",
"n_feats",
"-",
"1"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L54-L87 |
|
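A hedged sketch of the token convention that extract_text_features above expects, with made-up feature columns:
tokens = [u"The│DT│O", u"cat│NN│O", u"sat│VBD│O"]
words, features = [], []
for token in tokens:
    parts = token.split(u"│")
    words.append(parts[0])        # surface word
    features.append(parts[1:])    # per-token feature values
features = list(zip(*features))   # transpose to one tuple per feature column
# words == ['The', 'cat', 'sat'] and there are 2 extra feature columns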
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase._join_dicts | (self, *args) | return dict(chain(*[d.items() for d in args])) | Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys. | Args:
dictionaries with disjoint keys. | [
"Args",
":",
"dictionaries",
"with",
"disjoint",
"keys",
"."
] | def _join_dicts(self, *args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args])) | [
"def",
"_join_dicts",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"dict",
"(",
"chain",
"(",
"*",
"[",
"d",
".",
"items",
"(",
")",
"for",
"d",
"in",
"args",
"]",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L91-L99 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase._peek | (self, seq) | return first, chain([first], seq) | Args:
seq: an iterator.
Returns:
the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator. | Args:
seq: an iterator. | [
"Args",
":",
"seq",
":",
"an",
"iterator",
"."
] | def _peek(self, seq):
"""
Args:
seq: an iterator.
Returns:
the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator.
"""
first = next(seq)
return first, chain([first], seq) | [
"def",
"_peek",
"(",
"self",
",",
"seq",
")",
":",
"first",
"=",
"next",
"(",
"seq",
")",
"return",
"first",
",",
"chain",
"(",
"[",
"first",
"]",
",",
"seq",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L101-L112 |
|
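The same peek-and-rechain idiom as _peek above, in isolation:
from itertools import chain
it = iter([1, 2, 3])
first = next(it)              # consumes the first element...
it = chain([first], it)       # ...then stitches it back on
assert list(it) == [1, 2, 3]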
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/dataset_base.py | python | DatasetBase._construct_example_fromlist | (self, data, fields) | return ex | Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`.
Returns:
the created `Example` object. | Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`. | [
"Args",
":",
"data",
":",
"the",
"data",
"to",
"be",
"set",
"as",
"the",
"value",
"of",
"the",
"attributes",
"of",
"the",
"to",
"-",
"be",
"-",
"created",
"Example",
"associating",
"with",
"respective",
"Field",
"objects",
"with",
"same",
"key",
".",
"fields",
":",
"a",
"dict",
"of",
"torchtext",
".",
"data",
".",
"Field",
"objects",
".",
"The",
"keys",
"are",
"attributes",
"of",
"the",
"to",
"-",
"be",
"-",
"created",
"Example",
"."
] | def _construct_example_fromlist(self, data, fields):
"""
Args:
data: the data to be set as the value of the attributes of
the to-be-created `Example`, associating with respective
`Field` objects with same key.
fields: a dict of `torchtext.data.Field` objects. The keys
are attributes of the to-be-created `Example`.
Returns:
the created `Example` object.
"""
ex = torchtext.data.Example()
# import pdb;pdb.set_trace()
for (name, field), val in zip(fields, data):
if field is not None:
setattr(ex, name, field.preprocess(val))
else:
setattr(ex, name, val)
return ex | [
"def",
"_construct_example_fromlist",
"(",
"self",
",",
"data",
",",
"fields",
")",
":",
"ex",
"=",
"torchtext",
".",
"data",
".",
"Example",
"(",
")",
"# import pdb;pdb.set_trace()",
"for",
"(",
"name",
",",
"field",
")",
",",
"val",
"in",
"zip",
"(",
"fields",
",",
"data",
")",
":",
"if",
"field",
"is",
"not",
"None",
":",
"setattr",
"(",
"ex",
",",
"name",
",",
"field",
".",
"preprocess",
"(",
"val",
")",
")",
"else",
":",
"setattr",
"(",
"ex",
",",
"name",
",",
"val",
")",
"return",
"ex"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/dataset_base.py#L114-L133 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.sort_key | (self, ex) | return len(ex.src) | Sort using length of source sentences. | Sort using length of source sentences. | [
"Sort",
"using",
"length",
"of",
"source",
"sentences",
"."
] | def sort_key(self, ex):
""" Sort using length of source sentences. """
# Default to a balanced sort, prioritizing tgt len match.
# TODO: make this configurable.
if hasattr(ex, "tgt"):
return len(ex.src), len(ex.tgt)
return len(ex.src) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"# Default to a balanced sort, prioritizing tgt len match.",
"# TODO: make this configurable.",
"if",
"hasattr",
"(",
"ex",
",",
"\"tgt\"",
")",
":",
"return",
"len",
"(",
"ex",
".",
"src",
")",
",",
"len",
"(",
"ex",
".",
"tgt",
")",
"return",
"len",
"(",
"ex",
".",
"src",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L105-L111 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.collapse_copy_scores | (scores, batch, tgt_vocab, src_vocabs) | return scores | Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous. | Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous. | [
"Given",
"scores",
"from",
"an",
"expanded",
"dictionary",
"corresponeding",
"to",
"a",
"batch",
"sums",
"together",
"copies",
"with",
"a",
"dictionary",
"word",
"when",
"it",
"is",
"ambigious",
"."
] | def collapse_copy_scores(scores, batch, tgt_vocab, src_vocabs):
"""
Given scores from an expanded dictionary
corresponding to a batch, sums together copies,
with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
blank = []
fill = []
index = batch.indices.data[b]
src_vocab = src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
blank.append(offset + i)
fill.append(ti)
if blank:
blank = torch.Tensor(blank).type_as(batch.indices.data)
fill = torch.Tensor(fill).type_as(batch.indices.data)
scores[:, b].index_add_(1, fill,
scores[:, b].index_select(1, blank))
scores[:, b].index_fill_(1, blank, 1e-10)
return scores | [
"def",
"collapse_copy_scores",
"(",
"scores",
",",
"batch",
",",
"tgt_vocab",
",",
"src_vocabs",
")",
":",
"offset",
"=",
"len",
"(",
"tgt_vocab",
")",
"for",
"b",
"in",
"range",
"(",
"batch",
".",
"batch_size",
")",
":",
"blank",
"=",
"[",
"]",
"fill",
"=",
"[",
"]",
"index",
"=",
"batch",
".",
"indices",
".",
"data",
"[",
"b",
"]",
"src_vocab",
"=",
"src_vocabs",
"[",
"index",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"src_vocab",
")",
")",
":",
"sw",
"=",
"src_vocab",
".",
"itos",
"[",
"i",
"]",
"ti",
"=",
"tgt_vocab",
".",
"stoi",
"[",
"sw",
"]",
"if",
"ti",
"!=",
"0",
":",
"blank",
".",
"append",
"(",
"offset",
"+",
"i",
")",
"fill",
".",
"append",
"(",
"ti",
")",
"if",
"blank",
":",
"blank",
"=",
"torch",
".",
"Tensor",
"(",
"blank",
")",
".",
"type_as",
"(",
"batch",
".",
"indices",
".",
"data",
")",
"fill",
"=",
"torch",
".",
"Tensor",
"(",
"fill",
")",
".",
"type_as",
"(",
"batch",
".",
"indices",
".",
"data",
")",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_add_",
"(",
"1",
",",
"fill",
",",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_select",
"(",
"1",
",",
"blank",
")",
")",
"scores",
"[",
":",
",",
"b",
"]",
".",
"index_fill_",
"(",
"1",
",",
"blank",
",",
"1e-10",
")",
"return",
"scores"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L114-L138 |
|
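A minimal sketch of one collapse step from the function above, with a hypothetical target vocab of 4 words plus 2 extended copy slots:
import torch
scores = torch.full((1, 6), 0.1)     # 4 tgt-vocab slots + 2 copy slots
blank = torch.tensor([4])            # copy slot whose word is also in tgt vocab
fill = torch.tensor([2])             # that word's index in the tgt vocab
scores.index_add_(1, fill, scores.index_select(1, blank))
scores.index_fill_(1, blank, 1e-10)  # the copy slot is effectively zeroed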
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.make_text_examples_nfeats_tpl | (text_iter, text_path, truncate, side) | return (examples_iter, num_feats) | Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path(str): path to file or None
path (str): location of a src or tgt file.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Returns:
(example_dict iterator, num_feats) tuple. | Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path(str): path to file or None
path (str): location of a src or tgt file.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt". | [
"Args",
":",
"text_iter",
"(",
"iterator",
")",
":",
"an",
"iterator",
"(",
"or",
"None",
")",
"that",
"we",
"can",
"loop",
"over",
"to",
"read",
"examples",
".",
"It",
"may",
"be",
"an",
"openned",
"file",
"a",
"string",
"list",
"etc",
"...",
"text_path",
"(",
"str",
")",
":",
"path",
"to",
"file",
"or",
"None",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"or",
"tgt",
"file",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"sequence",
"length",
"(",
"0",
"for",
"unlimited",
")",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"."
] | def make_text_examples_nfeats_tpl(text_iter, text_path, truncate, side):
"""
Args:
text_iter(iterator): an iterator (or None) that we can loop over
to read examples.
It may be an opened file, a string list etc...
text_path(str): path to file or None
path (str): location of a src or tgt file.
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Returns:
(example_dict iterator, num_feats) tuple.
"""
assert side in ['src', 'tgt']
if text_iter is None:
if text_path is not None:
text_iter = TextDataset.make_text_iterator_from_file(text_path)
else:
return (None, 0)
# All examples have same number of features, so we peek first one
# to get the num_feats.
examples_nfeats_iter = \
TextDataset.make_examples(text_iter, truncate, side)
first_ex = next(examples_nfeats_iter)
num_feats = first_ex[1]
# Chain back the first element - we only want to peek it.
examples_nfeats_iter = chain([first_ex], examples_nfeats_iter)
examples_iter = (ex for ex, nfeats in examples_nfeats_iter)
return (examples_iter, num_feats) | [
"def",
"make_text_examples_nfeats_tpl",
"(",
"text_iter",
",",
"text_path",
",",
"truncate",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"if",
"text_iter",
"is",
"None",
":",
"if",
"text_path",
"is",
"not",
"None",
":",
"text_iter",
"=",
"TextDataset",
".",
"make_text_iterator_from_file",
"(",
"text_path",
")",
"else",
":",
"return",
"(",
"None",
",",
"0",
")",
"# All examples have same number of features, so we peek first one",
"# to get the num_feats.",
"examples_nfeats_iter",
"=",
"TextDataset",
".",
"make_examples",
"(",
"text_iter",
",",
"truncate",
",",
"side",
")",
"first_ex",
"=",
"next",
"(",
"examples_nfeats_iter",
")",
"num_feats",
"=",
"first_ex",
"[",
"1",
"]",
"# Chain back the first element - we only want to peek it.",
"examples_nfeats_iter",
"=",
"chain",
"(",
"[",
"first_ex",
"]",
",",
"examples_nfeats_iter",
")",
"examples_iter",
"=",
"(",
"ex",
"for",
"ex",
",",
"nfeats",
"in",
"examples_nfeats_iter",
")",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L141-L175 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.make_examples | (text_iter, truncate, side) | Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Yields:
(word, features, nfeat) triples for each line. | Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt". | [
"Args",
":",
"text_iter",
"(",
"iterator",
")",
":",
"iterator",
"of",
"text",
"sequences",
"truncate",
"(",
"int",
")",
":",
"maximum",
"sequence",
"length",
"(",
"0",
"for",
"unlimited",
")",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"."
] | def make_examples(text_iter, truncate, side):
"""
Args:
text_iter (iterator): iterator of text sequences
truncate (int): maximum sequence length (0 for unlimited).
side (str): "src" or "tgt".
Yields:
(word, features, nfeat) triples for each line.
"""
for i, line in enumerate(text_iter):
# print('*' * 10)
line = line.strip().split()
if truncate:
line = line[:truncate]
words, feats, n_feats = \
TextDataset.extract_text_features(line)
# print (line)
# print (words)
example_dict = {side: words, "indices": i}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
yield example_dict, n_feats | [
"def",
"make_examples",
"(",
"text_iter",
",",
"truncate",
",",
"side",
")",
":",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"text_iter",
")",
":",
"# print('*' * 10)",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"truncate",
":",
"line",
"=",
"line",
"[",
":",
"truncate",
"]",
"words",
",",
"feats",
",",
"n_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"line",
")",
"# print (line)",
"# print (words)",
"example_dict",
"=",
"{",
"side",
":",
"words",
",",
"\"indices\"",
":",
"i",
"}",
"if",
"feats",
":",
"prefix",
"=",
"side",
"+",
"\"_feat_\"",
"example_dict",
".",
"update",
"(",
"(",
"prefix",
"+",
"str",
"(",
"j",
")",
",",
"f",
")",
"for",
"j",
",",
"f",
"in",
"enumerate",
"(",
"feats",
")",
")",
"yield",
"example_dict",
",",
"n_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L178-L206 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
"(",
"int",
")",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
"(",
"int",
")",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features (int): the number of source features to
create `torchtext.data.Field` for.
n_tgt_features (int): the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
fields["src"] = torchtext.data.Field(
pad_token=PAD_WORD,
include_lengths=True)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
#pdb.set_trace()
src_size = max([t.size(0) for t in data])
src_vocab_size = int(max([t.max() for t in data])) + 1
try:
alignment = torch.zeros(src_size, len(data), src_vocab_size)
except RuntimeError:
print(src_size)
print(len(data))
print(src_vocab_size)
raise
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
def make_sents(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(len(data),tgt_size).long()
for i, sent in enumerate(data):
alignment[i,:sent.size(0)] = sent
return alignment
fields["src_sents"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long, postprocessing=make_sents,sequential=False)
fields["tgt_sents"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long, postprocessing=make_sents,sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
",",
"include_lengths",
"=",
"True",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"#pdb.set_trace()",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"int",
"(",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
")",
"+",
"1",
"try",
":",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"except",
":",
"print",
"(",
"src_size",
")",
"print",
"(",
"len",
"(",
"data",
")",
")",
"print",
"(",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"def",
"make_sents",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"data",
")",
",",
"tgt_size",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
"i",
",",
":",
"sent",
".",
"size",
"(",
"0",
")",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"src_sents\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_sents",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"tgt_sents\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_sents",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L215-L308 |
|
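What the nested make_src above produces, on a tiny hypothetical batch: a one-hot tensor indexed as (src position, batch, source-vocab id):
import torch
data = [torch.tensor([1, 2, 1]), torch.tensor([3, 1])]
src_size = max(t.size(0) for t in data)               # 3
src_vocab_size = int(max(t.max() for t in data)) + 1  # 4
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
    for j, t in enumerate(sent):
        alignment[j, i, t] = 1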
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | TextDataset.get_num_features | (corpus_file, side) | return num_feats | Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same. | [
"Peek",
"one",
"line",
"and",
"get",
"number",
"of",
"features",
"of",
"it",
".",
"(",
"All",
"lines",
"must",
"have",
"same",
"number",
"of",
"features",
")",
".",
"For",
"text",
"corpus",
"both",
"sides",
"are",
"in",
"text",
"form",
"thus",
"it",
"works",
"the",
"same",
"."
] | def get_num_features(corpus_file, side):
"""
Peek one line and get number of features of it.
(All lines must have same number of features).
For text corpus, both sides are in text form, thus
it works the same.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = TextDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L311-L329 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.__init__ | (self, corpus_path, line_truncate, side, shard_size,
assoc_iter=None) | Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with. | Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with. | [
"Args",
":",
"corpus_path",
":",
"the",
"corpus",
"file",
"path",
".",
"line_truncate",
":",
"the",
"maximum",
"length",
"of",
"a",
"line",
"to",
"read",
".",
"0",
"for",
"unlimited",
".",
"side",
":",
"src",
"or",
"tgt",
".",
"shard_size",
":",
"the",
"shard",
"size",
"0",
"means",
"not",
"sharding",
"the",
"file",
".",
"assoc_iter",
":",
"if",
"not",
"None",
"it",
"is",
"the",
"associate",
"iterator",
"that",
"this",
"iterator",
"should",
"align",
"its",
"step",
"with",
"."
] | def __init__(self, corpus_path, line_truncate, side, shard_size,
assoc_iter=None):
"""
Args:
corpus_path: the corpus file path.
line_truncate: the maximum length of a line to read.
0 for unlimited.
side: "src" or "tgt".
shard_size: the shard size, 0 means not sharding the file.
assoc_iter: if not None, it is the associate iterator that
this iterator should align its step with.
"""
try:
# The codecs module seems to have bugs with seek()/tell(),
# so we use io.open().
self.corpus = io.open(corpus_path, "r", encoding="utf-8")
except IOError:
sys.stderr.write("Failed to open corpus file: %s" % corpus_path)
sys.exit(1)
self.line_truncate = line_truncate
self.side = side
self.shard_size = shard_size
self.assoc_iter = assoc_iter
self.last_pos = 0
self.line_index = -1
self.eof = False | [
"def",
"__init__",
"(",
"self",
",",
"corpus_path",
",",
"line_truncate",
",",
"side",
",",
"shard_size",
",",
"assoc_iter",
"=",
"None",
")",
":",
"try",
":",
"# The codecs module seems to have bugs with seek()/tell(),",
"# so we use io.open().",
"self",
".",
"corpus",
"=",
"io",
".",
"open",
"(",
"corpus_path",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"except",
"IOError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Failed to open corpus file: %s\"",
"%",
"corpus_path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"self",
".",
"line_truncate",
"=",
"line_truncate",
"self",
".",
"side",
"=",
"side",
"self",
".",
"shard_size",
"=",
"shard_size",
"self",
".",
"assoc_iter",
"=",
"assoc_iter",
"self",
".",
"last_pos",
"=",
"0",
"self",
".",
"line_index",
"=",
"-",
"1",
"self",
".",
"eof",
"=",
"False"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L406-L432 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.__iter__ | (self) | Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`. | Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`. | [
"Iterator",
"of",
"(",
"example_dict",
"nfeats",
")",
".",
"On",
"each",
"call",
"it",
"iterates",
"over",
"as",
"many",
"(",
"example_dict",
"nfeats",
")",
"tuples",
"until",
"this",
"shard",
"s",
"size",
"equals",
"to",
"or",
"approximates",
"self",
".",
"shard_size",
"."
] | def __iter__(self):
"""
Iterator of (example_dict, nfeats).
On each call, it iterates over as many (example_dict, nfeats) tuples
until this shard's size equals or approximates `self.shard_size`.
"""
iteration_index = -1
if self.assoc_iter is not None:
# We have an associate iterator; just yield tuples
# until we run parallel with it.
while self.line_index < self.assoc_iter.line_index:
line = self.corpus.readline()
if line == '':
raise AssertionError(
"Two corpuses must have same number of lines!")
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index)
if self.assoc_iter.eof:
self.eof = True
self.corpus.close()
else:
# Yield tuples until this shard's size reaches the threshold.
self.corpus.seek(self.last_pos)
while True:
if self.shard_size != 0 and self.line_index % 64 == 0:
# This check is time consuming on Py2 (but
# quite fast on Py3). So we don't bother to
# check every line; instead we check every 64
# lines. Thus we are not dividing exactly at
# `shard_size`, but the difference is small.
cur_pos = self.corpus.tell()
if cur_pos >= self.last_pos + self.shard_size:
self.last_pos = cur_pos
return
line = self.corpus.readline()
if line == '':
self.eof = True
self.corpus.close()
return
self.line_index += 1
iteration_index += 1
yield self._example_dict_iter(line, iteration_index) | [
"def",
"__iter__",
"(",
"self",
")",
":",
"iteration_index",
"=",
"-",
"1",
"if",
"self",
".",
"assoc_iter",
"is",
"not",
"None",
":",
"# We have associate iterator, just yields tuples",
"# util we run parallel with it.",
"while",
"self",
".",
"line_index",
"<",
"self",
".",
"assoc_iter",
".",
"line_index",
":",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"raise",
"AssertionError",
"(",
"\"Two corpuses must have same number of lines!\"",
")",
"self",
".",
"line_index",
"+=",
"1",
"iteration_index",
"+=",
"1",
"yield",
"self",
".",
"_example_dict_iter",
"(",
"line",
",",
"iteration_index",
")",
"if",
"self",
".",
"assoc_iter",
".",
"eof",
":",
"self",
".",
"eof",
"=",
"True",
"self",
".",
"corpus",
".",
"close",
"(",
")",
"else",
":",
"# Yield tuples util this shard's size reaches the threshold.",
"self",
".",
"corpus",
".",
"seek",
"(",
"self",
".",
"last_pos",
")",
"while",
"True",
":",
"if",
"self",
".",
"shard_size",
"!=",
"0",
"and",
"self",
".",
"line_index",
"%",
"64",
"==",
"0",
":",
"# This part of check is time consuming on Py2 (but",
"# it is quite fast on Py3, weird!). So we don't bother",
"# to check for very line. Instead we chekc every 64",
"# lines. Thus we are not dividing exactly per",
"# `shard_size`, but it is not too much difference.",
"cur_pos",
"=",
"self",
".",
"corpus",
".",
"tell",
"(",
")",
"if",
"cur_pos",
">=",
"self",
".",
"last_pos",
"+",
"self",
".",
"shard_size",
":",
"self",
".",
"last_pos",
"=",
"cur_pos",
"return",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"self",
".",
"eof",
"=",
"True",
"self",
".",
"corpus",
".",
"close",
"(",
")",
"return",
"self",
".",
"line_index",
"+=",
"1",
"iteration_index",
"+=",
"1",
"yield",
"self",
".",
"_example_dict_iter",
"(",
"line",
",",
"iteration_index",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L434-L480 |
||
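The sharding bookkeeping above reduces to seek/tell arithmetic on byte offsets. A self-contained sketch of resuming a read exactly where the previous shard stopped:
import io
import os
import tempfile
fd, path = tempfile.mkstemp()
os.close(fd)
with io.open(path, "w", encoding="utf-8") as f:
    f.write(u"one\ntwo\nthree\n")
with io.open(path, "r", encoding="utf-8") as f:
    f.readline()            # pretend the first shard ends after one line
    last_pos = f.tell()     # remember where it ended
with io.open(path, "r", encoding="utf-8") as f:
    f.seek(last_pos)        # the next shard resumes exactly here
    assert f.readline().strip() == u"two"
os.remove(path)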
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.hit_end | (self) | return self.eof | Whether the corpus file has been exhausted. | Whether the corpus file has been exhausted. | [
"?"
] | def hit_end(self):
""" ? """
return self.eof | [
"def",
"hit_end",
"(",
"self",
")",
":",
"return",
"self",
".",
"eof"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L482-L484 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/text_dataset.py | python | ShardedTextCorpusIterator.num_feats | (self) | return self.n_feats | We peek the first line and seek back to
the beginning of the file. | We peek the first line and seek back to
the beginning of the file. | [
"We",
"peek",
"the",
"first",
"line",
"and",
"seek",
"back",
"to",
"the",
"beginning",
"of",
"the",
"file",
"."
] | def num_feats(self):
"""
We peek the first line and seek back to
the beginning of the file.
"""
saved_pos = self.corpus.tell()
line = self.corpus.readline().split()
if self.line_truncate:
line = line[:self.line_truncate]
_, _, self.n_feats = TextDataset.extract_text_features(line)
self.corpus.seek(saved_pos)
return self.n_feats | [
"def",
"num_feats",
"(",
"self",
")",
":",
"saved_pos",
"=",
"self",
".",
"corpus",
".",
"tell",
"(",
")",
"line",
"=",
"self",
".",
"corpus",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"if",
"self",
".",
"line_truncate",
":",
"line",
"=",
"line",
"[",
":",
"self",
".",
"line_truncate",
"]",
"_",
",",
"_",
",",
"self",
".",
"n_feats",
"=",
"TextDataset",
".",
"extract_text_features",
"(",
"line",
")",
"self",
".",
"corpus",
".",
"seek",
"(",
"saved_pos",
")",
"return",
"self",
".",
"n_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/text_dataset.py#L487-L501 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.sort_key | (self, ex) | return (ex.src.size(2), ex.src.size(1)) | Sort using the size of the image: (width, height). | Sort using the size of the image: (width, height). | [
"Sort",
"using",
"the",
"size",
"of",
"the",
"image",
":",
"(",
"width",
"height",
")",
"."
] | def sort_key(self, ex):
""" Sort using the size of the image: (width, height)."""
return (ex.src.size(2), ex.src.size(1)) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"return",
"(",
"ex",
".",
"src",
".",
"size",
"(",
"2",
")",
",",
"ex",
".",
"src",
".",
"size",
"(",
"1",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L80-L82 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.make_image_examples_nfeats_tpl | (img_iter, img_path, img_dir,
image_channel_size=3) | return (examples_iter, num_feats) | Note: one of img_iter and img_path must not be None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images
Returns:
(example_dict iterator, num_feats) tuple | Note: one of img_iter and img_path must not be None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images | [
"Note",
":",
"one",
"of",
"img_iter",
"and",
"img_path",
"must",
"be",
"not",
"None",
"Args",
":",
"img_iter",
"(",
"iterator",
")",
":",
"an",
"iterator",
"that",
"yields",
"pairs",
"(",
"img",
"filename",
")",
"(",
"or",
"None",
")",
"img_path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"image",
"paths",
"(",
"or",
"None",
")",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"images"
] | def make_image_examples_nfeats_tpl(img_iter, img_path, img_dir,
image_channel_size=3):
"""
Note: one of img_iter and img_path must not be None
Args:
img_iter(iterator): an iterator that yields pairs (img, filename)
(or None)
img_path(str): location of a src file containing image paths
(or None)
src_dir (str): location of source images
Returns:
(example_dict iterator, num_feats) tuple
"""
if img_iter is None:
if img_path is not None:
img_iter = ImageDataset. \
make_img_iterator_from_file(img_path,
img_dir,
image_channel_size)
else:
raise ValueError("""One of 'img_iter' and 'img_path'
must not be None""")
examples_iter = ImageDataset.make_examples(img_iter, img_dir, 'src')
num_feats = 0 # Source side(img) has no features.
return (examples_iter, num_feats) | [
"def",
"make_image_examples_nfeats_tpl",
"(",
"img_iter",
",",
"img_path",
",",
"img_dir",
",",
"image_channel_size",
"=",
"3",
")",
":",
"if",
"img_iter",
"is",
"None",
":",
"if",
"img_path",
"is",
"not",
"None",
":",
"img_iter",
"=",
"ImageDataset",
".",
"make_img_iterator_from_file",
"(",
"img_path",
",",
"img_dir",
",",
"image_channel_size",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"One of 'img_iter' and 'img_path'\n must be not None\"\"\"",
")",
"examples_iter",
"=",
"ImageDataset",
".",
"make_examples",
"(",
"img_iter",
",",
"img_dir",
",",
"'src'",
")",
"num_feats",
"=",
"0",
"# Source side(img) has no features.",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L85-L111 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.make_examples | (img_iter, src_dir, side, truncate=None) | Args:
path (str): location of a src file containing image paths
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited)
Yields:
a dictionary containing image data, path and index for each line. | Args:
path (str): location of a src file containing image paths
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited) | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"image",
"paths",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"images",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
"truncate",
":",
"maximum",
"img",
"size",
"((",
"0",
"0",
")",
"or",
"None",
"for",
"unlimited",
")"
] | def make_examples(img_iter, src_dir, side, truncate=None):
"""
Args:
img_iter (iterator): yields (img, filename) pairs
src_dir (str): location of source images
side (str): 'src' or 'tgt'
truncate: maximum img size ((0,0) or None for unlimited)
Yields:
a dictionary containing image data, path and index for each line.
"""
assert (src_dir is not None) and os.path.exists(src_dir), \
'src_dir must be a valid directory if data_type is img'
for index, (img, filename) in enumerate(img_iter):
if truncate and truncate != (0, 0):
if not (img.size(1) <= truncate[0]
and img.size(2) <= truncate[1]):
continue
example_dict = {side: img,
side + '_path': filename,
'indices': index}
yield example_dict | [
"def",
"make_examples",
"(",
"img_iter",
",",
"src_dir",
",",
"side",
",",
"truncate",
"=",
"None",
")",
":",
"assert",
"(",
"src_dir",
"is",
"not",
"None",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"src_dir",
")",
",",
"'src_dir must be a valid directory if data_type is img'",
"for",
"index",
",",
"(",
"img",
",",
"filename",
")",
"in",
"enumerate",
"(",
"img_iter",
")",
":",
"if",
"truncate",
"and",
"truncate",
"!=",
"(",
"0",
",",
"0",
")",
":",
"if",
"not",
"(",
"img",
".",
"size",
"(",
"1",
")",
"<=",
"truncate",
"[",
"0",
"]",
"and",
"img",
".",
"size",
"(",
"2",
")",
"<=",
"truncate",
"[",
"1",
"]",
")",
":",
"continue",
"example_dict",
"=",
"{",
"side",
":",
"img",
",",
"side",
"+",
"'_path'",
":",
"filename",
",",
"'indices'",
":",
"index",
"}",
"yield",
"example_dict"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L114-L137 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.make_img_iterator_from_file | (path, src_dir, image_channel_size=3) | Args:
path(str):
src_dir(str):
Yields:
img: an image tensor
filename(str): the image filename | Args:
path(str):
src_dir(str): | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"src_dir",
"(",
"str",
")",
":"
] | def make_img_iterator_from_file(path, src_dir, image_channel_size=3):
"""
Args:
path(str):
src_dir(str):
Yields:
img: an image tensor
filename(str): the image filename
"""
from PIL import Image
from torchvision import transforms
with codecs.open(path, "r", "utf-8") as corpus_file:
for line in corpus_file:
filename = line.strip()
img_path = os.path.join(src_dir, filename)
if not os.path.exists(img_path):
img_path = line.strip()  # fall back to treating the line itself as the full path
assert os.path.exists(img_path), \
'img path %s not found' % (line.strip())
if (image_channel_size == 1):
img = transforms.ToTensor()(
Image.fromarray(cv2.imread(img_path, 0)))
else:
img = transforms.ToTensor()(Image.open(img_path))
yield img, filename | [
"def",
"make_img_iterator_from_file",
"(",
"path",
",",
"src_dir",
",",
"image_channel_size",
"=",
"3",
")",
":",
"from",
"PIL",
"import",
"Image",
"from",
"torchvision",
"import",
"transforms",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"corpus_file",
":",
"for",
"line",
"in",
"corpus_file",
":",
"filename",
"=",
"line",
".",
"strip",
"(",
")",
"img_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"img_path",
")",
":",
"img_path",
"=",
"line",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"img_path",
")",
",",
"'img path %s not found'",
"%",
"(",
"line",
".",
"strip",
"(",
")",
")",
"if",
"(",
"image_channel_size",
"==",
"1",
")",
":",
"img",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
"(",
"Image",
".",
"fromarray",
"(",
"cv2",
".",
"imread",
"(",
"img_path",
",",
"0",
")",
")",
")",
"else",
":",
"img",
"=",
"transforms",
".",
"ToTensor",
"(",
")",
"(",
"Image",
".",
"open",
"(",
"img_path",
")",
")",
"yield",
"img",
",",
"filename"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L140-L169 |
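A self-contained sketch of the channel handling above, built on in-memory images so nothing is read from disk (assumes numpy, PIL and torchvision are installed):

import numpy as np
from PIL import Image
from torchvision import transforms

to_tensor = transforms.ToTensor()
# image_channel_size == 1: a grayscale array yields a (1, H, W) tensor
gray = to_tensor(Image.fromarray(np.zeros((32, 48), dtype=np.uint8)))
# image_channel_size == 3: an RGB image yields a (3, H, W) tensor
rgb = to_tensor(Image.new('RGB', (48, 32)))
print(gray.shape)  # torch.Size([1, 32, 48])
print(rgb.shape)   # torch.Size([3, 32, 48])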
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
def make_img(data, vocab):
""" ? """
c = data[0].size(0)
h = max([t.size(1) for t in data])
w = max([t.size(2) for t in data])
imgs = torch.zeros(len(data), c, h, w).fill_(1)
for i, img in enumerate(data):
imgs[i, :, 0:img.size(1), 0:img.size(2)] = img
return imgs
fields["src"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_img, sequential=False)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"def",
"make_img",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"c",
"=",
"data",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"h",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"1",
")",
"for",
"t",
"in",
"data",
"]",
")",
"w",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"2",
")",
"for",
"t",
"in",
"data",
"]",
")",
"imgs",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"data",
")",
",",
"c",
",",
"h",
",",
"w",
")",
".",
"fill_",
"(",
"1",
")",
"for",
"i",
",",
"img",
"in",
"enumerate",
"(",
"data",
")",
":",
"imgs",
"[",
"i",
",",
":",
",",
"0",
":",
"img",
".",
"size",
"(",
"1",
")",
",",
"0",
":",
"img",
".",
"size",
"(",
"2",
")",
"]",
"=",
"img",
"return",
"imgs",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_img",
",",
"sequential",
"=",
"False",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
"+",
"1",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L172-L243 |
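A runnable sketch of the make_img collation defined above: images of different sizes are padded with ones up to the batch's maximum height and width, then stacked:

import torch

def pad_images(data):
    # data: list of (c, h, w) tensors sharing the channel count c
    c = data[0].size(0)
    h = max(t.size(1) for t in data)
    w = max(t.size(2) for t in data)
    imgs = torch.zeros(len(data), c, h, w).fill_(1)  # pad value is 1
    for i, img in enumerate(data):
        imgs[i, :, :img.size(1), :img.size(2)] = img
    return imgs

batch = pad_images([torch.zeros(3, 20, 30), torch.zeros(3, 32, 24)])
print(batch.shape)  # torch.Size([2, 3, 32, 30])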
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/image_dataset.py | python | ImageDataset.get_num_features | (corpus_file, side) | return num_feats | For an image corpus, the source side is an image and thus
has no features, while the target side is text, from which
we can extract text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | For an image corpus, the source side is an image and thus
has no features, while the target side is text, from which
we can extract text features. | [
"For",
"image",
"corpus",
"source",
"side",
"is",
"in",
"form",
"of",
"image",
"thus",
"no",
"feature",
";",
"while",
"target",
"side",
"is",
"in",
"form",
"of",
"text",
"thus",
"we",
"can",
"extract",
"its",
"text",
"features",
"."
] | def get_num_features(corpus_file, side):
"""
For an image corpus, the source side is an image and thus
has no features, while the target side is text, from which
we can extract text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
if side == 'src':
num_feats = 0
else:
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = ImageDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"if",
"side",
"==",
"'src'",
":",
"num_feats",
"=",
"0",
"else",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"ImageDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/image_dataset.py#L246-L266 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/audio_dataset.py | python | AudioDataset.sort_key | (self, ex) | return ex.src.size(1) | Sort using duration time of the sound spectrogram. | Sort using duration time of the sound spectrogram. | [
"Sort",
"using",
"duration",
"time",
"of",
"the",
"sound",
"spectrogram",
"."
] | def sort_key(self, ex):
""" Sort using duration time of the sound spectrogram. """
return ex.src.size(1) | [
"def",
"sort_key",
"(",
"self",
",",
"ex",
")",
":",
"return",
"ex",
".",
"src",
".",
"size",
"(",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/audio_dataset.py#L90-L92 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/audio_dataset.py | python | AudioDataset.make_audio_examples_nfeats_tpl | (path, audio_dir,
sample_rate, window_size,
window_stride, window,
normalize_audio, truncate=None) | return (examples_iter, num_feats) | Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited).
Returns:
(example_dict iterator, num_feats) tuple | Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited). | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"audio",
"paths",
".",
"audio_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"audio",
"files",
".",
"sample_rate",
"(",
"int",
")",
":",
"sample_rate",
".",
"window_size",
"(",
"float",
")",
":",
"window",
"size",
"for",
"spectrogram",
"in",
"seconds",
".",
"window_stride",
"(",
"float",
")",
":",
"window",
"stride",
"for",
"spectrogram",
"in",
"seconds",
".",
"window",
"(",
"str",
")",
":",
"window",
"type",
"for",
"spectrogram",
"generation",
".",
"normalize_audio",
"(",
"bool",
")",
":",
"subtract",
"spectrogram",
"by",
"mean",
"and",
"divide",
"by",
"std",
"or",
"not",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"audio",
"length",
"(",
"0",
"or",
"None",
"for",
"unlimited",
")",
"."
] | def make_audio_examples_nfeats_tpl(path, audio_dir,
sample_rate, window_size,
window_stride, window,
normalize_audio, truncate=None):
"""
Args:
path (str): location of a src file containing audio paths.
audio_dir (str): location of source audio files.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited).
Returns:
(example_dict iterator, num_feats) tuple
"""
examples_iter = AudioDataset.read_audio_file(
path, audio_dir, "src", sample_rate,
window_size, window_stride, window,
normalize_audio, truncate)
num_feats = 0 # Source side(audio) has no features.
return (examples_iter, num_feats) | [
"def",
"make_audio_examples_nfeats_tpl",
"(",
"path",
",",
"audio_dir",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
"=",
"None",
")",
":",
"examples_iter",
"=",
"AudioDataset",
".",
"read_audio_file",
"(",
"path",
",",
"audio_dir",
",",
"\"src\"",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
")",
"num_feats",
"=",
"0",
"# Source side(audio) has no features.",
"return",
"(",
"examples_iter",
",",
"num_feats",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/audio_dataset.py#L95-L120 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/audio_dataset.py | python | AudioDataset.read_audio_file | (path, src_dir, side, sample_rate, window_size,
window_stride, window, normalize_audio,
truncate=None) | Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited).
Yields:
a dictionary containing audio data for each line. | Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited). | [
"Args",
":",
"path",
"(",
"str",
")",
":",
"location",
"of",
"a",
"src",
"file",
"containing",
"audio",
"paths",
".",
"src_dir",
"(",
"str",
")",
":",
"location",
"of",
"source",
"audio",
"files",
".",
"side",
"(",
"str",
")",
":",
"src",
"or",
"tgt",
".",
"sample_rate",
"(",
"int",
")",
":",
"sample_rate",
".",
"window_size",
"(",
"float",
")",
":",
"window",
"size",
"for",
"spectrogram",
"in",
"seconds",
".",
"window_stride",
"(",
"float",
")",
":",
"window",
"stride",
"for",
"spectrogram",
"in",
"seconds",
".",
"window",
"(",
"str",
")",
":",
"window",
"type",
"for",
"spectrogram",
"generation",
".",
"normalize_audio",
"(",
"bool",
")",
":",
"subtract",
"spectrogram",
"by",
"mean",
"and",
"divide",
"by",
"std",
"or",
"not",
".",
"truncate",
"(",
"int",
")",
":",
"maximum",
"audio",
"length",
"(",
"0",
"or",
"None",
"for",
"unlimited",
")",
"."
] | def read_audio_file(path, src_dir, side, sample_rate, window_size,
window_stride, window, normalize_audio,
truncate=None):
"""
Args:
path (str): location of a src file containing audio paths.
src_dir (str): location of source audio files.
side (str): 'src' or 'tgt'.
sample_rate (int): sample_rate.
window_size (float) : window size for spectrogram in seconds.
window_stride (float): window stride for spectrogram in seconds.
window (str): window type for spectrogram generation.
normalize_audio (bool): whether to subtract the spectrogram's mean
and divide by its std.
truncate (int): maximum audio length (0 or None for unlimited).
Yields:
a dictionary containing audio data for each line.
"""
assert (src_dir is not None) and os.path.exists(src_dir),\
"src_dir must be a valid directory if data_type is audio"
import torchaudio
import librosa
import numpy as np
with codecs.open(path, "r", "utf-8") as corpus_file:
index = 0
for line in corpus_file:
audio_path = os.path.join(src_dir, line.strip())
if not os.path.exists(audio_path):
audio_path = line.strip()  # fall back to treating the line itself as the full path
assert os.path.exists(audio_path), \
'audio path %s not found' % (line.strip())
sound, sample_rate_ = torchaudio.load(audio_path)  # keep the expected rate in sample_rate
if truncate and truncate > 0:
if sound.size(0) > truncate:
continue
# compare the file's actual rate against the expected sample_rate argument
assert sample_rate_ == sample_rate, \
'Sample rate of %s != -sample_rate (%d vs %d)' \
% (audio_path, sample_rate_, sample_rate)
sound = sound.numpy()
if len(sound.shape) > 1:
if sound.shape[1] == 1:
sound = sound.squeeze()
else:
sound = sound.mean(axis=1) # average multiple channels
n_fft = int(sample_rate * window_size)
win_length = n_fft
hop_length = int(sample_rate * window_stride)
# STFT
d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window)
spect, _ = librosa.magphase(d)
spect = np.log1p(spect)
spect = torch.FloatTensor(spect)
if normalize_audio:
mean = spect.mean()
std = spect.std()
spect.add_(-mean)
spect.div_(std)
example_dict = {side: spect,
side + '_path': line.strip(),
'indices': index}
index += 1
yield example_dict | [
"def",
"read_audio_file",
"(",
"path",
",",
"src_dir",
",",
"side",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"truncate",
"=",
"None",
")",
":",
"assert",
"(",
"src_dir",
"is",
"not",
"None",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"src_dir",
")",
",",
"\"src_dir must be a valid directory if data_type is audio\"",
"import",
"torchaudio",
"import",
"librosa",
"import",
"numpy",
"as",
"np",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"corpus_file",
":",
"index",
"=",
"0",
"for",
"line",
"in",
"corpus_file",
":",
"audio_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"line",
".",
"strip",
"(",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"audio_path",
")",
":",
"audio_path",
"=",
"line",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"audio_path",
")",
",",
"'audio path %s not found'",
"%",
"(",
"line",
".",
"strip",
"(",
")",
")",
"sound",
",",
"sample_rate",
"=",
"torchaudio",
".",
"load",
"(",
"audio_path",
")",
"if",
"truncate",
"and",
"truncate",
">",
"0",
":",
"if",
"sound",
".",
"size",
"(",
"0",
")",
">",
"truncate",
":",
"continue",
"assert",
"sample_rate",
"==",
"sample_rate",
",",
"'Sample rate of %s != -sample_rate (%d vs %d)'",
"%",
"(",
"audio_path",
",",
"sample_rate",
",",
"sample_rate",
")",
"sound",
"=",
"sound",
".",
"numpy",
"(",
")",
"if",
"len",
"(",
"sound",
".",
"shape",
")",
">",
"1",
":",
"if",
"sound",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"sound",
"=",
"sound",
".",
"squeeze",
"(",
")",
"else",
":",
"sound",
"=",
"sound",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"# average multiple channels",
"n_fft",
"=",
"int",
"(",
"sample_rate",
"*",
"window_size",
")",
"win_length",
"=",
"n_fft",
"hop_length",
"=",
"int",
"(",
"sample_rate",
"*",
"window_stride",
")",
"# STFT",
"d",
"=",
"librosa",
".",
"stft",
"(",
"sound",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
")",
"spect",
",",
"_",
"=",
"librosa",
".",
"magphase",
"(",
"d",
")",
"spect",
"=",
"np",
".",
"log1p",
"(",
"spect",
")",
"spect",
"=",
"torch",
".",
"FloatTensor",
"(",
"spect",
")",
"if",
"normalize_audio",
":",
"mean",
"=",
"spect",
".",
"mean",
"(",
")",
"std",
"=",
"spect",
".",
"std",
"(",
")",
"spect",
".",
"add_",
"(",
"-",
"mean",
")",
"spect",
".",
"div_",
"(",
"std",
")",
"example_dict",
"=",
"{",
"side",
":",
"spect",
",",
"side",
"+",
"'_path'",
":",
"line",
".",
"strip",
"(",
")",
",",
"'indices'",
":",
"index",
"}",
"index",
"+=",
"1",
"yield",
"example_dict"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/audio_dataset.py#L123-L195 |
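The STFT bookkeeping above in isolation, run on a synthetic sine wave so no audio files are needed (assumes numpy, librosa and torch are installed; 'hamming' is just an illustrative window choice):

import numpy as np
import librosa
import torch

sample_rate, window_size, window_stride = 16000, 0.02, 0.01
t = np.arange(sample_rate) / sample_rate          # one second of audio
sound = np.sin(2 * np.pi * 440 * t).astype(np.float32)

n_fft = int(sample_rate * window_size)            # 320 samples per window
hop_length = int(sample_rate * window_stride)     # 160 samples per hop
d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                 win_length=n_fft, window='hamming')
spect, _ = librosa.magphase(d)
spect = torch.FloatTensor(np.log1p(spect))
spect = (spect - spect.mean()) / spect.std()      # the normalize_audio branch
print(spect.shape)  # (n_fft // 2 + 1, frames) == torch.Size([161, 101])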
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/audio_dataset.py | python | AudioDataset.get_fields | (n_src_features, n_tgt_features) | return fields | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects. | Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(n_src_features, n_tgt_features):
"""
Args:
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values
are the corresponding Field objects.
"""
fields = {}
def make_audio(data, vocab):
""" ? """
nfft = data[0].size(0)
t = max([t.size(1) for t in data])
sounds = torch.zeros(len(data), 1, nfft, t)
for i, spect in enumerate(data):
sounds[i, :, :, 0:spect.size(1)] = spect
return sounds
fields["src"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_audio, sequential=False)
for j in range(n_src_features):
fields["src_feat_" + str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_" + str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, vocab):
""" ? """
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, dtype=torch.float,
postprocessing=make_src, sequential=False)
def make_tgt(data, vocab):
""" ? """
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, dtype=torch.long,
sequential=False)
return fields | [
"def",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"fields",
"=",
"{",
"}",
"def",
"make_audio",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"nfft",
"=",
"data",
"[",
"0",
"]",
".",
"size",
"(",
"0",
")",
"t",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"1",
")",
"for",
"t",
"in",
"data",
"]",
")",
"sounds",
"=",
"torch",
".",
"zeros",
"(",
"len",
"(",
"data",
")",
",",
"1",
",",
"nfft",
",",
"t",
")",
"for",
"i",
",",
"spect",
"in",
"enumerate",
"(",
"data",
")",
":",
"sounds",
"[",
"i",
",",
":",
",",
":",
",",
"0",
":",
"spect",
".",
"size",
"(",
"1",
")",
"]",
"=",
"spect",
"return",
"sounds",
"fields",
"[",
"\"src\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_audio",
",",
"sequential",
"=",
"False",
")",
"for",
"j",
"in",
"range",
"(",
"n_src_features",
")",
":",
"fields",
"[",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"pad_token",
"=",
"PAD_WORD",
")",
"fields",
"[",
"\"tgt\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"for",
"j",
"in",
"range",
"(",
"n_tgt_features",
")",
":",
"fields",
"[",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"init_token",
"=",
"BOS_WORD",
",",
"eos_token",
"=",
"EOS_WORD",
",",
"pad_token",
"=",
"PAD_WORD",
")",
"def",
"make_src",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"src_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"src_vocab_size",
"=",
"max",
"(",
"[",
"t",
".",
"max",
"(",
")",
"for",
"t",
"in",
"data",
"]",
")",
"+",
"1",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"src_size",
",",
"len",
"(",
"data",
")",
",",
"src_vocab_size",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"for",
"j",
",",
"t",
"in",
"enumerate",
"(",
"sent",
")",
":",
"alignment",
"[",
"j",
",",
"i",
",",
"t",
"]",
"=",
"1",
"return",
"alignment",
"fields",
"[",
"\"src_map\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"float",
",",
"postprocessing",
"=",
"make_src",
",",
"sequential",
"=",
"False",
")",
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"\"\"\" ? \"\"\"",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment",
"fields",
"[",
"\"alignment\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"postprocessing",
"=",
"make_tgt",
",",
"sequential",
"=",
"False",
")",
"fields",
"[",
"\"indices\"",
"]",
"=",
"torchtext",
".",
"data",
".",
"Field",
"(",
"use_vocab",
"=",
"False",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"sequential",
"=",
"False",
")",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/audio_dataset.py#L198-L268 |
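A standalone sketch of the make_src postprocessing defined above: per-sentence index vectors become a one-hot (src_len, batch, extended_vocab) alignment tensor, the src_map consumed by the copy mechanism:

import torch

def make_src(data):
    src_size = max(t.size(0) for t in data)
    src_vocab_size = int(max(t.max() for t in data)) + 1
    alignment = torch.zeros(src_size, len(data), src_vocab_size)
    for i, sent in enumerate(data):
        for j, t in enumerate(sent):
            alignment[j, i, t] = 1  # mark token t at position j of sentence i
    return alignment

a = make_src([torch.LongTensor([1, 2, 3]), torch.LongTensor([2, 1])])
print(a.shape)  # torch.Size([3, 2, 4])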
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/audio_dataset.py | python | AudioDataset.get_num_features | (corpus_file, side) | return num_feats | For an audio corpus, the source side is audio and thus
has no features, while the target side is text, from which
we can extract text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`. | For an audio corpus, the source side is audio and thus
has no features, while the target side is text, from which
we can extract text features. | [
"For",
"audio",
"corpus",
"source",
"side",
"is",
"in",
"form",
"of",
"audio",
"thus",
"no",
"feature",
";",
"while",
"target",
"side",
"is",
"in",
"form",
"of",
"text",
"thus",
"we",
"can",
"extract",
"its",
"text",
"features",
"."
] | def get_num_features(corpus_file, side):
"""
For an audio corpus, the source side is audio and thus
has no features, while the target side is text, from which
we can extract text features.
Args:
corpus_file (str): file path to get the features.
side (str): 'src' or 'tgt'.
Returns:
number of features on `side`.
"""
if side == 'src':
num_feats = 0
else:
with codecs.open(corpus_file, "r", "utf-8") as cf:
f_line = cf.readline().strip().split()
_, _, num_feats = AudioDataset.extract_text_features(f_line)
return num_feats | [
"def",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
":",
"if",
"side",
"==",
"'src'",
":",
"num_feats",
"=",
"0",
"else",
":",
"with",
"codecs",
".",
"open",
"(",
"corpus_file",
",",
"\"r\"",
",",
"\"utf-8\"",
")",
"as",
"cf",
":",
"f_line",
"=",
"cf",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"_",
",",
"_",
",",
"num_feats",
"=",
"AudioDataset",
".",
"extract_text_features",
"(",
"f_line",
")",
"return",
"num_feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/audio_dataset.py#L271-L291 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | get_fields | (data_type, n_src_features, n_tgt_features) | Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values are the
corresponding Field objects. | Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for. | [
"Args",
":",
"data_type",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"n_src_features",
":",
"the",
"number",
"of",
"source",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
".",
"n_tgt_features",
":",
"the",
"number",
"of",
"target",
"features",
"to",
"create",
"torchtext",
".",
"data",
".",
"Field",
"for",
"."
] | def get_fields(data_type, n_src_features, n_tgt_features):
"""
Args:
data_type: type of the source input. Options are [text|img|audio].
n_src_features: the number of source features to
create `torchtext.data.Field` for.
n_tgt_features: the number of target features to
create `torchtext.data.Field` for.
Returns:
A dictionary whose keys are strings and whose values are the
corresponding Field objects.
"""
if data_type == 'text':
return TextDataset.get_fields(n_src_features, n_tgt_features)
elif data_type == 'img':
return ImageDataset.get_fields(n_src_features, n_tgt_features)
elif data_type == 'audio':
return AudioDataset.get_fields(n_src_features, n_tgt_features)
else:
raise ValueError("Data type not implemented") | [
"def",
"get_fields",
"(",
"data_type",
",",
"n_src_features",
",",
"n_tgt_features",
")",
":",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"TextDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"return",
"ImageDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"return",
"AudioDataset",
".",
"get_fields",
"(",
"n_src_features",
",",
"n_tgt_features",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Data type not implemented\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L36-L56 |
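The if/elif dispatch above could equally be table-driven; a sketch under the assumption that the sibling dataset modules import as below (this is an alternative, not the repository's actual code):

from onmt.inputters.text_dataset import TextDataset
from onmt.inputters.image_dataset import ImageDataset
from onmt.inputters.audio_dataset import AudioDataset

FIELD_BUILDERS = {
    'text': TextDataset.get_fields,
    'img': ImageDataset.get_fields,
    'audio': AudioDataset.get_fields,
}

def get_fields(data_type, n_src_features, n_tgt_features):
    if data_type not in FIELD_BUILDERS:
        raise ValueError("Data type not implemented")
    return FIELD_BUILDERS[data_type](n_src_features, n_tgt_features)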
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | load_fields_from_vocab | (vocab, data_type="text") | return fields | Load Field objects from `vocab.pt` file. | Load Field objects from `vocab.pt` file. | [
"Load",
"Field",
"objects",
"from",
"vocab",
".",
"pt",
"file",
"."
] | def load_fields_from_vocab(vocab, data_type="text"):
"""
Load Field objects from `vocab.pt` file.
"""
vocab = dict(vocab)
n_src_features = len(collect_features(vocab, 'src'))
n_tgt_features = len(collect_features(vocab, 'tgt'))
fields = get_fields(data_type, n_src_features, n_tgt_features)
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
fields[k].vocab = v
# TODO: until here, fields has 'tgt_sents'
return fields | [
"def",
"load_fields_from_vocab",
"(",
"vocab",
",",
"data_type",
"=",
"\"text\"",
")",
":",
"vocab",
"=",
"dict",
"(",
"vocab",
")",
"n_src_features",
"=",
"len",
"(",
"collect_features",
"(",
"vocab",
",",
"'src'",
")",
")",
"n_tgt_features",
"=",
"len",
"(",
"collect_features",
"(",
"vocab",
",",
"'tgt'",
")",
")",
"fields",
"=",
"get_fields",
"(",
"data_type",
",",
"n_src_features",
",",
"n_tgt_features",
")",
"for",
"k",
",",
"v",
"in",
"vocab",
".",
"items",
"(",
")",
":",
"# Hack. Can't pickle defaultdict :(",
"v",
".",
"stoi",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
",",
"v",
".",
"stoi",
")",
"fields",
"[",
"k",
"]",
".",
"vocab",
"=",
"v",
"# TODO: until here, fields has 'tgt_sents'",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L59-L79 |
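Why the "can't pickle defaultdict" hack above exists, in a runnable sketch: a defaultdict whose default_factory is a lambda cannot be pickled, while a plain dict (re-wrapped into a defaultdict at load time, as done here) round-trips fine:

import pickle
from collections import defaultdict

stoi = defaultdict(lambda: 0, {'<unk>': 0, 'hello': 4})
try:
    pickle.dumps(stoi)  # tries to pickle the lambda default_factory
except (pickle.PicklingError, AttributeError) as err:
    print('cannot pickle:', err)

pickle.dumps(dict(stoi))  # a plain dict pickles without complaint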
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | save_fields_to_vocab | (fields) | return vocab | Save Vocab objects in Field objects to `vocab.pt` file. | Save Vocab objects in Field objects to `vocab.pt` file. | [
"Save",
"Vocab",
"objects",
"in",
"Field",
"objects",
"to",
"vocab",
".",
"pt",
"file",
"."
] | def save_fields_to_vocab(fields):
"""
Save Vocab objects in Field objects to `vocab.pt` file.
"""
vocab = []
for k, f in fields.items():
if f is not None and 'vocab' in f.__dict__:
f.vocab.stoi = dict(f.vocab.stoi)  # plain dict pickles; a lambda-backed defaultdict does not
vocab.append((k, f.vocab))
return vocab | [
"def",
"save_fields_to_vocab",
"(",
"fields",
")",
":",
"vocab",
"=",
"[",
"]",
"for",
"k",
",",
"f",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"f",
"is",
"not",
"None",
"and",
"'vocab'",
"in",
"f",
".",
"__dict__",
":",
"f",
".",
"vocab",
".",
"stoi",
"=",
"f",
".",
"vocab",
".",
"stoi",
"vocab",
".",
"append",
"(",
"(",
"k",
",",
"f",
".",
"vocab",
")",
")",
"return",
"vocab"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L82-L91 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | merge_vocabs | (vocabs, vocab_size=None) | return torchtext.vocab.Vocab(merged,
specials=[UNK_WORD, PAD_WORD,
BOS_WORD, EOS_WORD],
max_size=vocab_size) | Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab` | Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary. | [
"Merge",
"individual",
"vocabularies",
"(",
"assumed",
"to",
"be",
"generated",
"from",
"disjoint",
"documents",
")",
"into",
"a",
"larger",
"vocabulary",
"."
] | def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Returns:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return torchtext.vocab.Vocab(merged,
specials=[UNK_WORD, PAD_WORD,
BOS_WORD, EOS_WORD],
max_size=vocab_size) | [
"def",
"merge_vocabs",
"(",
"vocabs",
",",
"vocab_size",
"=",
"None",
")",
":",
"merged",
"=",
"sum",
"(",
"[",
"vocab",
".",
"freqs",
"for",
"vocab",
"in",
"vocabs",
"]",
",",
"Counter",
"(",
")",
")",
"return",
"torchtext",
".",
"vocab",
".",
"Vocab",
"(",
"merged",
",",
"specials",
"=",
"[",
"UNK_WORD",
",",
"PAD_WORD",
",",
"BOS_WORD",
",",
"EOS_WORD",
"]",
",",
"max_size",
"=",
"vocab_size",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L94-L109 |
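The frequency merge above, minus the torchtext wrapper, as a runnable sketch; sum() with a Counter() start value adds per-token counts across the individual vocabularies:

from collections import Counter

src_freqs = Counter({'the': 5, 'cat': 2})
tgt_freqs = Counter({'the': 3, 'sat': 1})
merged = sum([src_freqs, tgt_freqs], Counter())
print(merged)  # Counter({'the': 8, 'cat': 2, 'sat': 1})

Passing the specials explicitly keeps their indices at the front of the merged torchtext Vocab regardless of their frequencies.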
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | get_num_features | (data_type, corpus_file, side) | Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target.
Returns:
number of features on `side`. | Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target. | [
"Args",
":",
"data_type",
"(",
"str",
")",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"corpus_file",
"(",
"str",
")",
":",
"file",
"path",
"to",
"get",
"the",
"features",
".",
"side",
"(",
"str",
")",
":",
"for",
"source",
"or",
"for",
"target",
"."
] | def get_num_features(data_type, corpus_file, side):
"""
Args:
data_type (str): type of the source input.
Options are [text|img|audio].
corpus_file (str): file path to get the features.
side (str): for source or for target.
Returns:
number of features on `side`.
"""
assert side in ["src", "tgt"]
if data_type == 'text':
return TextDataset.get_num_features(corpus_file, side)
elif data_type == 'img':
return ImageDataset.get_num_features(corpus_file, side)
elif data_type == 'audio':
return AudioDataset.get_num_features(corpus_file, side)
else:
raise ValueError("Data type not implemented") | [
"def",
"get_num_features",
"(",
"data_type",
",",
"corpus_file",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"\"src\"",
",",
"\"tgt\"",
"]",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"TextDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"return",
"ImageDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"return",
"AudioDataset",
".",
"get_num_features",
"(",
"corpus_file",
",",
"side",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Data type not implemented\"",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L112-L132 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | make_features | (batch, side, data_type='text') | Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch). | Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch). | [
"Args",
":",
"batch",
"(",
"Tensor",
")",
":",
"a",
"batch",
"of",
"source",
"or",
"target",
"data",
".",
"side",
"(",
"str",
")",
":",
"for",
"source",
"or",
"for",
"target",
".",
"data_type",
"(",
"str",
")",
":",
"type",
"of",
"the",
"source",
"input",
".",
"Options",
"are",
"[",
"text|img|audio",
"]",
".",
"Returns",
":",
"A",
"sequence",
"of",
"src",
"/",
"tgt",
"tensors",
"with",
"optional",
"feature",
"tensors",
"of",
"size",
"(",
"len",
"x",
"batch",
")",
"."
] | def make_features(batch, side, data_type='text'):
"""
Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
keys = sorted([k for k in batch.__dict__ if feat_start in k])
features = [batch.__dict__[k] for k in keys]
levels = [data] + features
if data_type == 'text':
return torch.cat([level.unsqueeze(2) for level in levels], 2)
else:
return levels[0] | [
"def",
"make_features",
"(",
"batch",
",",
"side",
",",
"data_type",
"=",
"'text'",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"if",
"isinstance",
"(",
"batch",
".",
"__dict__",
"[",
"side",
"]",
",",
"tuple",
")",
":",
"data",
"=",
"batch",
".",
"__dict__",
"[",
"side",
"]",
"[",
"0",
"]",
"else",
":",
"data",
"=",
"batch",
".",
"__dict__",
"[",
"side",
"]",
"feat_start",
"=",
"side",
"+",
"\"_feat_\"",
"keys",
"=",
"sorted",
"(",
"[",
"k",
"for",
"k",
"in",
"batch",
".",
"__dict__",
"if",
"feat_start",
"in",
"k",
"]",
")",
"features",
"=",
"[",
"batch",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"]",
"levels",
"=",
"[",
"data",
"]",
"+",
"features",
"if",
"data_type",
"==",
"'text'",
":",
"return",
"torch",
".",
"cat",
"(",
"[",
"level",
".",
"unsqueeze",
"(",
"2",
")",
"for",
"level",
"in",
"levels",
"]",
",",
"2",
")",
"else",
":",
"return",
"levels",
"[",
"0",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L135-L160 |
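A sketch of the text branch above: the word tensor and each feature tensor are unsqueezed and concatenated into a single (len, batch, n_feats + 1) input:

import torch

words = torch.LongTensor([[4, 7], [5, 8], [6, 9]])  # (len=3, batch=2)
feats = torch.LongTensor([[1, 1], [2, 2], [3, 3]])  # one feature stream
levels = [words, feats]
out = torch.cat([level.unsqueeze(2) for level in levels], 2)
print(out.shape)  # torch.Size([3, 2, 2])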
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | collect_features | (fields, side="src") | return feats | Collect features from Field object. | Collect features from Field object. | [
"Collect",
"features",
"from",
"Field",
"object",
"."
] | def collect_features(fields, side="src"):
"""
Collect features from Field object.
"""
assert side in ["src", "tgt"]
feats = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feats.append(key)
return feats | [
"def",
"collect_features",
"(",
"fields",
",",
"side",
"=",
"\"src\"",
")",
":",
"assert",
"side",
"in",
"[",
"\"src\"",
",",
"\"tgt\"",
"]",
"feats",
"=",
"[",
"]",
"for",
"j",
"in",
"count",
"(",
")",
":",
"key",
"=",
"side",
"+",
"\"_feat_\"",
"+",
"str",
"(",
"j",
")",
"if",
"key",
"not",
"in",
"fields",
":",
"break",
"feats",
".",
"append",
"(",
"key",
")",
"return",
"feats"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L163-L174 |
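The probe-until-missing pattern above, isolated into a runnable sketch with itertools.count:

from itertools import count

fields = {'src_feat_0': object(), 'src_feat_1': object(), 'tgt': object()}
feats = []
for j in count():          # 0, 1, 2, ... until the key is absent
    key = 'src_feat_' + str(j)
    if key not in fields:
        break
    feats.append(key)
print(feats)  # ['src_feat_0', 'src_feat_1']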
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | collect_feature_vocabs | (fields, side) | return feature_vocabs | Collect feature Vocab objects from Field object. | Collect feature Vocab objects from Field object. | [
"Collect",
"feature",
"Vocab",
"objects",
"from",
"Field",
"object",
"."
] | def collect_feature_vocabs(fields, side):
"""
Collect feature Vocab objects from Field object.
"""
assert side in ['src', 'tgt']
feature_vocabs = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feature_vocabs.append(fields[key].vocab)
return feature_vocabs | [
"def",
"collect_feature_vocabs",
"(",
"fields",
",",
"side",
")",
":",
"assert",
"side",
"in",
"[",
"'src'",
",",
"'tgt'",
"]",
"feature_vocabs",
"=",
"[",
"]",
"for",
"j",
"in",
"count",
"(",
")",
":",
"key",
"=",
"side",
"+",
"\"_feat_\"",
"+",
"str",
"(",
"j",
")",
"if",
"key",
"not",
"in",
"fields",
":",
"break",
"feature_vocabs",
".",
"append",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
"return",
"feature_vocabs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L177-L188 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | build_dataset | (fields, data_type, src_data_iter=None, src_path=None,
src_dir=None, tgt_data_iter=None, tgt_path=None,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
dynamic_dict=True, sample_rate=0,
window_size=0, window_stride=0, window=None,
normalize_audio=True, use_filter_pred=True,
image_channel_size=3) | return dataset | Build src/tgt examples iterator from corpus files, also extract
number of features. | Build src/tgt examples iterator from corpus files, also extract
number of features. | [
"Build",
"src",
"/",
"tgt",
"examples",
"iterator",
"from",
"corpus",
"files",
"also",
"extract",
"number",
"of",
"features",
"."
] | def build_dataset(fields, data_type, src_data_iter=None, src_path=None,
src_dir=None, tgt_data_iter=None, tgt_path=None,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
dynamic_dict=True, sample_rate=0,
window_size=0, window_stride=0, window=None,
normalize_audio=True, use_filter_pred=True,
image_channel_size=3):
"""
Build src/tgt examples iterator from corpus files, also extract
number of features.
"""
def _make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,
src_seq_length_trunc, sample_rate,
window_size, window_stride,
window, normalize_audio,
image_channel_size=3):
"""
Process the corpus into (example_dict iterator, num_feats) tuple
on source side for different 'data_type'.
"""
if data_type == 'text':
src_examples_iter, num_src_feats = \
TextDataset.make_text_examples_nfeats_tpl(
src_data_iter, src_path, src_seq_length_trunc, "src")
elif data_type == 'img':
src_examples_iter, num_src_feats = \
ImageDataset.make_image_examples_nfeats_tpl(
src_data_iter, src_path, src_dir, image_channel_size)
elif data_type == 'audio':
if src_data_iter:
raise ValueError("""Data iterator for AudioDataset isn't
implemented""")
if src_path is None:
raise ValueError("AudioDataset requires a non None path")
src_examples_iter, num_src_feats = \
AudioDataset.make_audio_examples_nfeats_tpl(
src_path, src_dir, sample_rate,
window_size, window_stride, window,
normalize_audio)
return src_examples_iter, num_src_feats
src_examples_iter, num_src_feats = \
_make_examples_nfeats_tpl(data_type, src_data_iter, src_path, src_dir,
src_seq_length_trunc, sample_rate,
window_size, window_stride,
window, normalize_audio,
image_channel_size=image_channel_size)
# For all data types, the tgt side corpus is in form of text.
tgt_examples_iter, num_tgt_feats = \
TextDataset.make_text_examples_nfeats_tpl(
tgt_data_iter, tgt_path, tgt_seq_length_trunc, "tgt")
if data_type == 'text':
dataset = TextDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
src_seq_length=src_seq_length,
tgt_seq_length=tgt_seq_length,
dynamic_dict=dynamic_dict,
use_filter_pred=use_filter_pred)
elif data_type == 'img':
dataset = ImageDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
tgt_seq_length=tgt_seq_length,
use_filter_pred=use_filter_pred,
image_channel_size=image_channel_size)
elif data_type == 'audio':
dataset = AudioDataset(fields, src_examples_iter, tgt_examples_iter,
num_src_feats, num_tgt_feats,
tgt_seq_length=tgt_seq_length,
sample_rate=sample_rate,
window_size=window_size,
window_stride=window_stride,
window=window,
normalize_audio=normalize_audio,
use_filter_pred=use_filter_pred)
return dataset | [
"def",
"build_dataset",
"(",
"fields",
",",
"data_type",
",",
"src_data_iter",
"=",
"None",
",",
"src_path",
"=",
"None",
",",
"src_dir",
"=",
"None",
",",
"tgt_data_iter",
"=",
"None",
",",
"tgt_path",
"=",
"None",
",",
"src_seq_length",
"=",
"0",
",",
"tgt_seq_length",
"=",
"0",
",",
"src_seq_length_trunc",
"=",
"0",
",",
"tgt_seq_length_trunc",
"=",
"0",
",",
"dynamic_dict",
"=",
"True",
",",
"sample_rate",
"=",
"0",
",",
"window_size",
"=",
"0",
",",
"window_stride",
"=",
"0",
",",
"window",
"=",
"None",
",",
"normalize_audio",
"=",
"True",
",",
"use_filter_pred",
"=",
"True",
",",
"image_channel_size",
"=",
"3",
")",
":",
"def",
"_make_examples_nfeats_tpl",
"(",
"data_type",
",",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"src_seq_length_trunc",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"image_channel_size",
"=",
"3",
")",
":",
"\"\"\"\n Process the corpus into (example_dict iterator, num_feats) tuple\n on source side for different 'data_type'.\n \"\"\"",
"if",
"data_type",
"==",
"'text'",
":",
"src_examples_iter",
",",
"num_src_feats",
"=",
"TextDataset",
".",
"make_text_examples_nfeats_tpl",
"(",
"src_data_iter",
",",
"src_path",
",",
"src_seq_length_trunc",
",",
"\"src\"",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"src_examples_iter",
",",
"num_src_feats",
"=",
"ImageDataset",
".",
"make_image_examples_nfeats_tpl",
"(",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"image_channel_size",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"if",
"src_data_iter",
":",
"raise",
"ValueError",
"(",
"\"\"\"Data iterator for AudioDataset isn't\n implemented\"\"\"",
")",
"if",
"src_path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"AudioDataset requires a non None path\"",
")",
"src_examples_iter",
",",
"num_src_feats",
"=",
"AudioDataset",
".",
"make_audio_examples_nfeats_tpl",
"(",
"src_path",
",",
"src_dir",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
")",
"return",
"src_examples_iter",
",",
"num_src_feats",
"src_examples_iter",
",",
"num_src_feats",
"=",
"_make_examples_nfeats_tpl",
"(",
"data_type",
",",
"src_data_iter",
",",
"src_path",
",",
"src_dir",
",",
"src_seq_length_trunc",
",",
"sample_rate",
",",
"window_size",
",",
"window_stride",
",",
"window",
",",
"normalize_audio",
",",
"image_channel_size",
"=",
"image_channel_size",
")",
"# For all data types, the tgt side corpus is in form of text.",
"tgt_examples_iter",
",",
"num_tgt_feats",
"=",
"TextDataset",
".",
"make_text_examples_nfeats_tpl",
"(",
"tgt_data_iter",
",",
"tgt_path",
",",
"tgt_seq_length_trunc",
",",
"\"tgt\"",
")",
"if",
"data_type",
"==",
"'text'",
":",
"dataset",
"=",
"TextDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"src_seq_length",
"=",
"src_seq_length",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"dynamic_dict",
"=",
"dynamic_dict",
",",
"use_filter_pred",
"=",
"use_filter_pred",
")",
"elif",
"data_type",
"==",
"'img'",
":",
"dataset",
"=",
"ImageDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"use_filter_pred",
"=",
"use_filter_pred",
",",
"image_channel_size",
"=",
"image_channel_size",
")",
"elif",
"data_type",
"==",
"'audio'",
":",
"dataset",
"=",
"AudioDataset",
"(",
"fields",
",",
"src_examples_iter",
",",
"tgt_examples_iter",
",",
"num_src_feats",
",",
"num_tgt_feats",
",",
"tgt_seq_length",
"=",
"tgt_seq_length",
",",
"sample_rate",
"=",
"sample_rate",
",",
"window_size",
"=",
"window_size",
",",
"window_stride",
"=",
"window_stride",
",",
"window",
"=",
"window",
",",
"normalize_audio",
"=",
"normalize_audio",
",",
"use_filter_pred",
"=",
"use_filter_pred",
")",
"return",
"dataset"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L191-L277 |
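A hedged end-to-end sketch for the text branch; 'src.txt' and 'tgt.txt' are illustrative stand-ins for line-aligned parallel corpora, and the keyword values are plausible choices rather than the repository's settings:

from onmt.inputters.inputter import get_fields, build_dataset

fields = get_fields('text', n_src_features=0, n_tgt_features=0)
dataset = build_dataset(fields, 'text',
                        src_path='src.txt',   # hypothetical source corpus
                        tgt_path='tgt.txt',   # hypothetical target corpus
                        src_seq_length=400, tgt_seq_length=100,
                        dynamic_dict=True, use_filter_pred=True)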
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | build_vocab | (train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency) | return fields | Args:
train_dataset_files: a list of train dataset pt file.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary.
Returns:
Dict of Fields | Args:
train_dataset_files: a list of train dataset pt file.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary. | [
"Args",
":",
"train_dataset_files",
":",
"a",
"list",
"of",
"train",
"dataset",
"pt",
"file",
".",
"fields",
"(",
"dict",
")",
":",
"fields",
"to",
"build",
"vocab",
"for",
".",
"data_type",
":",
"text",
"img",
"or",
"audio",
"?",
"share_vocab",
"(",
"bool",
")",
":",
"share",
"source",
"and",
"target",
"vocabulary?",
"src_vocab_path",
"(",
"string",
")",
":",
"Path",
"to",
"src",
"vocabulary",
"file",
".",
"src_vocab_size",
"(",
"int",
")",
":",
"size",
"of",
"the",
"source",
"vocabulary",
".",
"src_words_min_frequency",
"(",
"int",
")",
":",
"the",
"minimum",
"frequency",
"needed",
"to",
"include",
"a",
"source",
"word",
"in",
"the",
"vocabulary",
".",
"tgt_vocab_path",
"(",
"string",
")",
":",
"Path",
"to",
"tgt",
"vocabulary",
"file",
".",
"tgt_vocab_size",
"(",
"int",
")",
":",
"size",
"of",
"the",
"target",
"vocabulary",
".",
"tgt_words_min_frequency",
"(",
"int",
")",
":",
"the",
"minimum",
"frequency",
"needed",
"to",
"include",
"a",
"target",
"word",
"in",
"the",
"vocabulary",
"."
] | def build_vocab(train_dataset_files, fields, data_type, share_vocab,
src_vocab_path, src_vocab_size, src_words_min_frequency,
tgt_vocab_path, tgt_vocab_size, tgt_words_min_frequency):
"""
Args:
train_dataset_files: a list of train dataset pt files.
fields (dict): fields to build vocab for.
data_type: "text", "img" or "audio"?
share_vocab(bool): share source and target vocabulary?
src_vocab_path(string): Path to src vocabulary file.
src_vocab_size(int): size of the source vocabulary.
src_words_min_frequency(int): the minimum frequency needed to
include a source word in the vocabulary.
tgt_vocab_path(string): Path to tgt vocabulary file.
tgt_vocab_size(int): size of the target vocabulary.
tgt_words_min_frequency(int): the minimum frequency needed to
include a target word in the vocabulary.
Returns:
Dict of Fields
"""
counter = {}
# Pop src from fields to reduce memory usage when training with images
if data_type == 'img':
fields.pop("src")
for k in fields:
counter[k] = Counter()
# Load vocabulary
src_vocab = load_vocabulary(src_vocab_path, tag="source")
tgt_vocab = load_vocabulary(tgt_vocab_path, tag="target")
for index, path in enumerate(train_dataset_files):
dataset = torch.load(path)
logger.info(" * reloading %s." % path)
for ex in dataset.examples:
for k in fields:
val = getattr(ex, k, None)
if val is not None and not fields[k].sequential:
val = [val]
elif k == 'src' and src_vocab:
val = [item for item in val if item in src_vocab]
elif k == 'tgt' and tgt_vocab:
val = [item for item in val if item in tgt_vocab]
counter[k].update(val)
# Free this dataset's examples from memory, but keep the last dataset
if (index < len(train_dataset_files) - 1):
dataset.examples = None
gc.collect()
del dataset.examples
gc.collect()
del dataset
gc.collect()
_build_field_vocab(fields["tgt"], counter["tgt"],
max_size=tgt_vocab_size,
min_freq=tgt_words_min_frequency)
logger.info(" * tgt vocab size: %d." % len(fields["tgt"].vocab))
# All datasets have the same number of n_tgt_features,
# getting the last one is OK.
for j in range(dataset.n_tgt_feats):
key = "tgt_feat_" + str(j)
_build_field_vocab(fields[key], counter[key])
logger.info(" * %s vocab size: %d." % (key,
len(fields[key].vocab)))
if data_type == 'text':
_build_field_vocab(fields["src"], counter["src"],
max_size=src_vocab_size,
min_freq=src_words_min_frequency)
logger.info(" * src vocab size: %d." % len(fields["src"].vocab))
# All datasets have the same number of n_src_features,
# getting the last one is OK.
for j in range(dataset.n_src_feats):
key = "src_feat_" + str(j)
_build_field_vocab(fields[key], counter[key])
logger.info(" * %s vocab size: %d." %
(key, len(fields[key].vocab)))
# Merge the input and output vocabularies.
if share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
logger.info(" * merging src and tgt vocab...")
merged_vocab = merge_vocabs(
[fields["src"].vocab, fields["tgt"].vocab],
vocab_size=src_vocab_size)
fields["src"].vocab = merged_vocab
fields["tgt"].vocab = merged_vocab
return fields | [
"def",
"build_vocab",
"(",
"train_dataset_files",
",",
"fields",
",",
"data_type",
",",
"share_vocab",
",",
"src_vocab_path",
",",
"src_vocab_size",
",",
"src_words_min_frequency",
",",
"tgt_vocab_path",
",",
"tgt_vocab_size",
",",
"tgt_words_min_frequency",
")",
":",
"counter",
"=",
"{",
"}",
"# Prop src from field to get lower memory using when training with image",
"if",
"data_type",
"==",
"'img'",
":",
"fields",
".",
"pop",
"(",
"\"src\"",
")",
"for",
"k",
"in",
"fields",
":",
"counter",
"[",
"k",
"]",
"=",
"Counter",
"(",
")",
"# Load vocabulary",
"src_vocab",
"=",
"load_vocabulary",
"(",
"src_vocab_path",
",",
"tag",
"=",
"\"source\"",
")",
"tgt_vocab",
"=",
"load_vocabulary",
"(",
"tgt_vocab_path",
",",
"tag",
"=",
"\"target\"",
")",
"for",
"index",
",",
"path",
"in",
"enumerate",
"(",
"train_dataset_files",
")",
":",
"dataset",
"=",
"torch",
".",
"load",
"(",
"path",
")",
"logger",
".",
"info",
"(",
"\" * reloading %s.\"",
"%",
"path",
")",
"for",
"ex",
"in",
"dataset",
".",
"examples",
":",
"for",
"k",
"in",
"fields",
":",
"val",
"=",
"getattr",
"(",
"ex",
",",
"k",
",",
"None",
")",
"if",
"val",
"is",
"not",
"None",
"and",
"not",
"fields",
"[",
"k",
"]",
".",
"sequential",
":",
"val",
"=",
"[",
"val",
"]",
"elif",
"k",
"==",
"'src'",
"and",
"src_vocab",
":",
"val",
"=",
"[",
"item",
"for",
"item",
"in",
"val",
"if",
"item",
"in",
"src_vocab",
"]",
"elif",
"k",
"==",
"'tgt'",
"and",
"tgt_vocab",
":",
"val",
"=",
"[",
"item",
"for",
"item",
"in",
"val",
"if",
"item",
"in",
"tgt_vocab",
"]",
"counter",
"[",
"k",
"]",
".",
"update",
"(",
"val",
")",
"# Drop the none-using from memory but keep the last",
"if",
"(",
"index",
"<",
"len",
"(",
"train_dataset_files",
")",
"-",
"1",
")",
":",
"dataset",
".",
"examples",
"=",
"None",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
".",
"examples",
"gc",
".",
"collect",
"(",
")",
"del",
"dataset",
"gc",
".",
"collect",
"(",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"\"tgt\"",
"]",
",",
"counter",
"[",
"\"tgt\"",
"]",
",",
"max_size",
"=",
"tgt_vocab_size",
",",
"min_freq",
"=",
"tgt_words_min_frequency",
")",
"logger",
".",
"info",
"(",
"\" * tgt vocab size: %d.\"",
"%",
"len",
"(",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
")",
")",
"# All datasets have same num of n_tgt_features,",
"# getting the last one is OK.",
"for",
"j",
"in",
"range",
"(",
"dataset",
".",
"n_tgt_feats",
")",
":",
"key",
"=",
"\"tgt_feat_\"",
"+",
"str",
"(",
"j",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"key",
"]",
",",
"counter",
"[",
"key",
"]",
")",
"logger",
".",
"info",
"(",
"\" * %s vocab size: %d.\"",
"%",
"(",
"key",
",",
"len",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
")",
")",
"if",
"data_type",
"==",
"'text'",
":",
"_build_field_vocab",
"(",
"fields",
"[",
"\"src\"",
"]",
",",
"counter",
"[",
"\"src\"",
"]",
",",
"max_size",
"=",
"src_vocab_size",
",",
"min_freq",
"=",
"src_words_min_frequency",
")",
"logger",
".",
"info",
"(",
"\" * src vocab size: %d.\"",
"%",
"len",
"(",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
")",
")",
"# All datasets have same num of n_src_features,",
"# getting the last one is OK.",
"for",
"j",
"in",
"range",
"(",
"dataset",
".",
"n_src_feats",
")",
":",
"key",
"=",
"\"src_feat_\"",
"+",
"str",
"(",
"j",
")",
"_build_field_vocab",
"(",
"fields",
"[",
"key",
"]",
",",
"counter",
"[",
"key",
"]",
")",
"logger",
".",
"info",
"(",
"\" * %s vocab size: %d.\"",
"%",
"(",
"key",
",",
"len",
"(",
"fields",
"[",
"key",
"]",
".",
"vocab",
")",
")",
")",
"# Merge the input and output vocabularies.",
"if",
"share_vocab",
":",
"# `tgt_vocab_size` is ignored when sharing vocabularies",
"logger",
".",
"info",
"(",
"\" * merging src and tgt vocab...\"",
")",
"merged_vocab",
"=",
"merge_vocabs",
"(",
"[",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
",",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"]",
",",
"vocab_size",
"=",
"src_vocab_size",
")",
"fields",
"[",
"\"src\"",
"]",
".",
"vocab",
"=",
"merged_vocab",
"fields",
"[",
"\"tgt\"",
"]",
".",
"vocab",
"=",
"merged_vocab",
"return",
"fields"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L288-L382 |
|
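
The `max_size` and `min_freq` cutoffs that `build_vocab` forwards to `_build_field_vocab` are easy to check in isolation. Below is a minimal, self-contained sketch of the same pruning rule using `collections.Counter`; the helper name `prune_vocab` is ours, not the repo's.

```python
from collections import Counter

def prune_vocab(counter, max_size=None, min_freq=1):
    # Keep tokens seen at least min_freq times, most frequent first,
    # truncated to max_size entries: the same cutoffs build_vocab passes
    # down as src/tgt_vocab_size and *_words_min_frequency.
    kept = [tok for tok, cnt in counter.most_common() if cnt >= min_freq]
    return kept[:max_size] if max_size is not None else kept

counts = Counter("the cat sat on the mat the end".split())
print(prune_vocab(counts, max_size=3))  # ['the', 'cat', 'sat']; ties keep insertion order in CPython
```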
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | load_vocabulary | (vocabulary_path, tag="") | return vocabulary | Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null | Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null | [
"Loads",
"a",
"vocabulary",
"from",
"the",
"given",
"path",
".",
":",
"param",
"vocabulary_path",
":",
"path",
"to",
"load",
"vocabulary",
"from",
":",
"param",
"tag",
":",
"tag",
"for",
"vocabulary",
"(",
"only",
"used",
"for",
"logging",
")",
":",
"return",
":",
"vocabulary",
"or",
"None",
"if",
"path",
"is",
"null"
] | def load_vocabulary(vocabulary_path, tag=""):
"""
Loads a vocabulary from the given path.
:param vocabulary_path: path to load vocabulary from
:param tag: tag for vocabulary (only used for logging)
:return: vocabulary or None if path is null
"""
vocabulary = None
if vocabulary_path:
vocabulary = set([])
logger.info("Loading {} vocabulary from {}".format(tag,
vocabulary_path))
if not os.path.exists(vocabulary_path):
raise RuntimeError(
"{} vocabulary not found at {}!".format(tag, vocabulary_path))
else:
with open(vocabulary_path) as f:
for line in f:
if len(line.strip()) == 0:
continue
word = line.strip().split()[0]
vocabulary.add(word)
return vocabulary | [
"def",
"load_vocabulary",
"(",
"vocabulary_path",
",",
"tag",
"=",
"\"\"",
")",
":",
"vocabulary",
"=",
"None",
"if",
"vocabulary_path",
":",
"vocabulary",
"=",
"set",
"(",
"[",
"]",
")",
"logger",
".",
"info",
"(",
"\"Loading {} vocabulary from {}\"",
".",
"format",
"(",
"tag",
",",
"vocabulary_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"vocabulary_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"{} vocabulary not found at {}!\"",
".",
"format",
"(",
"tag",
",",
"vocabulary_path",
")",
")",
"else",
":",
"with",
"open",
"(",
"vocabulary_path",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"len",
"(",
"line",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"continue",
"word",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"vocabulary",
".",
"add",
"(",
"word",
")",
"return",
"vocabulary"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L385-L408 |
|
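
`load_vocabulary` keeps only the first whitespace-separated field of each non-empty line, so a vocabulary file may carry frequency counts after each token. A small self-contained sketch of that parsing rule, reading from an in-memory buffer instead of a path:

```python
import io

def read_vocab(handle):
    # First whitespace-separated field of each non-empty line,
    # mirroring the loop in load_vocabulary above.
    vocab = set()
    for line in handle:
        stripped = line.strip()
        if stripped:
            vocab.add(stripped.split()[0])
    return vocab

# A vocab file may carry counts after the token; only the token is kept.
sample = io.StringIO("the 1042\ncat 87\n\nmat 12\n")
print(sorted(read_vocab(sample)))  # ['cat', 'mat', 'the']
```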
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | build_dataset_iter | (datasets, fields, opt, is_train=True) | return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,
device, is_train) | This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but a more sophisticated strategy such as curriculum learning is fine too. | This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but a more sophisticated strategy such as curriculum learning is fine too. | [
"This",
"returns",
"user",
"-",
"defined",
"train",
"/",
"validate",
"data",
"iterator",
"for",
"the",
"trainer",
"to",
"iterate",
"over",
".",
"We",
"implement",
"simple",
"ordered",
"iterator",
"strategy",
"here",
"but",
"more",
"sophisticated",
"strategy",
"like",
"curriculum",
"learning",
"is",
"ok",
"too",
"."
] | def build_dataset_iter(datasets, fields, opt, is_train=True):
"""
This returns user-defined train/validate data iterator for the trainer
to iterate over. We implement a simple ordered iterator strategy here,
but a more sophisticated strategy such as curriculum learning is fine too.
"""
batch_size = opt.batch_size if is_train else opt.valid_batch_size
if is_train and opt.batch_type == "tokens":
def batch_size_fn(new, count, sofar):
"""
In token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch, max_tgt_in_batch
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
# Src: <bos> w1 ... wN <eos>
max_src_in_batch = max(max_src_in_batch, len(new.src) + 2)
# Tgt: w1 ... wN <eos>
max_tgt_in_batch = max(max_tgt_in_batch, len(new.tgt) + 1)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
else:
batch_size_fn = None
if opt.gpu_ranks:
device = "cuda"
else:
device = "cpu"
return DatasetLazyIter(datasets, fields, batch_size, batch_size_fn,
device, is_train) | [
"def",
"build_dataset_iter",
"(",
"datasets",
",",
"fields",
",",
"opt",
",",
"is_train",
"=",
"True",
")",
":",
"batch_size",
"=",
"opt",
".",
"batch_size",
"if",
"is_train",
"else",
"opt",
".",
"valid_batch_size",
"if",
"is_train",
"and",
"opt",
".",
"batch_type",
"==",
"\"tokens\"",
":",
"def",
"batch_size_fn",
"(",
"new",
",",
"count",
",",
"sofar",
")",
":",
"\"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"",
"# Maintains the longest src and tgt length in the current batch",
"global",
"max_src_in_batch",
",",
"max_tgt_in_batch",
"# Reset current longest length at a new batch (count=1)",
"if",
"count",
"==",
"1",
":",
"max_src_in_batch",
"=",
"0",
"max_tgt_in_batch",
"=",
"0",
"# Src: <bos> w1 ... wN <eos>",
"max_src_in_batch",
"=",
"max",
"(",
"max_src_in_batch",
",",
"len",
"(",
"new",
".",
"src",
")",
"+",
"2",
")",
"# Tgt: w1 ... wN <eos>",
"max_tgt_in_batch",
"=",
"max",
"(",
"max_tgt_in_batch",
",",
"len",
"(",
"new",
".",
"tgt",
")",
"+",
"1",
")",
"src_elements",
"=",
"count",
"*",
"max_src_in_batch",
"tgt_elements",
"=",
"count",
"*",
"max_tgt_in_batch",
"return",
"max",
"(",
"src_elements",
",",
"tgt_elements",
")",
"else",
":",
"batch_size_fn",
"=",
"None",
"if",
"opt",
".",
"gpu_ranks",
":",
"device",
"=",
"\"cuda\"",
"else",
":",
"device",
"=",
"\"cpu\"",
"return",
"DatasetLazyIter",
"(",
"datasets",
",",
"fields",
",",
"batch_size",
",",
"batch_size_fn",
",",
"device",
",",
"is_train",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L506-L542 |
|
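
The nested `batch_size_fn` above sizes batches in tokens rather than sentences, tracking the longest source and target seen so that padding is counted too. Here is a self-contained sketch of the same logic with the running maxima held in a closure instead of module-level globals; the integer arguments stand in for `len(new.src)` and `len(new.tgt)`:

```python
def make_token_batch_fn():
    state = {"max_src": 0, "max_tgt": 0}

    def batch_size_fn(src_len, tgt_len, count):
        if count == 1:  # first example of a new batch: reset the maxima
            state["max_src"], state["max_tgt"] = 0, 0
        state["max_src"] = max(state["max_src"], src_len + 2)  # <bos> ... <eos>
        state["max_tgt"] = max(state["max_tgt"], tgt_len + 1)  # ... <eos>
        return max(count * state["max_src"], count * state["max_tgt"])

    return batch_size_fn

fn = make_token_batch_fn()
print(fn(10, 8, 1))  # 12 tokens so far, padding included
print(fn(20, 5, 2))  # 44: both sentences padded to the longest source
```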
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | lazily_load_dataset | (corpus_type, opt) | Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed to the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of datasets; the dataset(s) are lazily loaded. | Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed to the first loading time. | [
"Dataset",
"generator",
".",
"Don",
"t",
"do",
"extra",
"stuff",
"here",
"like",
"printing",
"because",
"they",
"will",
"be",
"postponed",
"to",
"the",
"first",
"loading",
"time",
"."
] | def lazily_load_dataset(corpus_type, opt):
"""
Dataset generator. Don't do extra stuff here, like printing,
because it will be postponed to the first loading time.
Args:
corpus_type: 'train' or 'valid'
Returns:
A list of datasets; the dataset(s) are lazily loaded.
"""
assert corpus_type in ["train", "valid"]
def _lazy_dataset_loader(pt_file, corpus_type):
dataset = torch.load(pt_file)
# logger.info('Loading %s dataset from %s, number of examples: %d' %
# (corpus_type, pt_file, len(dataset)))
# import pdb;
# pdb.set_trace()
return dataset
# Sort the glob output by file name (by increasing indexes).
pts = sorted(glob.glob(opt.data + '.' + corpus_type + '.[0-9]*.pt'))
if pts:
for pt in pts:
yield _lazy_dataset_loader(pt, corpus_type)
else:
# Only one inputters.*Dataset, simple!
pt = opt.data + '.' + corpus_type + '.pt'
yield _lazy_dataset_loader(pt, corpus_type) | [
"def",
"lazily_load_dataset",
"(",
"corpus_type",
",",
"opt",
")",
":",
"assert",
"corpus_type",
"in",
"[",
"\"train\"",
",",
"\"valid\"",
"]",
"def",
"_lazy_dataset_loader",
"(",
"pt_file",
",",
"corpus_type",
")",
":",
"dataset",
"=",
"torch",
".",
"load",
"(",
"pt_file",
")",
"# logger.info('Loading %s dataset from %s, number of examples: %d' %",
"# (corpus_type, pt_file, len(dataset)))",
"# import pdb;",
"# pdb.set_trace()",
"return",
"dataset",
"# Sort the glob output by file name (by increasing indexes).",
"pts",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"opt",
".",
"data",
"+",
"'.'",
"+",
"corpus_type",
"+",
"'.[0-9]*.pt'",
")",
")",
"if",
"pts",
":",
"for",
"pt",
"in",
"pts",
":",
"yield",
"_lazy_dataset_loader",
"(",
"pt",
",",
"corpus_type",
")",
"else",
":",
"# Only one inputters.*Dataset, simple!",
"pt",
"=",
"opt",
".",
"data",
"+",
"'.'",
"+",
"corpus_type",
"+",
"'.pt'",
"yield",
"_lazy_dataset_loader",
"(",
"pt",
",",
"corpus_type",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L545-L575 |
||
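
One caveat with the shard loop above: the comment promises files sorted "by increasing indexes", but a plain `sorted` over file names is lexicographic, so `train.10.pt` comes before `train.2.pt` once there are ten or more shards. A numeric key fixes the ordering; a minimal illustration (the helper name is ours):

```python
import re

def shard_index(path):
    # Numeric shard id from names like 'data.train.10.pt'.
    match = re.search(r"\.(\d+)\.pt$", path)
    return int(match.group(1)) if match else -1

names = ["data.train.2.pt", "data.train.10.pt", "data.train.1.pt"]
print(sorted(names))                   # lexicographic: 1, 10, 2
print(sorted(names, key=shard_index))  # numeric: 1, 2, 10
```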
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/inputters/inputter.py | python | OrderedIterator.create_batches | (self) | Create batches | Create batches | [
"Create",
"batches"
] | def create_batches(self):
""" Create batches """
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = torchtext.data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key)) | [
"def",
"create_batches",
"(",
"self",
")",
":",
"if",
"self",
".",
"train",
":",
"def",
"_pool",
"(",
"data",
",",
"random_shuffler",
")",
":",
"for",
"p",
"in",
"torchtext",
".",
"data",
".",
"batch",
"(",
"data",
",",
"self",
".",
"batch_size",
"*",
"100",
")",
":",
"p_batch",
"=",
"torchtext",
".",
"data",
".",
"batch",
"(",
"sorted",
"(",
"p",
",",
"key",
"=",
"self",
".",
"sort_key",
")",
",",
"self",
".",
"batch_size",
",",
"self",
".",
"batch_size_fn",
")",
"for",
"b",
"in",
"random_shuffler",
"(",
"list",
"(",
"p_batch",
")",
")",
":",
"yield",
"b",
"self",
".",
"batches",
"=",
"_pool",
"(",
"self",
".",
"data",
"(",
")",
",",
"self",
".",
"random_shuffler",
")",
"else",
":",
"self",
".",
"batches",
"=",
"[",
"]",
"for",
"b",
"in",
"torchtext",
".",
"data",
".",
"batch",
"(",
"self",
".",
"data",
"(",
")",
",",
"self",
".",
"batch_size",
",",
"self",
".",
"batch_size_fn",
")",
":",
"self",
".",
"batches",
".",
"append",
"(",
"sorted",
"(",
"b",
",",
"key",
"=",
"self",
".",
"sort_key",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/inputters/inputter.py#L414-L430 |
||
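
`create_batches` sorts within pools of `batch_size * 100` examples, cuts each pool into batches, and then shuffles the batches: lengths stay nearly uniform inside a batch (little padding) while batch order stays random. A self-contained sketch of that pool, sort, shuffle pattern, using string length as a stand-in for `sort_key`:

```python
import random

def pooled_batches(examples, batch_size, pool_factor=100, seed=0):
    rng = random.Random(seed)
    pool_size = batch_size * pool_factor
    for i in range(0, len(examples), pool_size):
        pool = sorted(examples[i:i + pool_size], key=len)
        batches = [pool[j:j + batch_size]
                   for j in range(0, len(pool), batch_size)]
        rng.shuffle(batches)  # random batch order, similar lengths within each
        for batch in batches:
            yield batch

data = ["x" * n for n in (5, 1, 4, 2, 8, 3)]
print([[len(s) for s in b] for b in pooled_batches(data, batch_size=2)])
```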
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.start | (self, config_file) | Read the config file and pre-/load the models | Read the config file and pre-/load the models | [
"Read",
"the",
"config",
"file",
"and",
"pre",
"-",
"/",
"load",
"the",
"models"
] | def start(self, config_file):
"""Read the config file and pre-/load the models
"""
self.config_file = config_file
with open(self.config_file) as f:
self.confs = json.load(f)
self.models_root = self.confs.get('models_root', './available_models')
for i, conf in enumerate(self.confs["models"]):
if "models" not in conf:
if "model" in conf:
# backwards compatibility for confs
conf["models"] = [conf["model"]]
else:
raise ValueError("""Incorrect config file: missing 'models'
parameter for model #%d""" % i)
kwargs = {'timeout': conf.get('timeout', None),
'load': conf.get('load', None),
'tokenizer_opt': conf.get('tokenizer', None),
'on_timeout': conf.get('on_timeout', None),
'model_root': conf.get('model_root', self.models_root)
}
kwargs = {k: v for (k, v) in kwargs.items() if v is not None}
model_id = conf.get("id", None)
opt = conf["opt"]
opt["models"] = conf["models"]
self.preload_model(opt, model_id=model_id, **kwargs) | [
"def",
"start",
"(",
"self",
",",
"config_file",
")",
":",
"self",
".",
"config_file",
"=",
"config_file",
"with",
"open",
"(",
"self",
".",
"config_file",
")",
"as",
"f",
":",
"self",
".",
"confs",
"=",
"json",
".",
"load",
"(",
"f",
")",
"self",
".",
"models_root",
"=",
"self",
".",
"confs",
".",
"get",
"(",
"'models_root'",
",",
"'./available_models'",
")",
"for",
"i",
",",
"conf",
"in",
"enumerate",
"(",
"self",
".",
"confs",
"[",
"\"models\"",
"]",
")",
":",
"if",
"\"models\"",
"not",
"in",
"conf",
":",
"if",
"\"model\"",
"in",
"conf",
":",
"# backwards compatibility for confs",
"conf",
"[",
"\"models\"",
"]",
"=",
"[",
"conf",
"[",
"\"model\"",
"]",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"Incorrect config file: missing 'models'\n parameter for model #%d\"\"\"",
"%",
"i",
")",
"kwargs",
"=",
"{",
"'timeout'",
":",
"conf",
".",
"get",
"(",
"'timeout'",
",",
"None",
")",
",",
"'load'",
":",
"conf",
".",
"get",
"(",
"'load'",
",",
"None",
")",
",",
"'tokenizer_opt'",
":",
"conf",
".",
"get",
"(",
"'tokenizer'",
",",
"None",
")",
",",
"'on_timeout'",
":",
"conf",
".",
"get",
"(",
"'on_timeout'",
",",
"None",
")",
",",
"'model_root'",
":",
"conf",
".",
"get",
"(",
"'model_root'",
",",
"self",
".",
"models_root",
")",
"}",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"(",
"k",
",",
"v",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"v",
"is",
"not",
"None",
"}",
"model_id",
"=",
"conf",
".",
"get",
"(",
"\"id\"",
",",
"None",
")",
"opt",
"=",
"conf",
"[",
"\"opt\"",
"]",
"opt",
"[",
"\"models\"",
"]",
"=",
"conf",
"[",
"\"models\"",
"]",
"self",
".",
"preload_model",
"(",
"opt",
",",
"model_id",
"=",
"model_id",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L54-L80 |
||
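
Reading `start` above, the JSON configuration it parses has a top-level `models_root` plus a `models` list, and within each entry only `opt` and `models` (or the legacy `model` key) are required, since everything else is fetched with `conf.get(...)`. A sketch of a minimal configuration; all values are illustrative:

```python
import json

conf = {
    "models_root": "./available_models",
    "models": [
        {
            "id": 100,                   # optional; auto-assigned otherwise
            "models": ["model_0.pt"],    # older configs may use "model" instead
            "timeout": 600,
            "on_timeout": "to_cpu",
            "load": True,
            "opt": {"gpu": -1, "beam_size": 5},
        }
    ],
}
print(json.dumps(conf, indent=2))
```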
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.clone_model | (self, model_id, opt, timeout=-1) | Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options | Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options | [
"Clone",
"a",
"model",
"model_id",
".",
"Different",
"options",
"may",
"be",
"passed",
".",
"If",
"opt",
"is",
"None",
"it",
"will",
"use",
"the",
"same",
"set",
"of",
"options"
] | def clone_model(self, model_id, opt, timeout=-1):
"""Clone a model `model_id`.
Different options may be passed. If `opt` is None, it will use the
same set of options
"""
if model_id in self.models:
if opt is None:
opt = self.models[model_id].user_opt
opt["models"] = self.models[model_id].opt.models
return self.load_model(opt, timeout)
else:
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"clone_model",
"(",
"self",
",",
"model_id",
",",
"opt",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
":",
"if",
"opt",
"is",
"None",
":",
"opt",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"user_opt",
"opt",
"[",
"\"models\"",
"]",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"opt",
".",
"models",
"return",
"self",
".",
"load_model",
"(",
"opt",
",",
"timeout",
")",
"else",
":",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L82-L93 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.load_model | (self, opt, model_id=None, **model_kwargs) | return model_id, load_time | Loading a model given a set of options | Loading a model given a set of options | [
"Loading",
"a",
"model",
"given",
"a",
"set",
"of",
"options"
] | def load_model(self, opt, model_id=None, **model_kwargs):
"""Loading a model given a set of options
"""
model_id = self.preload_model(opt, model_id=model_id, **model_kwargs)
load_time = self.models[model_id].load_time
return model_id, load_time | [
"def",
"load_model",
"(",
"self",
",",
"opt",
",",
"model_id",
"=",
"None",
",",
"*",
"*",
"model_kwargs",
")",
":",
"model_id",
"=",
"self",
".",
"preload_model",
"(",
"opt",
",",
"model_id",
"=",
"model_id",
",",
"*",
"*",
"model_kwargs",
")",
"load_time",
"=",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"load_time",
"return",
"model_id",
",",
"load_time"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L95-L101 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.preload_model | (self, opt, model_id=None, **model_kwargs) | | return model_id | Preloading the model: updating internal data structure
It will effectively load the model if `load` is set | Preloading the model: updating internal data structure
It will effectively load the model if `load` is set | [
"Preloading",
"the",
"model",
":",
"updating",
"internal",
"datastructure",
"It",
"will",
"effectively",
"load",
"the",
"model",
"if",
"load",
"is",
"set"
] | def preload_model(self, opt, model_id=None, **model_kwargs):
"""Preloading the model: updating internal datastructure
It will effectively load the model if `load` is set
"""
if model_id is not None:
if model_id in self.models.keys():
raise ValueError("Model ID %d already exists" % model_id)
else:
model_id = self.next_id
while model_id in self.models.keys():
model_id += 1
self.next_id = model_id + 1
print("Pre-loading model %d" % model_id)
model = ServerModel(opt, model_id, **model_kwargs)
self.models[model_id] = model
return model_id | [
"def",
"preload_model",
"(",
"self",
",",
"opt",
",",
"model_id",
"=",
"None",
",",
"*",
"*",
"model_kwargs",
")",
":",
"if",
"model_id",
"is",
"not",
"None",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Model ID %d already exists\"",
"%",
"model_id",
")",
"else",
":",
"model_id",
"=",
"self",
".",
"next_id",
"while",
"model_id",
"in",
"self",
".",
"models",
".",
"keys",
"(",
")",
":",
"model_id",
"+=",
"1",
"self",
".",
"next_id",
"=",
"model_id",
"+",
"1",
"print",
"(",
"\"Pre-loading model %d\"",
"%",
"model_id",
")",
"model",
"=",
"ServerModel",
"(",
"opt",
",",
"model_id",
",",
"*",
"*",
"model_kwargs",
")",
"self",
".",
"models",
"[",
"model_id",
"]",
"=",
"model",
"return",
"model_id"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L103-L119 |
|
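
The id bookkeeping in `preload_model` is small but easy to misread: an explicitly passed id must be unused, otherwise the next free integer starting from `self.next_id` is taken. The allocation step in isolation:

```python
def next_free_id(existing_ids, start):
    # First integer >= start that is not already taken,
    # as in the while-loop of preload_model above.
    model_id = start
    while model_id in existing_ids:
        model_id += 1
    return model_id

print(next_free_id({0, 1, 3}, start=0))  # 2
```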
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.run | (self, inputs) | Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}]
We use inputs[0]["id"] as the model id | Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}] | [
"Translate",
"inputs",
"We",
"keep",
"the",
"same",
"format",
"as",
"the",
"Lua",
"version",
"i",
".",
"e",
".",
"[",
"{",
"id",
":",
"model_id",
"src",
":",
"sequence",
"to",
"translate",
"}",
"{",
"...",
"}",
"]"
] | def run(self, inputs):
"""Translate `inputs`
We keep the same format as the Lua version i.e.
[{"id": model_id, "src": "sequence to translate"},{ ...}]
We use inputs[0]["id"] as the model id
"""
model_id = inputs[0].get("id", 0)
if model_id in self.models and self.models[model_id] is not None:
return self.models[model_id].run(inputs)
else:
print("Error No such model '%s'" % str(model_id))
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"run",
"(",
"self",
",",
"inputs",
")",
":",
"model_id",
"=",
"inputs",
"[",
"0",
"]",
".",
"get",
"(",
"\"id\"",
",",
"0",
")",
"if",
"model_id",
"in",
"self",
".",
"models",
"and",
"self",
".",
"models",
"[",
"model_id",
"]",
"is",
"not",
"None",
":",
"return",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"run",
"(",
"inputs",
")",
"else",
":",
"print",
"(",
"\"Error No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L121-L133 |
||
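
The request format `TranslationServer.run` consumes mirrors the Lua server: a list of dicts, with the model chosen once from the first item, so a mixed-id request is silently served by a single model. An illustrative payload:

```python
inputs = [
    {"id": 100, "src": "Hello world !"},  # inputs[0]["id"] picks the model
    {"src": "A second sentence ."},       # translated by the same model
]
print(inputs[0].get("id", 0))  # the model id the server will use
```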
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.unload_model | (self, model_id) | Manually unload a model.
It will free the memory and cancel the timer | Manually unload a model.
It will free the memory and cancel the timer | [
"Manually",
"unload",
"a",
"model",
".",
"It",
"will",
"free",
"the",
"memory",
"and",
"cancel",
"the",
"timer"
] | def unload_model(self, model_id):
"""Manually unload a model.
It will free the memory and cancel the timer
"""
if model_id in self.models and self.models[model_id] is not None:
self.models[model_id].unload()
else:
raise ServerModelError("No such model '%s'" % str(model_id)) | [
"def",
"unload_model",
"(",
"self",
",",
"model_id",
")",
":",
"if",
"model_id",
"in",
"self",
".",
"models",
"and",
"self",
".",
"models",
"[",
"model_id",
"]",
"is",
"not",
"None",
":",
"self",
".",
"models",
"[",
"model_id",
"]",
".",
"unload",
"(",
")",
"else",
":",
"raise",
"ServerModelError",
"(",
"\"No such model '%s'\"",
"%",
"str",
"(",
"model_id",
")",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L135-L142 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | TranslationServer.list_models | (self) | return models | Return the list of available models | Return the list of available models | [
"Return",
"the",
"list",
"of",
"available",
"models"
] | def list_models(self):
"""Return the list of available models
"""
models = []
for _, model in self.models.items():
models += [model.to_dict()]
return models | [
"def",
"list_models",
"(",
"self",
")",
":",
"models",
"=",
"[",
"]",
"for",
"_",
",",
"model",
"in",
"self",
".",
"models",
".",
"items",
"(",
")",
":",
"models",
"+=",
"[",
"model",
".",
"to_dict",
"(",
")",
"]",
"return",
"models"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L144-L150 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.__init__ | (self, opt, model_id, tokenizer_opt=None, load=False,
timeout=-1, on_timeout="to_cpu", model_root="./") | Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files | Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files | [
"Args",
":",
"opt",
":",
"(",
"dict",
")",
"options",
"for",
"the",
"Translator",
"model_id",
":",
"(",
"int",
")",
"model",
"id",
"tokenizer_opt",
":",
"(",
"dict",
")",
"options",
"for",
"the",
"tokenizer",
"or",
"None",
"load",
":",
"(",
"bool",
")",
"whether",
"to",
"load",
"the",
"model",
"during",
"__init__",
"timeout",
":",
"(",
"int",
")",
"seconds",
"before",
"running",
"do_timeout",
"Negative",
"values",
"means",
"no",
"timeout",
"on_timeout",
":",
"(",
"str",
")",
"in",
"[",
"to_cpu",
"unload",
"]",
"set",
"what",
"to",
"do",
"on",
"timeout",
"(",
"see",
"function",
"do_timeout",
")",
"model_root",
":",
"(",
"str",
")",
"path",
"to",
"the",
"model",
"directory",
"it",
"must",
"contain",
"de",
"model",
"and",
"tokenizer",
"file"
] | def __init__(self, opt, model_id, tokenizer_opt=None, load=False,
timeout=-1, on_timeout="to_cpu", model_root="./"):
"""
Args:
opt: (dict) options for the Translator
model_id: (int) model id
tokenizer_opt: (dict) options for the tokenizer or None
load: (bool) whether to load the model during __init__
timeout: (int) seconds before running `do_timeout`
Negative values mean no timeout
on_timeout: (str) in ["to_cpu", "unload"] set what to do on
timeout (see function `do_timeout`)
model_root: (str) path to the model directory
it must contain the model and tokenizer files
"""
self.model_root = model_root
self.opt = self.parse_opt(opt)
if self.opt.n_best > 1:
raise ValueError("Values of n_best > 1 are not supported")
self.model_id = model_id
self.tokenizer_opt = tokenizer_opt
self.timeout = timeout
self.on_timeout = on_timeout
self.unload_timer = None
self.user_opt = opt
self.tokenizer = None
self.logger = init_logger(self.opt.log_file)
self.loading_lock = threading.Event()
self.loading_lock.set()
if load:
self.load() | [
"def",
"__init__",
"(",
"self",
",",
"opt",
",",
"model_id",
",",
"tokenizer_opt",
"=",
"None",
",",
"load",
"=",
"False",
",",
"timeout",
"=",
"-",
"1",
",",
"on_timeout",
"=",
"\"to_cpu\"",
",",
"model_root",
"=",
"\"./\"",
")",
":",
"self",
".",
"model_root",
"=",
"model_root",
"self",
".",
"opt",
"=",
"self",
".",
"parse_opt",
"(",
"opt",
")",
"if",
"self",
".",
"opt",
".",
"n_best",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Values of n_best > 1 are not supported\"",
")",
"self",
".",
"model_id",
"=",
"model_id",
"self",
".",
"tokenizer_opt",
"=",
"tokenizer_opt",
"self",
".",
"timeout",
"=",
"timeout",
"self",
".",
"on_timeout",
"=",
"on_timeout",
"self",
".",
"unload_timer",
"=",
"None",
"self",
".",
"user_opt",
"=",
"opt",
"self",
".",
"tokenizer",
"=",
"None",
"self",
".",
"logger",
"=",
"init_logger",
"(",
"self",
".",
"opt",
".",
"log_file",
")",
"self",
".",
"loading_lock",
"=",
"threading",
".",
"Event",
"(",
")",
"self",
".",
"loading_lock",
".",
"set",
"(",
")",
"if",
"load",
":",
"self",
".",
"load",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L154-L188 |
||
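
`__init__` stores a `threading.Event` as `loading_lock` and sets it immediately; `run` later clears and waits on it so requests block while another thread reloads the model. A self-contained sketch of that set/clear/wait handshake, with a sleep standing in for the real load:

```python
import threading
import time

ready = threading.Event()
ready.set()  # the model starts out usable, as in __init__ above

def reload_model():
    ready.clear()    # block incoming requests
    time.sleep(0.1)  # stand-in for the actual (re)load
    ready.set()      # release any waiters

worker = threading.Thread(target=reload_model)
worker.start()
if not ready.wait(timeout=30):  # same pattern run() uses before translating
    raise RuntimeError("model loading timeout")
worker.join()
print("model ready")
```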
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.parse_opt | (self, opt) | return opt | Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user
Returns:
opt: (Namespace) full set of options for the Translator | Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user | [
"Parse",
"the",
"option",
"set",
"passed",
"by",
"the",
"user",
"using",
"onmt",
".",
"opts",
"Args",
":",
"opt",
":",
"(",
"dict",
")",
"options",
"passed",
"by",
"the",
"user"
] | def parse_opt(self, opt):
"""Parse the option set passed by the user using `onmt.opts`
Args:
opt: (dict) options passed by the user
Returns:
opt: (Namespace) full set of options for the Translator
"""
prec_argv = sys.argv
sys.argv = sys.argv[:1]
parser = argparse.ArgumentParser()
onmt.opts.translate_opts(parser)
models = opt['models']
if not isinstance(models, (list, tuple)):
models = [models]
opt['models'] = [os.path.join(self.model_root, model)
for model in models]
opt['src'] = "dummy_src"
for (k, v) in opt.items():
if k == 'models':
sys.argv += ['-model']
sys.argv += [str(model) for model in v]
elif type(v) == bool:
sys.argv += ['-%s' % k]
else:
sys.argv += ['-%s' % k, str(v)]
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
sys.argv = prec_argv
return opt | [
"def",
"parse_opt",
"(",
"self",
",",
"opt",
")",
":",
"prec_argv",
"=",
"sys",
".",
"argv",
"sys",
".",
"argv",
"=",
"sys",
".",
"argv",
"[",
":",
"1",
"]",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"onmt",
".",
"opts",
".",
"translate_opts",
"(",
"parser",
")",
"models",
"=",
"opt",
"[",
"'models'",
"]",
"if",
"not",
"isinstance",
"(",
"models",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"models",
"=",
"[",
"models",
"]",
"opt",
"[",
"'models'",
"]",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"model_root",
",",
"model",
")",
"for",
"model",
"in",
"models",
"]",
"opt",
"[",
"'src'",
"]",
"=",
"\"dummy_src\"",
"for",
"(",
"k",
",",
"v",
")",
"in",
"opt",
".",
"items",
"(",
")",
":",
"if",
"k",
"==",
"'models'",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-model'",
"]",
"sys",
".",
"argv",
"+=",
"[",
"str",
"(",
"model",
")",
"for",
"model",
"in",
"v",
"]",
"elif",
"type",
"(",
"v",
")",
"==",
"bool",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-%s'",
"%",
"k",
"]",
"else",
":",
"sys",
".",
"argv",
"+=",
"[",
"'-%s'",
"%",
"k",
",",
"str",
"(",
"v",
")",
"]",
"opt",
"=",
"parser",
".",
"parse_args",
"(",
")",
"opt",
".",
"cuda",
"=",
"opt",
".",
"gpu",
">",
"-",
"1",
"sys",
".",
"argv",
"=",
"prec_argv",
"return",
"opt"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L190-L223 |
|
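
`parse_opt` round-trips a plain dict through `sys.argv` so the existing `onmt.opts` argparse parser can validate it and fill defaults. The sketch below isolates that trick with a toy parser; it adds a `try`/`finally` so `sys.argv` is restored even when parsing fails, and it keeps the original's quirk of emitting a boolean flag whether the value is `True` or `False`:

```python
import argparse
import sys

def parse_from_dict(opt_dict, build_parser):
    # Serialize a dict back into argv-style flags so an existing
    # argparse parser can validate and complete it, as parse_opt does.
    saved_argv, sys.argv = sys.argv, sys.argv[:1]
    try:
        for key, value in opt_dict.items():
            if isinstance(value, bool):
                sys.argv += ["-%s" % key]
            else:
                sys.argv += ["-%s" % key, str(value)]
        return build_parser().parse_args()
    finally:
        sys.argv = saved_argv  # restore even if parsing raises

def demo_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("-beam_size", type=int, default=5)
    parser.add_argument("-verbose", action="store_true")
    return parser

print(parse_from_dict({"beam_size": 10, "verbose": True}, demo_parser))
```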
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.run | (self, inputs) | return results, scores, self.opt.n_best, timer.times | Translate `inputs` using this model
Args:
inputs: [{"src": "..."},{"src": ...}]
Returns:
result: (list) translations
times: (dict) containing times | Translate `inputs` using this model | [
"Translate",
"inputs",
"using",
"this",
"model"
] | def run(self, inputs):
"""Translate `inputs` using this model
Args:
inputs: [{"src": "..."},{"src": ...}]
Returns:
result: (list) translations
times: (dict) containing times
"""
self.stop_unload_timer()
timer = Timer()
timer.start()
self.logger.info("Running translation using %d" % self.model_id)
if not self.loading_lock.is_set():
self.logger.info(
"Model #%d is being loaded by another thread, waiting"
% self.model_id)
if not self.loading_lock.wait(timeout=30):
raise ServerModelError("Model %d loading timeout"
% self.model_id)
else:
if not self.loaded:
self.load()
timer.tick(name="load")
elif self.opt.cuda:
self.to_gpu()
timer.tick(name="to_gpu")
texts = []
head_spaces = []
tail_spaces = []
sslength = []
for i, inp in enumerate(inputs):
src = inp['src']
if src.strip() == "":
head_spaces.append(src)
texts.append("")
tail_spaces.append("")
else:
whitespaces_before, whitespaces_after = "", ""
match_before = re.search(r'^\s+', src)
match_after = re.search(r'\s+$', src)
if match_before is not None:
whitespaces_before = match_before.group(0)
if match_after is not None:
whitespaces_after = match_after.group(0)
head_spaces.append(whitespaces_before)
tok = self.maybe_tokenize(src.strip())
texts.append(tok)
sslength.append(len(tok.split()))
tail_spaces.append(whitespaces_after)
empty_indices = [i for i, x in enumerate(texts) if x == ""]
texts_to_translate = [x for x in texts if x != ""]
scores = []
predictions = []
if len(texts_to_translate) > 0:
try:
scores, predictions = self.translator.translate(
src_data_iter=texts_to_translate,
batch_size=self.opt.batch_size)
except RuntimeError as e:
raise ServerModelError("Runtime Error: %s" % str(e))
timer.tick(name="translation")
self.logger.info("""Using model #%d\t%d inputs
\ttranslation time: %f""" % (self.model_id, len(texts),
timer.times['translation']))
self.reset_unload_timer()
# NOTE: translator returns lists of `n_best` list
# we can ignore that (i.e. flatten lists) only because
# we restrict `n_best=1`
def flatten_list(_list): return sum(_list, [])
results = flatten_list(predictions)
scores = [score_tensor.item()
for score_tensor in flatten_list(scores)]
results = [self.maybe_detokenize(item)
for item in results]
# build back results with empty texts
for i in empty_indices:
results.insert(i, "")
scores.insert(i, 0)
results = ["".join(items)
for items in zip(head_spaces, results, tail_spaces)]
self.logger.info("Translation Results: %d", len(results))
return results, scores, self.opt.n_best, timer.times | [
"def",
"run",
"(",
"self",
",",
"inputs",
")",
":",
"self",
".",
"stop_unload_timer",
"(",
")",
"timer",
"=",
"Timer",
"(",
")",
"timer",
".",
"start",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Running translation using %d\"",
"%",
"self",
".",
"model_id",
")",
"if",
"not",
"self",
".",
"loading_lock",
".",
"is_set",
"(",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Model #%d is being loaded by another thread, waiting\"",
"%",
"self",
".",
"model_id",
")",
"if",
"not",
"self",
".",
"loading_lock",
".",
"wait",
"(",
"timeout",
"=",
"30",
")",
":",
"raise",
"ServerModelError",
"(",
"\"Model %d loading timeout\"",
"%",
"self",
".",
"model_id",
")",
"else",
":",
"if",
"not",
"self",
".",
"loaded",
":",
"self",
".",
"load",
"(",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"load\"",
")",
"elif",
"self",
".",
"opt",
".",
"cuda",
":",
"self",
".",
"to_gpu",
"(",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"to_gpu\"",
")",
"texts",
"=",
"[",
"]",
"head_spaces",
"=",
"[",
"]",
"tail_spaces",
"=",
"[",
"]",
"sslength",
"=",
"[",
"]",
"for",
"i",
",",
"inp",
"in",
"enumerate",
"(",
"inputs",
")",
":",
"src",
"=",
"inp",
"[",
"'src'",
"]",
"if",
"src",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"head_spaces",
".",
"append",
"(",
"src",
")",
"texts",
".",
"append",
"(",
"\"\"",
")",
"tail_spaces",
".",
"append",
"(",
"\"\"",
")",
"else",
":",
"whitespaces_before",
",",
"whitespaces_after",
"=",
"\"\"",
",",
"\"\"",
"match_before",
"=",
"re",
".",
"search",
"(",
"r'^\\s+'",
",",
"src",
")",
"match_after",
"=",
"re",
".",
"search",
"(",
"r'\\s+$'",
",",
"src",
")",
"if",
"match_before",
"is",
"not",
"None",
":",
"whitespaces_before",
"=",
"match_before",
".",
"group",
"(",
"0",
")",
"if",
"match_after",
"is",
"not",
"None",
":",
"whitespaces_after",
"=",
"match_after",
".",
"group",
"(",
"0",
")",
"head_spaces",
".",
"append",
"(",
"whitespaces_before",
")",
"tok",
"=",
"self",
".",
"maybe_tokenize",
"(",
"src",
".",
"strip",
"(",
")",
")",
"texts",
".",
"append",
"(",
"tok",
")",
"sslength",
".",
"append",
"(",
"len",
"(",
"tok",
".",
"split",
"(",
")",
")",
")",
"tail_spaces",
".",
"append",
"(",
"whitespaces_after",
")",
"empty_indices",
"=",
"[",
"i",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"texts",
")",
"if",
"x",
"==",
"\"\"",
"]",
"texts_to_translate",
"=",
"[",
"x",
"for",
"x",
"in",
"texts",
"if",
"x",
"!=",
"\"\"",
"]",
"scores",
"=",
"[",
"]",
"predictions",
"=",
"[",
"]",
"if",
"len",
"(",
"texts_to_translate",
")",
">",
"0",
":",
"try",
":",
"scores",
",",
"predictions",
"=",
"self",
".",
"translator",
".",
"translate",
"(",
"src_data_iter",
"=",
"texts_to_translate",
",",
"batch_size",
"=",
"self",
".",
"opt",
".",
"batch_size",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"raise",
"ServerModelError",
"(",
"\"Runtime Error: %s\"",
"%",
"str",
"(",
"e",
")",
")",
"timer",
".",
"tick",
"(",
"name",
"=",
"\"translation\"",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"\"\"Using model #%d\\t%d inputs\n \\ttranslation time: %f\"\"\"",
"%",
"(",
"self",
".",
"model_id",
",",
"len",
"(",
"texts",
")",
",",
"timer",
".",
"times",
"[",
"'translation'",
"]",
")",
")",
"self",
".",
"reset_unload_timer",
"(",
")",
"# NOTE: translator returns lists of `n_best` list",
"# we can ignore that (i.e. flatten lists) only because",
"# we restrict `n_best=1`",
"def",
"flatten_list",
"(",
"_list",
")",
":",
"return",
"sum",
"(",
"_list",
",",
"[",
"]",
")",
"results",
"=",
"flatten_list",
"(",
"predictions",
")",
"scores",
"=",
"[",
"score_tensor",
".",
"item",
"(",
")",
"for",
"score_tensor",
"in",
"flatten_list",
"(",
"scores",
")",
"]",
"results",
"=",
"[",
"self",
".",
"maybe_detokenize",
"(",
"item",
")",
"for",
"item",
"in",
"results",
"]",
"# build back results with empty texts",
"for",
"i",
"in",
"empty_indices",
":",
"results",
".",
"insert",
"(",
"i",
",",
"\"\"",
")",
"scores",
".",
"insert",
"(",
"i",
",",
"0",
")",
"results",
"=",
"[",
"\"\"",
".",
"join",
"(",
"items",
")",
"for",
"items",
"in",
"zip",
"(",
"head_spaces",
",",
"results",
",",
"tail_spaces",
")",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"\"Translation Results: %d\"",
",",
"len",
"(",
"results",
")",
")",
"return",
"results",
",",
"scores",
",",
"self",
".",
"opt",
".",
"n_best",
",",
"timer",
".",
"times"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L286-L382 |
|
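
Before translating, `run` peels leading and trailing whitespace off each input with the two regex searches above, then reattaches it around the detokenized output. The whitespace round-trip in isolation:

```python
import re

def split_whitespace(src):
    # Peel leading/trailing whitespace off src, as run() does, so it can
    # be reattached around the detokenized translation afterwards.
    head = re.search(r"^\s+", src)
    tail = re.search(r"\s+$", src)
    head = head.group(0) if head else ""
    tail = tail.group(0) if tail else ""
    return head, src.strip(), tail

head, body, tail = split_whitespace("  Hello world !\n")
print(repr(head), repr(body), repr(tail))  # '  ' 'Hello world !' '\n'
```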
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.do_timeout | (self) | | | Timeout function that frees GPU memory by moving the model to CPU
or unloading it, depending on the `self.on_timeout` value | Timeout function that frees GPU memory by moving the model to CPU
or unloading it, depending on the `self.on_timeout` value | [
"Timeout",
"function",
"that",
"free",
"GPU",
"memory",
"by",
"moving",
"the",
"model",
"to",
"CPU",
"or",
"unloading",
"it",
";",
"depending",
"on",
"self",
".",
"on_timemout",
"value"
] | def do_timeout(self):
"""Timeout function that free GPU memory by moving the model to CPU
or unloading it; depending on `self.on_timemout` value
"""
if self.on_timeout == "unload":
self.logger.info("Timeout: unloading model %d" % self.model_id)
self.unload()
if self.on_timeout == "to_cpu":
self.logger.info("Timeout: sending model %d to CPU"
% self.model_id)
self.to_cpu() | [
"def",
"do_timeout",
"(",
"self",
")",
":",
"if",
"self",
".",
"on_timeout",
"==",
"\"unload\"",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Timeout: unloading model %d\"",
"%",
"self",
".",
"model_id",
")",
"self",
".",
"unload",
"(",
")",
"if",
"self",
".",
"on_timeout",
"==",
"\"to_cpu\"",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Timeout: sending model %d to CPU\"",
"%",
"self",
".",
"model_id",
")",
"self",
".",
"to_cpu",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L384-L394 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.to_cpu | (self) | Move the model to CPU and clear CUDA cache | Move the model to CPU and clear CUDA cache | [
"Move",
"the",
"model",
"to",
"CPU",
"and",
"clear",
"CUDA",
"cache"
] | def to_cpu(self):
"""Move the model to CPU and clear CUDA cache
"""
self.translator.model.cpu()
if self.opt.cuda:
torch.cuda.empty_cache() | [
"def",
"to_cpu",
"(",
"self",
")",
":",
"self",
".",
"translator",
".",
"model",
".",
"cpu",
"(",
")",
"if",
"self",
".",
"opt",
".",
"cuda",
":",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L428-L433 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.to_gpu | (self) | Move the model to GPU | Move the model to GPU | [
"Move",
"the",
"model",
"to",
"GPU"
] | def to_gpu(self):
"""Move the model to GPU
"""
torch.cuda.set_device(self.opt.gpu)
self.translator.model.cuda() | [
"def",
"to_gpu",
"(",
"self",
")",
":",
"torch",
".",
"cuda",
".",
"set_device",
"(",
"self",
".",
"opt",
".",
"gpu",
")",
"self",
".",
"translator",
".",
"model",
".",
"cuda",
"(",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L435-L439 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.maybe_tokenize | (self, sequence) | return sequence | Tokenize the sequence (or not)
Same args/returns as `tokenize` | Tokenize the sequence (or not) | [
"Tokenize",
"the",
"sequence",
"(",
"or",
"not",
")"
] | def maybe_tokenize(self, sequence):
"""Tokenize the sequence (or not)
Same args/returns as `tokenize`
"""
if self.tokenizer_opt is not None:
return self.tokenize(sequence)
return sequence | [
"def",
"maybe_tokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer_opt",
"is",
"not",
"None",
":",
"return",
"self",
".",
"tokenize",
"(",
"sequence",
")",
"return",
"sequence"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L441-L448 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.tokenize | (self, sequence) | return tok | Tokenize a single sequence
Args:
sequence: (str) the sequence to tokenize
Returns:
tok: (str) the tokenized sequence | Tokenize a single sequence | [
"Tokenize",
"a",
"single",
"sequence"
] | def tokenize(self, sequence):
"""Tokenize a single sequence
Args:
sequence: (str) the sequence to tokenize
Returns:
tok: (str) the tokenized sequence
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
tok = self.tokenizer.EncodeAsPieces(sequence)
tok = " ".join(tok)
elif self.tokenizer_opt["type"] == "pyonmttok":
tok, _ = self.tokenizer.tokenize(sequence)
tok = " ".join(tok)
return tok | [
"def",
"tokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No tokenizer loaded\"",
")",
"if",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"sentencepiece\"",
":",
"tok",
"=",
"self",
".",
"tokenizer",
".",
"EncodeAsPieces",
"(",
"sequence",
")",
"tok",
"=",
"\" \"",
".",
"join",
"(",
"tok",
")",
"elif",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"pyonmttok\"",
":",
"tok",
",",
"_",
"=",
"self",
".",
"tokenizer",
".",
"tokenize",
"(",
"sequence",
")",
"tok",
"=",
"\" \"",
".",
"join",
"(",
"tok",
")",
"return",
"tok"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L450-L469 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.maybe_detokenize | (self, sequence) | return sequence | De-tokenize the sequence (or not)
Same args/returns as `tokenize` | De-tokenize the sequence (or not) | [
"De",
"-",
"tokenize",
"the",
"sequence",
"(",
"or",
"not",
")"
] | def maybe_detokenize(self, sequence):
"""De-tokenize the sequence (or not)
Same args/returns as `tokenize`
"""
if self.tokenizer_opt is not None and ''.join(sequence.split()) != '':
return self.detokenize(sequence)
return sequence | [
"def",
"maybe_detokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer_opt",
"is",
"not",
"None",
"and",
"''",
".",
"join",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"!=",
"''",
":",
"return",
"self",
".",
"detokenize",
"(",
"sequence",
")",
"return",
"sequence"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L471-L478 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translation_server.py | python | ServerModel.detokenize | (self, sequence) | return detok | Detokenize a single sequence
Same args/returns as `tokenize` | Detokenize a single sequence | [
"Detokenize",
"a",
"single",
"sequence"
] | def detokenize(self, sequence):
"""Detokenize a single sequence
Same args/returns as `tokenize`
"""
if self.tokenizer is None:
raise ValueError("No tokenizer loaded")
if self.tokenizer_opt["type"] == "sentencepiece":
detok = self.tokenizer.DecodePieces(sequence.split())
elif self.tokenizer_opt["type"] == "pyonmttok":
detok = self.tokenizer.detokenize(sequence.split())
return detok | [
"def",
"detokenize",
"(",
"self",
",",
"sequence",
")",
":",
"if",
"self",
".",
"tokenizer",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"No tokenizer loaded\"",
")",
"if",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"sentencepiece\"",
":",
"detok",
"=",
"self",
".",
"tokenizer",
".",
"DecodePieces",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"elif",
"self",
".",
"tokenizer_opt",
"[",
"\"type\"",
"]",
"==",
"\"pyonmttok\"",
":",
"detok",
"=",
"self",
".",
"tokenizer",
".",
"detokenize",
"(",
"sequence",
".",
"split",
"(",
")",
")",
"return",
"detok"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation_server.py#L480-L493 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_wu | (self, beam, cov, beta=0.) | return beta * penalty | NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | [
"NMT",
"coverage",
"re",
"-",
"ranking",
"score",
"from",
"Google",
"s",
"Neural",
"Machine",
"Translation",
"System",
":",
"cite",
":",
"wu2016google",
"."
] | def coverage_wu(self, beam, cov, beta=0.):
"""
NMT coverage re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)
return beta * penalty | [
"def",
"coverage_wu",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"penalty",
"=",
"-",
"torch",
".",
"min",
"(",
"cov",
",",
"cov",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"1.0",
")",
")",
".",
"log",
"(",
")",
".",
"sum",
"(",
"1",
")",
"return",
"beta",
"*",
"penalty"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L38-L44 |
|
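
In scalar form the torch expression above is `beta * -sum(log(min(cov_i, 1.0)))`: source positions the attention has not fully covered raise the penalty, while fully covered positions contribute nothing. A pure-Python check:

```python
import math

def coverage_wu_penalty(cov, beta):
    # beta * -sum(log(min(c, 1.0))): attention mass below 1.0 on any
    # source position is penalized; mass at or above 1.0 contributes 0.
    return beta * -sum(math.log(min(c, 1.0)) for c in cov)

print(round(coverage_wu_penalty([0.5, 1.2, 0.9], beta=0.2), 4))  # 0.1597
```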
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_summary | (self, beam, cov, beta=0.) | return beta * penalty | Our summary penalty. | Our summary penalty. | [
"Our",
"summary",
"penalty",
"."
] | def coverage_summary(self, beam, cov, beta=0.):
"""
Our summary penalty.
"""
penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(1)
penalty -= cov.size(1)
return beta * penalty | [
"def",
"coverage_summary",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"penalty",
"=",
"torch",
".",
"max",
"(",
"cov",
",",
"cov",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"1.0",
")",
")",
".",
"sum",
"(",
"1",
")",
"penalty",
"-=",
"cov",
".",
"size",
"(",
"1",
")",
"return",
"beta",
"*",
"penalty"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L46-L52 |
|
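
By contrast, the summary penalty above is `beta * (sum(max(cov_i, 1.0)) - len(cov))`, which only grows when a position is attended more than once. The same example values as before:

```python
def coverage_summary_penalty(cov, beta):
    # beta * (sum(max(c, 1.0)) - len(cov)): only positions attended
    # *more* than once add to the penalty, the opposite emphasis of
    # coverage_wu, which punishes under-attended positions.
    return beta * (sum(max(c, 1.0) for c in cov) - len(cov))

print(coverage_summary_penalty([0.5, 1.2, 0.9], beta=0.2))  # ~0.04
```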
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.coverage_none | (self, beam, cov, beta=0.) | | return beam.scores.clone().fill_(0.0) | Returns zero as penalty | Returns zero as penalty | [
"returns",
"zero",
"as",
"penalty"
] | def coverage_none(self, beam, cov, beta=0.):
"""
Returns zero as penalty
"""
return beam.scores.clone().fill_(0.0) | [
"def",
"coverage_none",
"(",
"self",
",",
"beam",
",",
"cov",
",",
"beta",
"=",
"0.",
")",
":",
"return",
"beam",
".",
"scores",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"0.0",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L54-L58 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.length_wu | (self, beam, logprobs, alpha=0.) | return (logprobs / modifier) | NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`. | [
"NMT",
"length",
"re",
"-",
"ranking",
"score",
"from",
"Google",
"s",
"Neural",
"Machine",
"Translation",
"System",
":",
"cite",
":",
"wu2016google",
"."
] | def length_wu(self, beam, logprobs, alpha=0.):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = (((5 + len(beam.next_ys)) ** alpha) /
((5 + 1) ** alpha))
return (logprobs / modifier) | [
"def",
"length_wu",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
")",
":",
"modifier",
"=",
"(",
"(",
"(",
"5",
"+",
"len",
"(",
"beam",
".",
"next_ys",
")",
")",
"**",
"alpha",
")",
"/",
"(",
"(",
"5",
"+",
"1",
")",
"**",
"alpha",
")",
")",
"return",
"(",
"logprobs",
"/",
"modifier",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L60-L68 |
|
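`length_wu` above is the GNMT length normalization lp(Y) = (5 + |Y|)^alpha / (5 + 1)^alpha; dividing a (negative) running log-probability by this modifier shrinks the penalty on longer hypotheses as alpha grows. A numeric sketch where `hyp_len` stands in for `len(beam.next_ys)`:

```python
def length_wu(logprobs, hyp_len, alpha=0.):
    modifier = ((5 + hyp_len) ** alpha) / ((5 + 1) ** alpha)
    return logprobs / modifier

# Same raw log-prob, different lengths: with alpha > 0 the longer
# hypothesis is normalized more gently.
print(length_wu(-6.0, hyp_len=5, alpha=0.0))   # -6.0 (alpha=0 is a no-op)
print(length_wu(-6.0, hyp_len=5, alpha=0.6))   # ~ -4.42
print(length_wu(-6.0, hyp_len=10, alpha=0.6))  # ~ -3.46
```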
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.length_average | (self, beam, logprobs, alpha=0.) | return logprobs / len(beam.next_ys) | Returns the average probability of tokens in a sequence. | Returns the average probability of tokens in a sequence. | [
"Returns",
"the",
"average",
"probability",
"of",
"tokens",
"in",
"a",
"sequence",
"."
] | def length_average(self, beam, logprobs, alpha=0.):
"""
Returns the average probability of tokens in a sequence.
"""
return logprobs / len(beam.next_ys) | [
"def",
"length_average",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
")",
":",
"return",
"logprobs",
"/",
"len",
"(",
"beam",
".",
"next_ys",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L70-L74 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/penalties.py | python | PenaltyBuilder.length_none | (self, beam, logprobs, alpha=0., beta=0.) | return logprobs | Returns unmodified scores. | Returns unmodified scores. | [
"Returns",
"unmodified",
"scores",
"."
] | def length_none(self, beam, logprobs, alpha=0., beta=0.):
"""
Returns unmodified scores.
"""
return logprobs | [
"def",
"length_none",
"(",
"self",
",",
"beam",
",",
"logprobs",
",",
"alpha",
"=",
"0.",
",",
"beta",
"=",
"0.",
")",
":",
"return",
"logprobs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/penalties.py#L76-L80 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translator.py | python | Translator.translate | (self,
src_path=None,
src_data_iter=None,
tgt_path=None,
tgt_data_iter=None,
src_dir=None,
batch_size=None,
attn_debug=False) | return all_scores, all_predictions | Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.
Note: batch_size must not be None
Note: one of ('src_path', 'src_data_iter') must not be None
Args:
src_path (str): filepath of source data
src_data_iter (iterator): an iterator generating source data
e.g. it may be a list or an opened file
tgt_path (str): filepath of target data
tgt_data_iter (iterator): an iterator generating target data
src_dir (str): source directory path
(used for Audio and Image datasets)
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions | Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set. | [
"Translate",
"content",
"of",
"src_data_iter",
"(",
"if",
"not",
"None",
")",
"or",
"src_path",
"and",
"get",
"gold",
"scores",
"if",
"one",
"of",
"tgt_data_iter",
"or",
"tgt_path",
"is",
"set",
"."
] | def translate(self,
src_path=None,
src_data_iter=None,
tgt_path=None,
tgt_data_iter=None,
src_dir=None,
batch_size=None,
attn_debug=False):
"""
Translate content of `src_data_iter` (if not None) or `src_path`
and get gold scores if one of `tgt_data_iter` or `tgt_path` is set.
Note: batch_size must not be None
Note: one of ('src_path', 'src_data_iter') must not be None
Args:
src_path (str): filepath of source data
src_data_iter (iterator): an iterator generating source data
e.g. it may be a list or an opened file
tgt_path (str): filepath of target data
tgt_data_iter (iterator): an iterator generating target data
src_dir (str): source directory path
(used for Audio and Image datasets)
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
assert src_data_iter is not None or src_path is not None
if batch_size is None:
raise ValueError("batch_size must be set")
data = inputters. \
build_dataset(self.fields,
self.data_type,
src_path=src_path,
src_data_iter=src_data_iter,
tgt_path=tgt_path,
tgt_data_iter=tgt_data_iter,
src_dir=src_dir,
sample_rate=self.sample_rate,
window_size=self.window_size,
window_stride=self.window_stride,
window=self.window,
use_filter_pred=self.use_filter_pred,
image_channel_size=self.image_channel_size)
if self.cuda:
cur_device = "cuda"
else:
cur_device = "cpu"
data_iter = inputters.OrderedIterator(
dataset=data, device=cur_device,
batch_size=batch_size, train=False, sort=False,
sort_within_batch=True, shuffle=False)
builder = onmt.translate.TranslationBuilder(
data, self.fields,
self.n_best, self.replace_unk, tgt_path)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
for batch in data_iter:
batch_data = self.translate_batch(batch, data, fast=self.fast)
translations = builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt_path is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
# Debug attention.
if attn_debug:
srcs = trans.src_raw
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
header_format = "{:>10.10} " + "{:>10.7} " * len(srcs)
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
output = header_format.format("", *trans.src_raw) + '\n'
for word, row in zip(preds, attns):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(srcs)
os.write(1, output.encode('utf-8'))
#TODO change back
#if self.report_score:
# msg = self._report_score('PRED', pred_score_total,
# pred_words_total)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if tgt_path is not None:
# msg = self._report_score('GOLD', gold_score_total,
# gold_words_total)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if self.report_bleu:
# msg = self._report_bleu(tgt_path)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
# if self.report_rouge:
# msg = self._report_rouge(tgt_path)
# if self.logger:
# self.logger.info(msg)
# else:
# print(msg)
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions | [
"def",
"translate",
"(",
"self",
",",
"src_path",
"=",
"None",
",",
"src_data_iter",
"=",
"None",
",",
"tgt_path",
"=",
"None",
",",
"tgt_data_iter",
"=",
"None",
",",
"src_dir",
"=",
"None",
",",
"batch_size",
"=",
"None",
",",
"attn_debug",
"=",
"False",
")",
":",
"assert",
"src_data_iter",
"is",
"not",
"None",
"or",
"src_path",
"is",
"not",
"None",
"if",
"batch_size",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"batch_size must be set\"",
")",
"data",
"=",
"inputters",
".",
"build_dataset",
"(",
"self",
".",
"fields",
",",
"self",
".",
"data_type",
",",
"src_path",
"=",
"src_path",
",",
"src_data_iter",
"=",
"src_data_iter",
",",
"tgt_path",
"=",
"tgt_path",
",",
"tgt_data_iter",
"=",
"tgt_data_iter",
",",
"src_dir",
"=",
"src_dir",
",",
"sample_rate",
"=",
"self",
".",
"sample_rate",
",",
"window_size",
"=",
"self",
".",
"window_size",
",",
"window_stride",
"=",
"self",
".",
"window_stride",
",",
"window",
"=",
"self",
".",
"window",
",",
"use_filter_pred",
"=",
"self",
".",
"use_filter_pred",
",",
"image_channel_size",
"=",
"self",
".",
"image_channel_size",
")",
"if",
"self",
".",
"cuda",
":",
"cur_device",
"=",
"\"cuda\"",
"else",
":",
"cur_device",
"=",
"\"cpu\"",
"data_iter",
"=",
"inputters",
".",
"OrderedIterator",
"(",
"dataset",
"=",
"data",
",",
"device",
"=",
"cur_device",
",",
"batch_size",
"=",
"batch_size",
",",
"train",
"=",
"False",
",",
"sort",
"=",
"False",
",",
"sort_within_batch",
"=",
"True",
",",
"shuffle",
"=",
"False",
")",
"builder",
"=",
"onmt",
".",
"translate",
".",
"TranslationBuilder",
"(",
"data",
",",
"self",
".",
"fields",
",",
"self",
".",
"n_best",
",",
"self",
".",
"replace_unk",
",",
"tgt_path",
")",
"# Statistics",
"counter",
"=",
"count",
"(",
"1",
")",
"pred_score_total",
",",
"pred_words_total",
"=",
"0",
",",
"0",
"gold_score_total",
",",
"gold_words_total",
"=",
"0",
",",
"0",
"all_scores",
"=",
"[",
"]",
"all_predictions",
"=",
"[",
"]",
"for",
"batch",
"in",
"data_iter",
":",
"batch_data",
"=",
"self",
".",
"translate_batch",
"(",
"batch",
",",
"data",
",",
"fast",
"=",
"self",
".",
"fast",
")",
"translations",
"=",
"builder",
".",
"from_batch",
"(",
"batch_data",
")",
"for",
"trans",
"in",
"translations",
":",
"all_scores",
"+=",
"[",
"trans",
".",
"pred_scores",
"[",
":",
"self",
".",
"n_best",
"]",
"]",
"pred_score_total",
"+=",
"trans",
".",
"pred_scores",
"[",
"0",
"]",
"pred_words_total",
"+=",
"len",
"(",
"trans",
".",
"pred_sents",
"[",
"0",
"]",
")",
"if",
"tgt_path",
"is",
"not",
"None",
":",
"gold_score_total",
"+=",
"trans",
".",
"gold_score",
"gold_words_total",
"+=",
"len",
"(",
"trans",
".",
"gold_sent",
")",
"+",
"1",
"n_best_preds",
"=",
"[",
"\" \"",
".",
"join",
"(",
"pred",
")",
"for",
"pred",
"in",
"trans",
".",
"pred_sents",
"[",
":",
"self",
".",
"n_best",
"]",
"]",
"all_predictions",
"+=",
"[",
"n_best_preds",
"]",
"self",
".",
"out_file",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"n_best_preds",
")",
"+",
"'\\n'",
")",
"self",
".",
"out_file",
".",
"flush",
"(",
")",
"if",
"self",
".",
"verbose",
":",
"sent_number",
"=",
"next",
"(",
"counter",
")",
"output",
"=",
"trans",
".",
"log",
"(",
"sent_number",
")",
"if",
"self",
".",
"logger",
":",
"self",
".",
"logger",
".",
"info",
"(",
"output",
")",
"else",
":",
"os",
".",
"write",
"(",
"1",
",",
"output",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"# Debug attention.",
"if",
"attn_debug",
":",
"srcs",
"=",
"trans",
".",
"src_raw",
"preds",
"=",
"trans",
".",
"pred_sents",
"[",
"0",
"]",
"preds",
".",
"append",
"(",
"'</s>'",
")",
"attns",
"=",
"trans",
".",
"attns",
"[",
"0",
"]",
".",
"tolist",
"(",
")",
"header_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7} \"",
"*",
"len",
"(",
"srcs",
")",
"row_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7f} \"",
"*",
"len",
"(",
"srcs",
")",
"output",
"=",
"header_format",
".",
"format",
"(",
"\"\"",
",",
"*",
"trans",
".",
"src_raw",
")",
"+",
"'\\n'",
"for",
"word",
",",
"row",
"in",
"zip",
"(",
"preds",
",",
"attns",
")",
":",
"max_index",
"=",
"row",
".",
"index",
"(",
"max",
"(",
"row",
")",
")",
"row_format",
"=",
"row_format",
".",
"replace",
"(",
"\"{:>10.7f} \"",
",",
"\"{:*>10.7f} \"",
",",
"max_index",
"+",
"1",
")",
"row_format",
"=",
"row_format",
".",
"replace",
"(",
"\"{:*>10.7f} \"",
",",
"\"{:>10.7f} \"",
",",
"max_index",
")",
"output",
"+=",
"row_format",
".",
"format",
"(",
"word",
",",
"*",
"row",
")",
"+",
"'\\n'",
"row_format",
"=",
"\"{:>10.10} \"",
"+",
"\"{:>10.7f} \"",
"*",
"len",
"(",
"srcs",
")",
"os",
".",
"write",
"(",
"1",
",",
"output",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"#TODO change back",
"#if self.report_score:",
"# msg = self._report_score('PRED', pred_score_total,",
"# pred_words_total)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if tgt_path is not None:",
"# msg = self._report_score('GOLD', gold_score_total,",
"# gold_words_total)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if self.report_bleu:",
"# msg = self._report_bleu(tgt_path)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"# if self.report_rouge:",
"# msg = self._report_rouge(tgt_path)",
"# if self.logger:",
"# self.logger.info(msg)",
"# else:",
"# print(msg)",
"if",
"self",
".",
"dump_beam",
":",
"import",
"json",
"json",
".",
"dump",
"(",
"self",
".",
"translator",
".",
"beam_accum",
",",
"codecs",
".",
"open",
"(",
"self",
".",
"dump_beam",
",",
"'w'",
",",
"'utf-8'",
")",
")",
"return",
"all_scores",
",",
"all_predictions"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translator.py#L154-L313 |
|
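A hedged usage sketch of the `translate` entry point above. It assumes this fork keeps OpenNMT-py's `build_translator(opt)` helper (defined in this same `translator.py`) and that `opt` was produced by the repo's standard `translate.py` argument parser; the source file name is hypothetical:

```python
# Sketch only: build_translator and `opt` are assumptions, see lead-in above.
from onmt.translate.translator import build_translator

translator = build_translator(opt, report_score=False)
all_scores, all_predictions = translator.translate(
    src_path="test.src.txt",  # hypothetical input, one source per line
    tgt_path=None,            # no gold targets -> no gold scoring
    batch_size=30)

# all_predictions[i]: n_best output strings for input i;
# all_scores[i]: the matching log-probability scores.
print(all_predictions[0][0], float(all_scores[0][0]))
```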
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/translator.py | python | Translator.translate_batch | (self, batch, data, fast=False) | Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset. | Translate a batch of sentences. | [
"Translate",
"a",
"batch",
"of",
"sentences",
"."
] | def translate_batch(self, batch, data, fast=False):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
fast (bool): enables fast beam search (may not support all features)
Todo:
Shouldn't need the original dataset.
"""
with torch.no_grad():
if fast:
return self._fast_translate_batch(
batch,
data,
self.max_length,
min_length=self.min_length,
n_best=self.n_best,
return_attention=self.replace_unk)
else:
# 2333: go here
return self._translate_batch(batch, data) | [
"def",
"translate_batch",
"(",
"self",
",",
"batch",
",",
"data",
",",
"fast",
"=",
"False",
")",
":",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"if",
"fast",
":",
"return",
"self",
".",
"_fast_translate_batch",
"(",
"batch",
",",
"data",
",",
"self",
".",
"max_length",
",",
"min_length",
"=",
"self",
".",
"min_length",
",",
"n_best",
"=",
"self",
".",
"n_best",
",",
"return_attention",
"=",
"self",
".",
"replace_unk",
")",
"else",
":",
"# 2333: go here",
"return",
"self",
".",
"_translate_batch",
"(",
"batch",
",",
"data",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translator.py#L315-L342 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/beam.py | python | Beam.get_current_state | (self) | return self.next_ys[-1] | Get the outputs for the current timestep. | Get the outputs for the current timestep. | [
"Get",
"the",
"outputs",
"for",
"the",
"current",
"timestep",
"."
] | def get_current_state(self):
"Get the outputs for the current timestep."
return self.next_ys[-1] | [
"def",
"get_current_state",
"(",
"self",
")",
":",
"return",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L68-L70 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/beam.py | python | Beam.get_current_origin | (self) | return self.prev_ks[-1] | Get the backpointers for the current timestep. | Get the backpointers for the current timestep. | [
"Get",
"the",
"backpointers",
"for",
"the",
"current",
"timestep",
"."
] | def get_current_origin(self):
"Get the backpointers for the current timestep."
return self.prev_ks[-1] | [
"def",
"get_current_origin",
"(",
"self",
")",
":",
"return",
"self",
".",
"prev_ks",
"[",
"-",
"1",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L72-L74 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/beam.py | python | Beam.advance | (self, word_probs, attn_out) | | Given prob over words for every last beam `word_probs` and attention
`attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete. | Given prob over words for every last beam `wordLk` and attention
`attn_out`: Compute and update the beam search. | [
"Given",
"prob",
"over",
"words",
"for",
"every",
"last",
"beam",
"wordLk",
"and",
"attention",
"attn_out",
":",
"Compute",
"and",
"update",
"the",
"beam",
"search",
"."
] | def advance(self, word_probs, attn_out):
"""
Given prob over words for every last beam `word_probs` and attention
`attn_out`: Compute and update the beam search.
Parameters:
* `word_probs`- probs of advancing from the last step (K x words)
* `attn_out`- attention at the last step
Returns: True if beam search is complete.
"""
num_words = word_probs.size(1)
if self.stepwise_penalty:
self.global_scorer.update_score(self, attn_out)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len < self.min_length:
word_probs[:, self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + \
self.scores.unsqueeze(1).expand_as(word_probs)
# Don't let EOS have children.
beam_scores[self.next_ys[-1] == self._eos] = -1e20
# Block ngram repeats
if self.block_ngram_repeat > 0:
ngrams = []
le = len(self.next_ys)
for j in range(self.next_ys[-1].size(0)):
hyp, _ = self.get_hyp(le - 1, j, requires_attn=False)
ngrams = set()
fail = False
gram = []
for i in range(le - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram +
[hyp[i]])[-self.block_ngram_repeat:]
# Skip the blocking if it is in the exclusion list
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
beam_scores[j] = -10e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
prev_k = best_scores_id / num_words
self.prev_ks.append(prev_k)
self.prev_ks_cpu.append(prev_k.tolist())
self.next_ys.append((best_scores_id - prev_k * num_words))
self.next_ys_cpu.append((best_scores_id - prev_k * num_words).tolist())
self.attn.append(attn_out.index_select(0, prev_k))
self.global_scorer.update_global_state(self)
eos_indicator = self.next_ys[-1] == self._eos
if eos_indicator.any():
global_scores = self.global_scorer.score(self, self.scores)
global_scores_eos = global_scores[eos_indicator]
i_indexes = torch.where(eos_indicator)[0]
for s, i, in zip(global_scores_eos.tolist(), i_indexes.tolist()):
self.finished.append((s, len(self.next_ys) - 1, i))
# for i in range(self.next_ys[-1].size(0)):
# if self.next_ys[-1][i] == self._eos:
# global_scores = self.global_scorer.score(self, self.scores)
# s = global_scores[i]
# self.finished.append((s, len(self.next_ys) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
self.eos_top = True | [
"def",
"advance",
"(",
"self",
",",
"word_probs",
",",
"attn_out",
")",
":",
"num_words",
"=",
"word_probs",
".",
"size",
"(",
"1",
")",
"if",
"self",
".",
"stepwise_penalty",
":",
"self",
".",
"global_scorer",
".",
"update_score",
"(",
"self",
",",
"attn_out",
")",
"# force the output to be longer than self.min_length",
"cur_len",
"=",
"len",
"(",
"self",
".",
"next_ys",
")",
"if",
"cur_len",
"<",
"self",
".",
"min_length",
":",
"word_probs",
"[",
":",
",",
"self",
".",
"_eos",
"]",
"=",
"-",
"1e20",
"# Sum the previous scores.",
"if",
"len",
"(",
"self",
".",
"prev_ks",
")",
">",
"0",
":",
"beam_scores",
"=",
"word_probs",
"+",
"self",
".",
"scores",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand_as",
"(",
"word_probs",
")",
"# Don't let EOS have children. ",
"beam_scores",
"[",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"==",
"self",
".",
"_eos",
"]",
"=",
"-",
"1e20",
"# Block ngram repeats",
"if",
"self",
".",
"block_ngram_repeat",
">",
"0",
":",
"ngrams",
"=",
"[",
"]",
"le",
"=",
"len",
"(",
"self",
".",
"next_ys",
")",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
".",
"size",
"(",
"0",
")",
")",
":",
"hyp",
",",
"_",
"=",
"self",
".",
"get_hyp",
"(",
"le",
"-",
"1",
",",
"j",
",",
"requires_attn",
"=",
"False",
")",
"ngrams",
"=",
"set",
"(",
")",
"fail",
"=",
"False",
"gram",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"le",
"-",
"1",
")",
":",
"# Last n tokens, n = block_ngram_repeat",
"gram",
"=",
"(",
"gram",
"+",
"[",
"hyp",
"[",
"i",
"]",
"]",
")",
"[",
"-",
"self",
".",
"block_ngram_repeat",
":",
"]",
"# Skip the blocking if it is in the exclusion list",
"if",
"set",
"(",
"gram",
")",
"&",
"self",
".",
"exclusion_tokens",
":",
"continue",
"if",
"tuple",
"(",
"gram",
")",
"in",
"ngrams",
":",
"fail",
"=",
"True",
"ngrams",
".",
"add",
"(",
"tuple",
"(",
"gram",
")",
")",
"if",
"fail",
":",
"beam_scores",
"[",
"j",
"]",
"=",
"-",
"10e20",
"else",
":",
"beam_scores",
"=",
"word_probs",
"[",
"0",
"]",
"flat_beam_scores",
"=",
"beam_scores",
".",
"view",
"(",
"-",
"1",
")",
"best_scores",
",",
"best_scores_id",
"=",
"flat_beam_scores",
".",
"topk",
"(",
"self",
".",
"size",
",",
"0",
",",
"True",
",",
"True",
")",
"self",
".",
"all_scores",
".",
"append",
"(",
"self",
".",
"scores",
")",
"self",
".",
"scores",
"=",
"best_scores",
"# best_scores_id is flattened beam x word array, so calculate which",
"# word and beam each score came from",
"prev_k",
"=",
"best_scores_id",
"/",
"num_words",
"self",
".",
"prev_ks",
".",
"append",
"(",
"prev_k",
")",
"self",
".",
"prev_ks_cpu",
".",
"append",
"(",
"prev_k",
".",
"tolist",
"(",
")",
")",
"self",
".",
"next_ys",
".",
"append",
"(",
"(",
"best_scores_id",
"-",
"prev_k",
"*",
"num_words",
")",
")",
"self",
".",
"next_ys_cpu",
".",
"append",
"(",
"(",
"best_scores_id",
"-",
"prev_k",
"*",
"num_words",
")",
".",
"tolist",
"(",
")",
")",
"self",
".",
"attn",
".",
"append",
"(",
"attn_out",
".",
"index_select",
"(",
"0",
",",
"prev_k",
")",
")",
"self",
".",
"global_scorer",
".",
"update_global_state",
"(",
"self",
")",
"eos_indicator",
"=",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"==",
"self",
".",
"_eos",
"if",
"eos_indicator",
".",
"any",
"(",
")",
":",
"global_scores",
"=",
"self",
".",
"global_scorer",
".",
"score",
"(",
"self",
",",
"self",
".",
"scores",
")",
"global_scores_eos",
"=",
"global_scores",
"[",
"eos_indicator",
"]",
"i_indexes",
"=",
"torch",
".",
"where",
"(",
"eos_indicator",
")",
"[",
"0",
"]",
"for",
"s",
",",
"i",
",",
"in",
"zip",
"(",
"global_scores_eos",
".",
"tolist",
"(",
")",
",",
"i_indexes",
".",
"tolist",
"(",
")",
")",
":",
"self",
".",
"finished",
".",
"append",
"(",
"(",
"s",
",",
"len",
"(",
"self",
".",
"next_ys",
")",
"-",
"1",
",",
"i",
")",
")",
"# for i in range(self.next_ys[-1].size(0)):",
"# if self.next_ys[-1][i] == self._eos:",
"# global_scores = self.global_scorer.score(self, self.scores)",
"# s = global_scores[i]",
"# self.finished.append((s, len(self.next_ys) - 1, i))",
"# End condition is when top-of-beam is EOS and no global score.",
"if",
"self",
".",
"next_ys",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"self",
".",
"_eos",
":",
"self",
".",
"all_scores",
".",
"append",
"(",
"self",
".",
"scores",
")",
"self",
".",
"eos_top",
"=",
"True"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L76-L160 |
||
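The heart of `advance` above is the flat top-k over the (beam x vocab) score matrix, after which each flat index is split into a backpointer and a token id. A standalone check of that arithmetic (using explicit integer division `//`, which matches what `/` did on `LongTensor`s in the PyTorch versions this code targets):

```python
import torch

beam_size, num_words = 2, 5
# Score of extending each of 2 live hypotheses with each of 5 words.
beam_scores = torch.tensor([[-1.0, -9.0, -9.0, -0.5, -9.0],
                            [-9.0, -0.2, -9.0, -9.0, -9.0]])
best_scores, best_scores_id = beam_scores.view(-1).topk(
    beam_size, 0, True, True)

prev_k = best_scores_id // num_words          # which hypothesis was extended
next_y = best_scores_id - prev_k * num_words  # which token id was chosen
print(prev_k.tolist(), next_y.tolist())       # [1, 0] [1, 3]
```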
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/beam.py | python | Beam.get_hyp | (self, timestep, k, requires_attn=True) | return hyp[::-1], attn | Walk back to construct the full hypothesis. | Walk back to construct the full hypothesis. | [
"Walk",
"back",
"to",
"construct",
"the",
"full",
"hypothesis",
"."
] | def get_hyp(self, timestep, k, requires_attn=True):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys_cpu[j + 1][k])
if requires_attn:
attn.append(self.attn[j][k])
k = self.prev_ks_cpu[j][k]
if requires_attn:
attn = torch.stack(attn[::-1])
return hyp[::-1], attn | [
"def",
"get_hyp",
"(",
"self",
",",
"timestep",
",",
"k",
",",
"requires_attn",
"=",
"True",
")",
":",
"hyp",
",",
"attn",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"prev_ks",
"[",
":",
"timestep",
"]",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"hyp",
".",
"append",
"(",
"self",
".",
"next_ys_cpu",
"[",
"j",
"+",
"1",
"]",
"[",
"k",
"]",
")",
"if",
"requires_attn",
":",
"attn",
".",
"append",
"(",
"self",
".",
"attn",
"[",
"j",
"]",
"[",
"k",
"]",
")",
"k",
"=",
"self",
".",
"prev_ks_cpu",
"[",
"j",
"]",
"[",
"k",
"]",
"if",
"requires_attn",
":",
"attn",
"=",
"torch",
".",
"stack",
"(",
"attn",
"[",
":",
":",
"-",
"1",
"]",
")",
"return",
"hyp",
"[",
":",
":",
"-",
"1",
"]",
",",
"attn"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L180-L193 |
|
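`get_hyp` above replays the backpointer chains recorded by `advance`. The same walk on plain Python lists (token ids are arbitrary illustrations):

```python
# next_ys[t][k]: token chosen at step t in beam slot k (step 0 is the init)
# prev_ks[t][k]: slot at step t that beam slot k of step t+1 extended
next_ys = [[2, 2], [7, 4], [5, 9]]
prev_ks = [[1, 0], [0, 0]]

def get_hyp(timestep, k):
    hyp = []
    for j in range(len(prev_ks[:timestep]) - 1, -1, -1):
        hyp.append(next_ys[j + 1][k])
        k = prev_ks[j][k]
    return hyp[::-1]

# Slot 0 at step 2 points back to slot 0 at step 1 (token 7), which in
# turn extended slot 1 of the initial beam.
print(get_hyp(2, 0))  # [7, 5]
```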
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/translate/beam.py | python | GNMTGlobalScorer.score | (self, beam, logprobs) | return normalized_probs | Rescores a prediction based on penalty functions | Rescores a prediction based on penalty functions | [
"Rescores",
"a",
"prediction",
"based",
"on",
"penalty",
"functions"
] | def score(self, beam, logprobs):
"""
Rescores a prediction based on penalty functions
"""
normalized_probs = self.length_penalty(beam,
logprobs,
self.alpha)
if not beam.stepwise_penalty:
penalty = self.cov_penalty(beam,
beam.global_state["coverage"],
self.beta)
normalized_probs -= penalty
return normalized_probs | [
"def",
"score",
"(",
"self",
",",
"beam",
",",
"logprobs",
")",
":",
"normalized_probs",
"=",
"self",
".",
"length_penalty",
"(",
"beam",
",",
"logprobs",
",",
"self",
".",
"alpha",
")",
"if",
"not",
"beam",
".",
"stepwise_penalty",
":",
"penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
",",
"self",
".",
"beta",
")",
"normalized_probs",
"-=",
"penalty",
"return",
"normalized_probs"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L216-L229 |
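`GNMTGlobalScorer.score` above composes the penalty records documented earlier: length-normalize the raw log-probability, then (when the coverage penalty is not applied stepwise) subtract the coverage term, i.e. s(Y|X) = log P(Y|X) / lp(Y) - cp(X; Y) in Wu et al.'s notation. A self-contained numeric sketch with the two penalties inlined:

```python
import torch

def length_wu(logprobs, hyp_len, alpha):
    return logprobs / (((5 + hyp_len) ** alpha) / ((5 + 1) ** alpha))

def coverage_wu(cov, beta):
    return beta * -torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)

logprobs = torch.tensor([-6.0, -6.0])      # raw beam scores, equal so far
cov = torch.tensor([[1.0, 1.0, 1.0],       # fully covered source
                    [1.0, 1.0, 0.5]])      # one half-covered position
scores = length_wu(logprobs, hyp_len=10, alpha=0.6) - coverage_wu(cov, beta=0.2)
print(scores)  # ~ tensor([-3.46, -3.60]): the fully covered hypothesis wins
```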