Dataset schema (one record per extracted function):
nwo (string) | sha (string) | path (string) | language (string) | identifier (string) | parameters (string) | argument_list (string) | return_statement (string) | docstring (string) | docstring_summary (string) | docstring_tokens (sequence) | function (string) | function_tokens (sequence) | url (string)

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/translate/beam.py
language: python
identifier: GNMTGlobalScorer.update_score
parameters: (self, beam, attn)
function:
    def update_score(self, beam, attn):
        """
        Function to update scores of a Beam that is not finished
        """
        if "prev_penalty" in beam.global_state.keys():
            beam.scores.add_(beam.global_state["prev_penalty"])
            penalty = self.cov_penalty(beam,
                                       beam.global_state["coverage"] + attn,
                                       self.beta)
            beam.scores.sub_(penalty)
"def",
"update_score",
"(",
"self",
",",
"beam",
",",
"attn",
")",
":",
"if",
"\"prev_penalty\"",
"in",
"beam",
".",
"global_state",
".",
"keys",
"(",
")",
":",
"beam",
".",
"scores",
".",
"add_",
"(",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
")",
"penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"+",
"attn",
",",
"self",
".",
"beta",
")",
"beam",
".",
"scores",
".",
"sub_",
"(",
"penalty",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L231-L240

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/translate/beam.py
language: python
identifier: GNMTGlobalScorer.update_global_state
parameters: (self, beam)
function:
    def update_global_state(self, beam):
        "Keeps the coverage vector as sum of attentions"
        if len(beam.prev_ks) == 1:
            beam.global_state["prev_penalty"] = beam.scores.clone().fill_(0.0)
            beam.global_state["coverage"] = beam.attn[-1]
            self.cov_total = beam.attn[-1].sum(1)
        else:
            self.cov_total += torch.min(beam.attn[-1],
                                        beam.global_state['coverage']).sum(1)
            beam.global_state["coverage"] = beam.global_state["coverage"] \
                .index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])
            prev_penalty = self.cov_penalty(beam,
                                            beam.global_state["coverage"],
                                            self.beta)
            beam.global_state["prev_penalty"] = prev_penalty
"def",
"update_global_state",
"(",
"self",
",",
"beam",
")",
":",
"if",
"len",
"(",
"beam",
".",
"prev_ks",
")",
"==",
"1",
":",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
"=",
"beam",
".",
"scores",
".",
"clone",
"(",
")",
".",
"fill_",
"(",
"0.0",
")",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"=",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
"self",
".",
"cov_total",
"=",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
".",
"sum",
"(",
"1",
")",
"else",
":",
"self",
".",
"cov_total",
"+=",
"torch",
".",
"min",
"(",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
",",
"beam",
".",
"global_state",
"[",
"'coverage'",
"]",
")",
".",
"sum",
"(",
"1",
")",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
"=",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
".",
"index_select",
"(",
"0",
",",
"beam",
".",
"prev_ks",
"[",
"-",
"1",
"]",
")",
".",
"add",
"(",
"beam",
".",
"attn",
"[",
"-",
"1",
"]",
")",
"prev_penalty",
"=",
"self",
".",
"cov_penalty",
"(",
"beam",
",",
"beam",
".",
"global_state",
"[",
"\"coverage\"",
"]",
",",
"self",
".",
"beta",
")",
"beam",
".",
"global_state",
"[",
"\"prev_penalty\"",
"]",
"=",
"prev_penalty"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/beam.py#L242-L257

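The two scorer methods above work as a pair: update_global_state accumulates each step's attention into a running coverage vector and caches that step's penalty, while update_score adds back the cached penalty and subtracts one recomputed with the newest attention. A minimal sketch of the accumulation, with assumed sizes and an assumed GNMT-style penalty (the repo's cov_penalty implementation does not appear in this record):

    import torch

    beam_size, src_len, beta = 5, 7, 0.2
    coverage = torch.zeros(beam_size, src_len)   # running sum of attentions
    for _ in range(3):                           # three decoding steps
        step_attn = torch.rand(beam_size, src_len).softmax(-1)
        coverage = coverage + step_attn          # what update_global_state keeps
    # An assumed GNMT-style coverage penalty, for illustration only:
    penalty = -beta * torch.min(coverage, torch.ones_like(coverage)).log().sum(1)
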
nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/translate/translation.py
language: python
identifier: Translation.log
parameters: (self, sent_number)
return_statement: return output
function:
    def log(self, sent_number):
        """
        Log translation.
        """
        output = '\nSENT {}: {}\n'.format(sent_number, self.src_raw)
        best_pred = self.pred_sents[0]
        best_score = self.pred_scores[0]
        pred_sent = ' '.join(best_pred)
        output += 'PRED {}: {}\n'.format(sent_number, pred_sent)
        output += "PRED SCORE: {:.4f}\n".format(best_score)
        if self.gold_sent is not None:
            tgt_sent = ' '.join(self.gold_sent)
            output += 'GOLD {}: {}\n'.format(sent_number, tgt_sent)
            output += ("GOLD SCORE: {:.4f}\n".format(self.gold_score))
        if len(self.pred_sents) > 1:
            output += '\nBEST HYP:\n'
            for score, sent in zip(self.pred_scores, self.pred_sents):
                output += "[{:.4f}] {}\n".format(score, sent)
        return output
"def",
"log",
"(",
"self",
",",
"sent_number",
")",
":",
"output",
"=",
"'\\nSENT {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"self",
".",
"src_raw",
")",
"best_pred",
"=",
"self",
".",
"pred_sents",
"[",
"0",
"]",
"best_score",
"=",
"self",
".",
"pred_scores",
"[",
"0",
"]",
"pred_sent",
"=",
"' '",
".",
"join",
"(",
"best_pred",
")",
"output",
"+=",
"'PRED {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"pred_sent",
")",
"output",
"+=",
"\"PRED SCORE: {:.4f}\\n\"",
".",
"format",
"(",
"best_score",
")",
"if",
"self",
".",
"gold_sent",
"is",
"not",
"None",
":",
"tgt_sent",
"=",
"' '",
".",
"join",
"(",
"self",
".",
"gold_sent",
")",
"output",
"+=",
"'GOLD {}: {}\\n'",
".",
"format",
"(",
"sent_number",
",",
"tgt_sent",
")",
"output",
"+=",
"(",
"\"GOLD SCORE: {:.4f}\\n\"",
".",
"format",
"(",
"self",
".",
"gold_score",
")",
")",
"if",
"len",
"(",
"self",
".",
"pred_sents",
")",
">",
"1",
":",
"output",
"+=",
"'\\nBEST HYP:\\n'",
"for",
"score",
",",
"sent",
"in",
"zip",
"(",
"self",
".",
"pred_scores",
",",
"self",
".",
"pred_sents",
")",
":",
"output",
"+=",
"\"[{:.4f}] {}\\n\"",
".",
"format",
"(",
"score",
",",
"sent",
")",
"return",
"output"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/translate/translation.py#L134-L156

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: RNNDecoderBase.forward
parameters: (self, tgt, memory_bank, state, memory_lengths=None, step=None, sent_encoder=None, src_sents=None, dec=None)
return_statement: return decoder_outputs, state, attns
function:
    def forward(self, tgt, memory_bank, state, memory_lengths=None,
                step=None, sent_encoder=None, src_sents=None, dec=None):
        """
        Args:
            tgt (`LongTensor`): sequences of padded tokens
                `[tgt_len x batch x nfeats]`.
            memory_bank (`FloatTensor`): vectors from the encoder
                `[src_len x batch x hidden]`.
            state (:obj:`onmt.models.DecoderState`):
                decoder state object to initialize the decoder
            memory_lengths (`LongTensor`): the padded source lengths
                `[batch]`.
        Returns:
            (`FloatTensor`, :obj:`onmt.Models.DecoderState`, `FloatTensor`):
                * decoder_outputs: output from the decoder (after attn)
                  `[tgt_len x batch x hidden]`.
                * decoder_state: final hidden state from the decoder
                * attns: distribution over src at each tgt
                  `[tgt_len x batch x src_len]`.
        """
        # Check
        assert isinstance(state, RNNDecoderState)
        # tgt.size() returns tgt length and batch
        _, tgt_batch, _ = tgt.size()
        _, memory_batch, _ = memory_bank.size()
        aeq(tgt_batch, memory_batch)
        # END

        # TODO: I changed this return value ('sent_decoder')
        # Run the forward pass of the RNN.
        decoder_final, decoder_outputs, attns = self._run_forward_pass(
            tgt, memory_bank, state, memory_lengths=memory_lengths,
            sent_encoder=sent_encoder, src_sents=src_sents, dec=dec)

        # Update the state with the result.
        final_output = decoder_outputs[-1]
        coverage = None
        if "coverage" in attns:
            coverage = attns["coverage"][-1].unsqueeze(0)
        state.update_state(decoder_final, final_output.unsqueeze(0), coverage)

        # Concatenates sequence of tensors along a new dimension.
        # NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be a list
        # (in particular in case of SRU); it was not raising an error in 0.3
        # since stack(Variable) was allowed.
        # In 0.4, SRU returns a tensor that shouldn't be stacked.
        if type(decoder_outputs) == list:
            decoder_outputs = torch.stack(decoder_outputs)
        for k in attns:
            if type(attns[k]) == list:
                attns[k] = torch.stack(attns[k])
        return decoder_outputs, state, attns
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
",",
"sent_encoder",
"=",
"None",
",",
"src_sents",
"=",
"None",
",",
"dec",
"=",
"None",
")",
":",
"# Check",
"assert",
"isinstance",
"(",
"state",
",",
"RNNDecoderState",
")",
"# tgt.size() returns tgt length and batch",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"memory_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"memory_batch",
")",
"# END",
"# 23333: TODO I changed this return value 'sent_decoder'",
"# Run the forward pass of the RNN.",
"decoder_final",
",",
"decoder_outputs",
",",
"attns",
"=",
"self",
".",
"_run_forward_pass",
"(",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"memory_lengths",
",",
"sent_encoder",
"=",
"sent_encoder",
",",
"src_sents",
"=",
"src_sents",
",",
"dec",
"=",
"dec",
")",
"# Update the state with the result.",
"final_output",
"=",
"decoder_outputs",
"[",
"-",
"1",
"]",
"coverage",
"=",
"None",
"if",
"\"coverage\"",
"in",
"attns",
":",
"coverage",
"=",
"attns",
"[",
"\"coverage\"",
"]",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"state",
".",
"update_state",
"(",
"decoder_final",
",",
"final_output",
".",
"unsqueeze",
"(",
"0",
")",
",",
"coverage",
")",
"# Concatenates sequence of tensors along a new dimension.",
"# NOTE: v0.3 to 0.4: decoder_outputs / attns[*] may not be list",
"# (in particular in case of SRU) it was not raising error in 0.3",
"# since stack(Variable) was allowed.",
"# In 0.4, SRU returns a tensor that shouldn't be stacke",
"if",
"type",
"(",
"decoder_outputs",
")",
"==",
"list",
":",
"decoder_outputs",
"=",
"torch",
".",
"stack",
"(",
"decoder_outputs",
")",
"for",
"k",
"in",
"attns",
":",
"if",
"type",
"(",
"attns",
"[",
"k",
"]",
")",
"==",
"list",
":",
"attns",
"[",
"k",
"]",
"=",
"torch",
".",
"stack",
"(",
"attns",
"[",
"k",
"]",
")",
"return",
"decoder_outputs",
",",
"state",
",",
"attns"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L115-L172

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: RNNDecoderBase.init_decoder_state
parameters: (self, src, memory_bank, encoder_final, with_cache=False)
function:
    def init_decoder_state(self, src, memory_bank, encoder_final,
                           with_cache=False):
        """ Init decoder state with last state of the encoder """
        def _fix_enc_hidden(hidden):
            # The encoder hidden is (layers*directions) x batch x dim.
            # We need to convert it to layers x batch x (directions*dim).
            if self.bidirectional_encoder:
                hidden = torch.cat([hidden[0:hidden.size(0):2],
                                    hidden[1:hidden.size(0):2]], 2)
            return hidden

        if isinstance(encoder_final, tuple):  # LSTM
            return RNNDecoderState(self.hidden_size,
                                   tuple([_fix_enc_hidden(enc_hid)
                                          for enc_hid in encoder_final]))
        else:  # GRU
            return RNNDecoderState(self.hidden_size,
                                   _fix_enc_hidden(encoder_final))
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"encoder_final",
",",
"with_cache",
"=",
"False",
")",
":",
"def",
"_fix_enc_hidden",
"(",
"hidden",
")",
":",
"# The encoder hidden is (layers*directions) x batch x dim.",
"# We need to convert it to layers x batch x (directions*dim).",
"if",
"self",
".",
"bidirectional_encoder",
":",
"hidden",
"=",
"torch",
".",
"cat",
"(",
"[",
"hidden",
"[",
"0",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
",",
"hidden",
"[",
"1",
":",
"hidden",
".",
"size",
"(",
"0",
")",
":",
"2",
"]",
"]",
",",
"2",
")",
"return",
"hidden",
"if",
"isinstance",
"(",
"encoder_final",
",",
"tuple",
")",
":",
"# LSTM",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"tuple",
"(",
"[",
"_fix_enc_hidden",
"(",
"enc_hid",
")",
"for",
"enc_hid",
"in",
"encoder_final",
"]",
")",
")",
"else",
":",
"# GRU",
"return",
"RNNDecoderState",
"(",
"self",
".",
"hidden_size",
",",
"_fix_enc_hidden",
"(",
"encoder_final",
")",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L174-L191

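A minimal, self-contained sketch (sizes assumed) of the reshaping that _fix_enc_hidden performs for a bidirectional encoder, whose states arrive interleaved as [fwd0, bwd0, fwd1, bwd1] along the first dimension:

    import torch

    layers, directions, batch, dim = 2, 2, 3, 4
    hidden = torch.randn(layers * directions, batch, dim)
    fixed = torch.cat([hidden[0:hidden.size(0):2],   # forward states
                       hidden[1:hidden.size(0):2]],  # backward states
                      2)
    assert fixed.shape == (layers, batch, directions * dim)
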
nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: StdRNNDecoder._run_forward_pass
parameters: (self, tgt, memory_bank, state, memory_lengths=None, dec=False)
return_statement: return decoder_final, decoder_outputs, attns
function:
    def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None,
                          dec=False):
        """
        Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
        Args:
            tgt (LongTensor): a sequence of input tokens tensors
                [len x batch x nfeats].
            memory_bank (FloatTensor): output (tensor sequence) from the
                encoder RNN of size (src_len x batch x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                initializing the decoder.
            memory_lengths (LongTensor): the source memory_bank lengths.
        Returns:
            decoder_final (Tensor): final hidden state from the decoder.
            decoder_outputs ([FloatTensor]): an array of output of every time
                step from the decoder.
            attns (dict of (str, [FloatTensor])): a dictionary of different
                types of attention Tensor arrays of every time step from the
                decoder.
        """
        assert not self._copy  # TODO, no support yet.
        assert not self._coverage  # TODO, no support yet.

        # Initialize local and return variables.
        attns = {}
        emb = self.embeddings(tgt)

        # Run the forward pass of the RNN.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, decoder_final = self.rnn(emb, state.hidden[0])
        else:
            rnn_output, decoder_final = self.rnn(emb, state.hidden)

        # Check
        tgt_len, tgt_batch, _ = tgt.size()
        output_len, output_batch, _ = rnn_output.size()
        aeq(tgt_len, output_len)
        aeq(tgt_batch, output_batch)
        # END

        # Calculate the attention.
        decoder_outputs, p_attn = self.attn(
            rnn_output.transpose(0, 1).contiguous(),
            memory_bank.transpose(0, 1),
            memory_lengths=memory_lengths
        )
        attns["std"] = p_attn

        # Calculate the context gate.
        if self.context_gate is not None:
            decoder_outputs = self.context_gate(
                emb.view(-1, emb.size(2)),
                rnn_output.view(-1, rnn_output.size(2)),
                decoder_outputs.view(-1, decoder_outputs.size(2))
            )
            decoder_outputs = \
                decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)

        decoder_outputs = self.dropout(decoder_outputs)
        return decoder_final, decoder_outputs, attns
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"dec",
"=",
"False",
")",
":",
"assert",
"not",
"self",
".",
"_copy",
"# TODO, no support yet.",
"assert",
"not",
"self",
".",
"_coverage",
"# TODO, no support yet.",
"# Initialize local and return variables.",
"attns",
"=",
"{",
"}",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"# Run the forward pass of the RNN.",
"if",
"isinstance",
"(",
"self",
".",
"rnn",
",",
"nn",
".",
"GRU",
")",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
"[",
"0",
"]",
")",
"else",
":",
"rnn_output",
",",
"decoder_final",
"=",
"self",
".",
"rnn",
"(",
"emb",
",",
"state",
".",
"hidden",
")",
"# Check",
"tgt_len",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"output_len",
",",
"output_batch",
",",
"_",
"=",
"rnn_output",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_len",
",",
"output_len",
")",
"aeq",
"(",
"tgt_batch",
",",
"output_batch",
")",
"# END",
"# Calculate the attention.",
"decoder_outputs",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"p_attn",
"# Calculate the context gate.",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"decoder_outputs",
"=",
"self",
".",
"context_gate",
"(",
"emb",
".",
"view",
"(",
"-",
"1",
",",
"emb",
".",
"size",
"(",
"2",
")",
")",
",",
"rnn_output",
".",
"view",
"(",
"-",
"1",
",",
"rnn_output",
".",
"size",
"(",
"2",
")",
")",
",",
"decoder_outputs",
".",
"view",
"(",
"-",
"1",
",",
"decoder_outputs",
".",
"size",
"(",
"2",
")",
")",
")",
"decoder_outputs",
"=",
"decoder_outputs",
".",
"view",
"(",
"tgt_len",
",",
"tgt_batch",
",",
"self",
".",
"hidden_size",
")",
"decoder_outputs",
"=",
"self",
".",
"dropout",
"(",
"decoder_outputs",
")",
"return",
"decoder_final",
",",
"decoder_outputs",
",",
"attns"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L210-L271

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: StdRNNDecoder._input_size
parameters: (self)
return_statement: return self.embeddings.embedding_size
function:
    def _input_size(self):
        """
        Private helper returning the number of expected features.
        """
        return self.embeddings.embedding_size
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L278-L282

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: InputFeedRNNDecoder._run_mmr
parameters: (self, sent_encoder, sent_decoder, src_sents, input_step)
return_statement: return mmr_among_words
function:
    def _run_mmr(self, sent_encoder, sent_decoder, src_sents, input_step):
        '''
        Function to calculate MMR.
        # sent_encoder: size (sent_len=9, batch=2, dim=512)
        # sent_decoder: size (sent_len=1, batch=2, dim=512)
        # src_sents: size (batch=2, sent_len=9)
        :param sent_encoder:
        :param sent_decoder:
        :param src_sents:
        :return:
        '''
        pdist = nn.PairwiseDistance(p=2)
        sent_decoder = sent_decoder.permute(1, 0, 2)  # (2,1,512)

        scores = []
        # define sent matrix and current vector distance as the Euclidean
        # distance
        for sent in sent_encoder:
            # distance: https://pytorch.org/docs/stable/_modules/torch/nn/modules/distance.html
            # sim1 is a similarity function (this is sim2 in my equation;
            # note pdist itself is a distance!); shape (batch_size, 1)
            sim1 = 1 - torch.mean(pdist(sent_encoder.permute(1, 0, 2),
                                        sent.unsqueeze(1)), 1).unsqueeze(1)
            # (2,1) -> this is sim1 in my equation
            sim2 = torch.bmm(self.mmr_W(sent_decoder),
                             sent.unsqueeze(2)).squeeze(2)
            scores.append(sim2 - sim1)

        sent_ranking_att = torch.t(torch.cat(scores, 1))  # (sent_len=9, batch_size)
        sent_ranking_att = torch.softmax(sent_ranking_att,
                                         dim=0).permute(1, 0)
        # scores is a list of scores (sent_len=9, tensors of shape (batch_size, 1))
        mmr_among_words = []  # should be (batch=2, input_step=200)
        for batch_id in range(sent_ranking_att.size()[0]):
            # iterate each batch, create zero weight on the input steps
            tmp = []
            for id, position in enumerate(src_sents[batch_id]):
                for x in range(position):
                    tmp.append(sent_ranking_att[batch_id][id])
            mmr = torch.stack(tmp)  # make 1-d
            if len(mmr) < input_step:  # pad with 0
                tmp = torch.zeros(input_step - len(mmr)).float().cuda()
                mmr = torch.cat((mmr, tmp), 0)
            else:
                mmr = mmr[:input_step]
            mmr_among_words.append(mmr.unsqueeze(0))

        mmr_among_words = torch.cat(mmr_among_words, 0)
        # shape: (batch=2, input_step=200)
        return mmr_among_words
"def",
"_run_mmr",
"(",
"self",
",",
"sent_encoder",
",",
"sent_decoder",
",",
"src_sents",
",",
"input_step",
")",
":",
"pdist",
"=",
"nn",
".",
"PairwiseDistance",
"(",
"p",
"=",
"2",
")",
"sent_decoder",
"=",
"sent_decoder",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
"# (2,1,512)",
"scores",
"=",
"[",
"]",
"# define sent matrix and current vector distance as the Euclidean distance",
"for",
"sent",
"in",
"sent_encoder",
":",
"# iterate over each batch sample",
"# distance: https://pytorch.org/docs/stable/_modules/torch/nn/modules/distance.html",
"# import pdb;",
"# pdb.set_trace()",
"# sim1=torch.sum(pdist(sent_encoder.permute(1,0,2),sent.unsqueeze(1)),1).unsqueeze(1) # -> this is sim2 on my equation, note this is distance!",
"sim1",
"=",
"1",
"-",
"torch",
".",
"mean",
"(",
"pdist",
"(",
"sent_encoder",
".",
"permute",
"(",
"1",
",",
"0",
",",
"2",
")",
",",
"sent",
".",
"unsqueeze",
"(",
"1",
")",
")",
",",
"1",
")",
".",
"unsqueeze",
"(",
"1",
")",
"# this is a similarity function",
"# sim1 shape: (batch_size,1)",
"sim2",
"=",
"torch",
".",
"bmm",
"(",
"self",
".",
"mmr_W",
"(",
"sent_decoder",
")",
",",
"sent",
".",
"unsqueeze",
"(",
"2",
")",
")",
".",
"squeeze",
"(",
"2",
")",
"# (2,1) -> this is sim1 on my equation",
"# scores.append(sim1-sim2)",
"scores",
".",
"append",
"(",
"sim2",
"-",
"sim1",
")",
"sent_ranking_att",
"=",
"torch",
".",
"t",
"(",
"torch",
".",
"cat",
"(",
"scores",
",",
"1",
")",
")",
"#(sent_len=9,batch_size)",
"sent_ranking_att",
"=",
"torch",
".",
"softmax",
"(",
"sent_ranking_att",
",",
"dim",
"=",
"0",
")",
".",
"permute",
"(",
"1",
",",
"0",
")",
"#(sent_len=9,batch_size)",
"# scores is a list of score (sent_len=9, tensor shape (batch_size, 1))",
"mmr_among_words",
"=",
"[",
"]",
"# should be (batch=2,input_step=200)",
"for",
"batch_id",
"in",
"range",
"(",
"sent_ranking_att",
".",
"size",
"(",
")",
"[",
"0",
"]",
")",
":",
"# iterate each batch, create zero weight on the input steps",
"# mmr= torch.zeros([input_step], dtype=torch.float32).cuda()",
"tmp",
"=",
"[",
"]",
"for",
"id",
",",
"position",
"in",
"enumerate",
"(",
"src_sents",
"[",
"batch_id",
"]",
")",
":",
"for",
"x",
"in",
"range",
"(",
"position",
")",
":",
"tmp",
".",
"append",
"(",
"sent_ranking_att",
"[",
"batch_id",
"]",
"[",
"id",
"]",
")",
"mmr",
"=",
"torch",
".",
"stack",
"(",
"tmp",
")",
"# make to 1-d",
"if",
"len",
"(",
"mmr",
")",
"<",
"input_step",
":",
"# pad with 0",
"tmp",
"=",
"torch",
".",
"zeros",
"(",
"input_step",
"-",
"len",
"(",
"mmr",
")",
")",
".",
"float",
"(",
")",
".",
"cuda",
"(",
")",
"# for x in range(input_step-len(mmr)):",
"mmr",
"=",
"torch",
".",
"cat",
"(",
"(",
"mmr",
",",
"tmp",
")",
",",
"0",
")",
"else",
":",
"mmr",
"=",
"mmr",
"[",
":",
"input_step",
"]",
"mmr_among_words",
".",
"append",
"(",
"mmr",
".",
"unsqueeze",
"(",
"0",
")",
")",
"mmr_among_words",
"=",
"torch",
".",
"cat",
"(",
"mmr_among_words",
",",
"0",
")",
"# shape: (batch=2, input_step=200)",
"return",
"mmr_among_words"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L315-L379

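A minimal sketch (all sizes and scores assumed) of the expansion at the end of _run_mmr: each sentence's softmax score is repeated once per word of that sentence, and the resulting word-level vector is zero-padded or truncated to input_step:

    import torch

    sent_scores = torch.tensor([0.5, 0.3, 0.2])  # per-sentence MMR scores
    sent_lengths = [4, 2, 3]                     # words per sentence (one src_sents row)
    input_step = 10
    weights = torch.cat([score.repeat(n)
                         for score, n in zip(sent_scores, sent_lengths)])
    weights = torch.cat([weights, torch.zeros(input_step - len(weights))])
    # one MMR weight per source word, later multiplied elementwise into
    # each decoding step's word-level attention
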
nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: InputFeedRNNDecoder._run_forward_pass
parameters: (self, tgt, memory_bank, state, memory_lengths=None, sent_encoder=None, src_sents=None, dec=None)
return_statement: return hidden, decoder_outputs, attns
function:
    def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None,
                          sent_encoder=None, src_sents=None, dec=None):
        """
        See StdRNNDecoder._run_forward_pass() for a description
        of arguments and return values.
        TODO: added a new param: sent_encoder, from model.py, this is the
        sentence matrix; adds attns["mmr"] = [].
        """
        # Additional args check.
        input_feed = state.input_feed.squeeze(0)
        input_feed_batch, _ = input_feed.size()
        _, tgt_batch, _ = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        # END Additional args check.

        # Initialize local and return variables.
        decoder_outputs = []
        attns = {"std": []}
        if self._copy:
            attns["copy"] = []
        if self._coverage:
            attns["coverage"] = []

        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim

        hidden = state.hidden
        coverage = state.coverage.squeeze(0) \
            if state.coverage is not None else None

        # Input feed concatenates the hidden state with the input
        # at every time step.
        for _, emb_t in enumerate(emb.split(1)):
            # for each output time step in the loop
            emb_t = emb_t.squeeze(0)
            decoder_input = torch.cat([emb_t, input_feed], 1)
            # the following is where we get attention
            rnn_output, hidden = self.rnn(decoder_input, hidden)
            decoder_output, p_attn = self.attn(
                rnn_output,
                memory_bank.transpose(0, 1),
                memory_lengths=memory_lengths)
            # p_attn: size (batch=2, input_step=200)
            if self.context_gate is not None:
                # the context gate is employed instead of a second
                # RNN transform.
                decoder_output = self.context_gate(
                    decoder_input, rnn_output, decoder_output
                )
            decoder_output = self.dropout(decoder_output)
            input_feed = decoder_output

            decoder_outputs += [decoder_output]
            attns["std"] += [p_attn]

            # Update the coverage attention.
            if self._coverage:
                coverage = coverage + p_attn \
                    if coverage is not None else p_attn
                attns["coverage"] += [coverage]

            # Run the forward pass of the copy attention layer.
            if self._copy and not self._reuse_copy_attn:
                _, copy_attn = self.copy_attn(decoder_output,
                                              memory_bank.transpose(0, 1))
                attns["copy"] += [copy_attn]
            elif self._copy:
                # attns["copy"] is a list of one tensor per output step (=51),
                # each of size [batch_size=2, input_step=200]
                attns["copy"] = attns["std"]

        if not dec:
            attns["mmr"] = []
            # the sentence representation for the decoder
            sent_decoder = decoder_outputs[-1].unsqueeze(0)  # (1, batch_size=2, dim=512)
            # attns['std'] is a list of output_step tensors, each of shape
            # (batch=2, input_step=200); compute the mmr attention here:
            mmr_among_words = self._run_mmr(sent_encoder, sent_decoder,
                                            src_sents,
                                            attns["std"][0].size()[-1])
            # bring mmr to attention
            for output_step in attns["std"]:
                attention_weight = output_step
                # pairwise multiplication
                attention_weight = torch.mul(mmr_among_words,
                                             attention_weight)
                attns["mmr"].append(attention_weight.cuda())
            attns["std"] = attns["mmr"]

        # decoder_outputs is a list of one tensor per output step (=51),
        # each of shape (batch_size=2, dim=512)
        return hidden, decoder_outputs, attns
"def",
"_run_forward_pass",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"sent_encoder",
"=",
"None",
",",
"src_sents",
"=",
"None",
",",
"dec",
"=",
"None",
")",
":",
"# Additional args check.",
"input_feed",
"=",
"state",
".",
"input_feed",
".",
"squeeze",
"(",
"0",
")",
"#print(\"input feed size: {}\\n\".format(input_feed.size()))",
"input_feed_batch",
",",
"_",
"=",
"input_feed",
".",
"size",
"(",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"input_feed_batch",
")",
"# END Additional args check.",
"# Initialize local and return variables.",
"decoder_outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"if",
"self",
".",
"_coverage",
":",
"attns",
"[",
"\"coverage\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"hidden",
"=",
"state",
".",
"hidden",
"coverage",
"=",
"state",
".",
"coverage",
".",
"squeeze",
"(",
"0",
")",
"if",
"state",
".",
"coverage",
"is",
"not",
"None",
"else",
"None",
"# Input feed concatenates hidden state with",
"# input at every time step.",
"#print(\"emb size: {}\\n\".format(emb.size()));exit()",
"for",
"_",
",",
"emb_t",
"in",
"enumerate",
"(",
"emb",
".",
"split",
"(",
"1",
")",
")",
":",
"# for each output time step in the loop",
"emb_t",
"=",
"emb_t",
".",
"squeeze",
"(",
"0",
")",
"decoder_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"emb_t",
",",
"input_feed",
"]",
",",
"1",
")",
"# TODO: the following is where we get attention!",
"rnn_output",
",",
"hidden",
"=",
"self",
".",
"rnn",
"(",
"decoder_input",
",",
"hidden",
")",
"decoder_output",
",",
"p_attn",
"=",
"self",
".",
"attn",
"(",
"rnn_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"memory_lengths",
"=",
"memory_lengths",
")",
"# p_attn: size (batch=2,input_step=200)",
"if",
"self",
".",
"context_gate",
"is",
"not",
"None",
":",
"# TODO: context gate should be employed (not me)",
"# instead of second RNN transform.",
"decoder_output",
"=",
"self",
".",
"context_gate",
"(",
"decoder_input",
",",
"rnn_output",
",",
"decoder_output",
")",
"decoder_output",
"=",
"self",
".",
"dropout",
"(",
"decoder_output",
")",
"input_feed",
"=",
"decoder_output",
"decoder_outputs",
"+=",
"[",
"decoder_output",
"]",
"attns",
"[",
"\"std\"",
"]",
"+=",
"[",
"p_attn",
"]",
"# Update the coverage attention.",
"if",
"self",
".",
"_coverage",
":",
"coverage",
"=",
"coverage",
"+",
"p_attn",
"if",
"coverage",
"is",
"not",
"None",
"else",
"p_attn",
"attns",
"[",
"\"coverage\"",
"]",
"+=",
"[",
"coverage",
"]",
"# Run the forward pass of the copy attention layer.",
"#",
"if",
"self",
".",
"_copy",
"and",
"not",
"self",
".",
"_reuse_copy_attn",
":",
"_",
",",
"copy_attn",
"=",
"self",
".",
"copy_attn",
"(",
"decoder_output",
",",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
")",
"attns",
"[",
"\"copy\"",
"]",
"+=",
"[",
"copy_attn",
"]",
"elif",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attns",
"[",
"\"std\"",
"]",
"# attns[\"copy\"] is a list of tensor for each output step=51, each size: [batch_size=2, input_step=200]",
"if",
"not",
"dec",
":",
"#if this is not dec?",
"attns",
"[",
"\"mmr\"",
"]",
"=",
"[",
"]",
"# 2333: TODO : the sentence representation for decoder",
"sent_decoder",
"=",
"decoder_outputs",
"[",
"-",
"1",
"]",
".",
"unsqueeze",
"(",
"0",
")",
"# shape: (1, batch_size=2,dim=512)",
"# Return result.",
"# 2333: TODO: attns['std'] is a list of tensors, length is output_step, each tensor shape is (batch=2,input_step=200)",
"# 2333: TODO: compute mmr attention here:",
"mmr_among_words",
"=",
"self",
".",
"_run_mmr",
"(",
"sent_encoder",
",",
"sent_decoder",
",",
"src_sents",
",",
"attns",
"[",
"\"std\"",
"]",
"[",
"0",
"]",
".",
"size",
"(",
")",
"[",
"-",
"1",
"]",
")",
"# 2333: TODO: bring mmr to attention...",
"for",
"output_step",
"in",
"attns",
"[",
"\"std\"",
"]",
":",
"attention_weight",
"=",
"output_step",
"# pairwise multiplication",
"attention_weight",
"=",
"torch",
".",
"mul",
"(",
"mmr_among_words",
",",
"attention_weight",
")",
"attns",
"[",
"\"mmr\"",
"]",
".",
"append",
"(",
"attention_weight",
".",
"cuda",
"(",
")",
")",
"# pdb.set_trace()",
"attns",
"[",
"\"std\"",
"]",
"=",
"attns",
"[",
"\"mmr\"",
"]",
"# decoder_outputs is a list of tensors for each output step=51, each tensor: (batch_size=2,dim=512)",
"return",
"hidden",
",",
"decoder_outputs",
",",
"attns"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L381-L485

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: InputFeedRNNDecoder._input_size
parameters: (self)
return_statement: return self.embeddings.embedding_size + self.hidden_size
function:
    def _input_size(self):
        """
        Using input feed by concatenating input with attention vectors.
        """
        return self.embeddings.embedding_size + self.hidden_size
"def",
"_input_size",
"(",
"self",
")",
":",
"return",
"self",
".",
"embeddings",
".",
"embedding_size",
"+",
"self",
".",
"hidden_size"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L499-L503

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: DecoderState.detach
parameters: (self)
function:
    def detach(self):
        """ Need to document this """
        self.hidden = tuple([_.detach() for _ in self.hidden])
        self.input_feed = self.input_feed.detach()
"def",
"detach",
"(",
"self",
")",
":",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"[",
"_",
".",
"detach",
"(",
")",
"for",
"_",
"in",
"self",
".",
"hidden",
"]",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"input_feed",
".",
"detach",
"(",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L514-L517

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: DecoderState.beam_update
parameters: (self, idx, positions, beam_size)
function:
    def beam_update(self, idx, positions, beam_size):
        """ Need to document this """
        for e in self._all:
            sizes = e.size()
            br = sizes[1]
            if len(sizes) == 3:
                sent_states = e.view(sizes[0], beam_size, br // beam_size,
                                     sizes[2])[:, :, idx]
            else:
                sent_states = e.view(sizes[0], beam_size,
                                     br // beam_size,
                                     sizes[2],
                                     sizes[3])[:, :, idx]
            sent_states.data.copy_(
                sent_states.data.index_select(1, positions))
"def",
"beam_update",
"(",
"self",
",",
"idx",
",",
"positions",
",",
"beam_size",
")",
":",
"for",
"e",
"in",
"self",
".",
"_all",
":",
"sizes",
"=",
"e",
".",
"size",
"(",
")",
"br",
"=",
"sizes",
"[",
"1",
"]",
"if",
"len",
"(",
"sizes",
")",
"==",
"3",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"else",
":",
"sent_states",
"=",
"e",
".",
"view",
"(",
"sizes",
"[",
"0",
"]",
",",
"beam_size",
",",
"br",
"//",
"beam_size",
",",
"sizes",
"[",
"2",
"]",
",",
"sizes",
"[",
"3",
"]",
")",
"[",
":",
",",
":",
",",
"idx",
"]",
"sent_states",
".",
"data",
".",
"copy_",
"(",
"sent_states",
".",
"data",
".",
"index_select",
"(",
"1",
",",
"positions",
")",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L519-L534

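A minimal sketch (sizes assumed) of the reshuffling beam_update performs: a state laid out as (steps, beam*batch, dim) is viewed as (steps, beam, batch, dim), one sentence's column is selected, and its beam entries are reordered in place by backpointer positions:

    import torch

    steps, beam_size, batch, dim = 1, 3, 2, 4
    e = torch.arange(steps * beam_size * batch * dim,
                     dtype=torch.float).view(steps, beam_size * batch, dim)
    idx = 0                              # which sentence of the batch
    positions = torch.tensor([2, 0, 1])  # beam backpointers for that sentence
    sent_states = e.view(steps, beam_size, batch, dim)[:, :, idx]
    sent_states.data.copy_(sent_states.data.index_select(1, positions))
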
nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: RNNDecoderState.__init__
parameters: (self, hidden_size, rnnstate)
function:
    def __init__(self, hidden_size, rnnstate):
        """
        Args:
            hidden_size (int): the size of the hidden layer of the decoder.
            rnnstate: final hidden state from the encoder,
                transformed to shape: layers x batch x (directions*dim).
        """
        if not isinstance(rnnstate, tuple):
            self.hidden = (rnnstate,)
        else:
            self.hidden = rnnstate
        self.coverage = None

        # Init the input feed.
        batch_size = self.hidden[0].size(1)
        h_size = (batch_size, hidden_size)
        self.input_feed = self.hidden[0].data.new(*h_size).zero_() \
                                             .unsqueeze(0)
"def",
"__init__",
"(",
"self",
",",
"hidden_size",
",",
"rnnstate",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"coverage",
"=",
"None",
"# Init the input feed.",
"batch_size",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"size",
"(",
"1",
")",
"h_size",
"=",
"(",
"batch_size",
",",
"hidden_size",
")",
"self",
".",
"input_feed",
"=",
"self",
".",
"hidden",
"[",
"0",
"]",
".",
"data",
".",
"new",
"(",
"*",
"h_size",
")",
".",
"zero_",
"(",
")",
".",
"unsqueeze",
"(",
"0",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L543-L560

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: RNNDecoderState.update_state
parameters: (self, rnnstate, input_feed, coverage)
function:
    def update_state(self, rnnstate, input_feed, coverage):
        """ Update decoder state """
        if not isinstance(rnnstate, tuple):
            self.hidden = (rnnstate,)
        else:
            self.hidden = rnnstate
        self.input_feed = input_feed
        self.coverage = coverage
"def",
"update_state",
"(",
"self",
",",
"rnnstate",
",",
"input_feed",
",",
"coverage",
")",
":",
"if",
"not",
"isinstance",
"(",
"rnnstate",
",",
"tuple",
")",
":",
"self",
".",
"hidden",
"=",
"(",
"rnnstate",
",",
")",
"else",
":",
"self",
".",
"hidden",
"=",
"rnnstate",
"self",
".",
"input_feed",
"=",
"input_feed",
"self",
".",
"coverage",
"=",
"coverage"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L566-L573

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/decoder.py
language: python
identifier: RNNDecoderState.repeat_beam_size_times
parameters: (self, beam_size)
function:
    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        vars = [e.data.repeat(1, beam_size, 1)
                for e in self._all]
        self.hidden = tuple(vars[:-1])
        self.input_feed = vars[-1]
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"vars",
"=",
"[",
"e",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")",
"for",
"e",
"in",
"self",
".",
"_all",
"]",
"self",
".",
"hidden",
"=",
"tuple",
"(",
"vars",
"[",
":",
"-",
"1",
"]",
")",
"self",
".",
"input_feed",
"=",
"vars",
"[",
"-",
"1",
"]"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/decoder.py#L575-L580

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: load_test_model
parameters: (opt, dummy_opt)
return_statement: return shared_fields, ensemble_model, shared_model_opt
function:
    def load_test_model(opt, dummy_opt):
        """ Read in multiple models for ensemble """
        shared_fields = None
        shared_model_opt = None
        models = []
        for model_path in opt.models:
            fields, model, model_opt = \
                onmt.model_builder.load_test_model(opt,
                                                   dummy_opt,
                                                   model_path=model_path)
            if shared_fields is None:
                shared_fields = fields
            else:
                for key, field in fields.items():
                    if field is not None and 'vocab' in field.__dict__:
                        assert field.vocab.stoi == shared_fields[key].vocab.stoi, \
                            'Ensemble models must use the same preprocessed data'
            models.append(model)
            if shared_model_opt is None:
                shared_model_opt = model_opt
        ensemble_model = EnsembleModel(models)
        return shared_fields, ensemble_model, shared_model_opt
"def",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
")",
":",
"shared_fields",
"=",
"None",
"shared_model_opt",
"=",
"None",
"models",
"=",
"[",
"]",
"for",
"model_path",
"in",
"opt",
".",
"models",
":",
"fields",
",",
"model",
",",
"model_opt",
"=",
"onmt",
".",
"model_builder",
".",
"load_test_model",
"(",
"opt",
",",
"dummy_opt",
",",
"model_path",
"=",
"model_path",
")",
"import",
"pdb",
"pdb",
".",
"set_trace",
"(",
")",
"if",
"shared_fields",
"is",
"None",
":",
"shared_fields",
"=",
"fields",
"else",
":",
"for",
"key",
",",
"field",
"in",
"fields",
".",
"items",
"(",
")",
":",
"if",
"field",
"is",
"not",
"None",
"and",
"'vocab'",
"in",
"field",
".",
"__dict__",
":",
"assert",
"field",
".",
"vocab",
".",
"stoi",
"==",
"shared_fields",
"[",
"key",
"]",
".",
"vocab",
".",
"stoi",
",",
"'Ensemble models must use the same preprocessed data'",
"models",
".",
"append",
"(",
"model",
")",
"if",
"shared_model_opt",
"is",
"None",
":",
"shared_model_opt",
"=",
"model_opt",
"ensemble_model",
"=",
"EnsembleModel",
"(",
"models",
")",
"return",
"shared_fields",
",",
"ensemble_model",
",",
"shared_model_opt"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L135-L157

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: EnsembleDecoderState.repeat_beam_size_times
parameters: (self, beam_size)
function:
    def repeat_beam_size_times(self, beam_size):
        """ Repeat beam_size times along batch dimension. """
        for model_state in self.model_decoder_states:
            model_state.repeat_beam_size_times(beam_size)
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"for",
"model_state",
"in",
"self",
".",
"model_decoder_states",
":",
"model_state",
".",
"repeat_beam_size_times",
"(",
"beam_size",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L27-L30

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: EnsembleDecoderOutput.squeeze
parameters: (self, dim=None)
return_statement: return EnsembleDecoderOutput([x.squeeze(dim) for x in self.model_outputs])
function:
    def squeeze(self, dim=None):
        """
        Delegate squeeze to avoid modifying
        :obj:`Translator.translate_batch()`
        """
        return EnsembleDecoderOutput([
            x.squeeze(dim) for x in self.model_outputs])
"def",
"squeeze",
"(",
"self",
",",
"dim",
"=",
"None",
")",
":",
"return",
"EnsembleDecoderOutput",
"(",
"[",
"x",
".",
"squeeze",
"(",
"dim",
")",
"for",
"x",
"in",
"self",
".",
"model_outputs",
"]",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L41-L47

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: EnsembleDecoder.forward
parameters: (self, tgt, memory_bank, state, memory_lengths=None, step=None)
return_statement: return (EnsembleDecoderOutput(outputs), EnsembleDecoderState(states), mean_attns)
function:
    def forward(self, tgt, memory_bank, state, memory_lengths=None,
                step=None):
        """ See :obj:`RNNDecoderBase.forward()` """
        # Memory_lengths is a single tensor shared between all models.
        # This assumption will not hold if Translator is modified
        # to calculate memory_lengths as something other than the length
        # of the input.
        outputs, states, attns = zip(*[
            model_decoder.forward(
                tgt, memory_bank[i], state[i], memory_lengths, step=step)
            for (i, model_decoder)
            in enumerate(self.model_decoders)])
        mean_attns = self.combine_attns(attns)
        return (EnsembleDecoderOutput(outputs),
                EnsembleDecoderState(states),
                mean_attns)
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# Memory_lengths is a single tensor shared between all models.",
"# This assumption will not hold if Translator is modified",
"# to calculate memory_lengths as something other than the length",
"# of the input.",
"outputs",
",",
"states",
",",
"attns",
"=",
"zip",
"(",
"*",
"[",
"model_decoder",
".",
"forward",
"(",
"tgt",
",",
"memory_bank",
"[",
"i",
"]",
",",
"state",
"[",
"i",
"]",
",",
"memory_lengths",
",",
"step",
"=",
"step",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")",
"mean_attns",
"=",
"self",
".",
"combine_attns",
"(",
"attns",
")",
"return",
"(",
"EnsembleDecoderOutput",
"(",
"outputs",
")",
",",
"EnsembleDecoderState",
"(",
"states",
")",
",",
"mean_attns",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L72-L87

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: EnsembleDecoder.init_decoder_state
parameters: (self, src, memory_bank, enc_hidden)
return_statement: return EnsembleDecoderState([model_decoder.init_decoder_state(src, memory_bank[i], enc_hidden[i]) for (i, model_decoder) in enumerate(self.model_decoders)])
function:
    def init_decoder_state(self, src, memory_bank, enc_hidden):
        """ See :obj:`RNNDecoderBase.init_decoder_state()` """
        return EnsembleDecoderState(
            [model_decoder.init_decoder_state(src,
                                              memory_bank[i],
                                              enc_hidden[i])
             for (i, model_decoder) in enumerate(self.model_decoders)])
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"enc_hidden",
")",
":",
"return",
"EnsembleDecoderState",
"(",
"[",
"model_decoder",
".",
"init_decoder_state",
"(",
"src",
",",
"memory_bank",
"[",
"i",
"]",
",",
"enc_hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_decoder",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_decoders",
")",
"]",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L95-L101

nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/ensemble.py
language: python
identifier: EnsembleGenerator.forward
parameters: (self, hidden)
return_statement: return torch.stack(distributions).mean(0)
function:
    def forward(self, hidden):
        """
        Compute a distribution over the target dictionary
        by averaging distributions from models in the ensemble.
        All models in the ensemble must share a target vocabulary.
        """
        distributions = [model_generator.forward(hidden[i])
                         for (i, model_generator)
                         in enumerate(self.model_generators)]
        return torch.stack(distributions).mean(0)
"def",
"forward",
"(",
"self",
",",
"hidden",
")",
":",
"distributions",
"=",
"[",
"model_generator",
".",
"forward",
"(",
"hidden",
"[",
"i",
"]",
")",
"for",
"(",
"i",
",",
"model_generator",
")",
"in",
"enumerate",
"(",
"self",
".",
"model_generators",
")",
"]",
"return",
"torch",
".",
"stack",
"(",
"distributions",
")",
".",
"mean",
"(",
"0",
")"
url: https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/ensemble.py#L113-L122

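A minimal sketch (shapes assumed; shown with probabilities, while the actual generators may emit log-probabilities) of the averaging in EnsembleGenerator.forward:

    import torch

    vocab, batch = 6, 2
    distributions = [torch.softmax(torch.randn(batch, vocab), -1)
                     for _ in range(3)]             # one per ensemble member
    mean_dist = torch.stack(distributions).mean(0)  # (batch, vocab)
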
nwo: Alex-Fabbri/Multi-News
sha: f6476d1f114662eb93db32e9b704b7c4fe047217
path: code/Hi_MAP/onmt/decoders/transformer.py
language: python
identifier: TransformerDecoderLayer.forward
parameters: (self, inputs, memory_bank, src_pad_mask, tgt_pad_mask, previous_input=None, layer_cache=None, step=None)
return_statement: return output, attn, all_input
function:
    def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                previous_input=None, layer_cache=None, step=None):
        """
        Args:
            inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
            memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
            src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
            tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
        Returns:
            (`FloatTensor`, `FloatTensor`, `FloatTensor`):
            * output `[batch_size x 1 x model_dim]`
            * attn `[batch_size x 1 x src_len]`
            * all_input `[batch_size x current_step x model_dim]`
        """
        dec_mask = torch.gt(tgt_pad_mask +
                            self.mask[:, :tgt_pad_mask.size(1),
                                      :tgt_pad_mask.size(1)], 0)
        input_norm = self.layer_norm_1(inputs)
        all_input = input_norm
        if previous_input is not None:
            all_input = torch.cat((previous_input, input_norm), dim=1)
            dec_mask = None

        if self.self_attn_type == "scaled-dot":
            query, attn = self.self_attn(all_input, all_input, input_norm,
                                         mask=dec_mask,
                                         layer_cache=layer_cache,
                                         type="self")
        elif self.self_attn_type == "average":
            query, attn = self.self_attn(input_norm, mask=dec_mask,
                                         layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs
        query_norm = self.layer_norm_2(query)
        mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,
                                      mask=src_pad_mask,
                                      layer_cache=layer_cache,
                                      type="context")
        output = self.feed_forward(self.drop(mid) + query)
        return output, attn, all_input
"def",
"forward",
"(",
"self",
",",
"inputs",
",",
"memory_bank",
",",
"src_pad_mask",
",",
"tgt_pad_mask",
",",
"previous_input",
"=",
"None",
",",
"layer_cache",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"dec_mask",
"=",
"torch",
".",
"gt",
"(",
"tgt_pad_mask",
"+",
"self",
".",
"mask",
"[",
":",
",",
":",
"tgt_pad_mask",
".",
"size",
"(",
"1",
")",
",",
":",
"tgt_pad_mask",
".",
"size",
"(",
"1",
")",
"]",
",",
"0",
")",
"input_norm",
"=",
"self",
".",
"layer_norm_1",
"(",
"inputs",
")",
"all_input",
"=",
"input_norm",
"if",
"previous_input",
"is",
"not",
"None",
":",
"all_input",
"=",
"torch",
".",
"cat",
"(",
"(",
"previous_input",
",",
"input_norm",
")",
",",
"dim",
"=",
"1",
")",
"dec_mask",
"=",
"None",
"if",
"self",
".",
"self_attn_type",
"==",
"\"scaled-dot\"",
":",
"query",
",",
"attn",
"=",
"self",
".",
"self_attn",
"(",
"all_input",
",",
"all_input",
",",
"input_norm",
",",
"mask",
"=",
"dec_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"type",
"=",
"\"self\"",
")",
"elif",
"self",
".",
"self_attn_type",
"==",
"\"average\"",
":",
"query",
",",
"attn",
"=",
"self",
".",
"self_attn",
"(",
"input_norm",
",",
"mask",
"=",
"dec_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"step",
"=",
"step",
")",
"query",
"=",
"self",
".",
"drop",
"(",
"query",
")",
"+",
"inputs",
"query_norm",
"=",
"self",
".",
"layer_norm_2",
"(",
"query",
")",
"mid",
",",
"attn",
"=",
"self",
".",
"context_attn",
"(",
"memory_bank",
",",
"memory_bank",
",",
"query_norm",
",",
"mask",
"=",
"src_pad_mask",
",",
"layer_cache",
"=",
"layer_cache",
",",
"type",
"=",
"\"context\"",
")",
"output",
"=",
"self",
".",
"feed_forward",
"(",
"self",
".",
"drop",
"(",
"mid",
")",
"+",
"query",
")",
"return",
"output",
",",
"attn",
",",
"all_input"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L53-L97 |
|
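The dec_mask computation at the top of TransformerDecoderLayer.forward above is the union of two masks: positions that are padding (tgt_pad_mask) and positions in the future (the cached triangular self.mask). Adding the two uint8 masks and testing for values greater than zero with torch.gt yields that union. A standalone sketch with toy shapes, assuming the same uint8 mask convention:

import torch

tgt_len = 4
# Triangular "future" mask: 1 strictly above the diagonal.
subsequent = torch.triu(torch.ones(1, tgt_len, tgt_len, dtype=torch.uint8), diagonal=1)
# Toy padding mask: the last target position is padding.
tgt_pad = torch.tensor([0, 0, 0, 1], dtype=torch.uint8)
tgt_pad = tgt_pad.view(1, 1, tgt_len).expand(1, tgt_len, tgt_len)
# Union of the two, exactly as in torch.gt(tgt_pad_mask + self.mask[...], 0).
dec_mask = torch.gt(tgt_pad + subsequent, 0)
print(dec_mask.int())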
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoderLayer._get_attn_subsequent_mask | (self, size) | return subsequent_mask | Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]` | Get an attention mask to avoid using the subsequent info. | [
"Get",
"an",
"attention",
"mask",
"to",
"avoid",
"using",
"the",
"subsequent",
"info",
"."
] | def _get_attn_subsequent_mask(self, size):
"""
Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]`
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask | [
"def",
"_get_attn_subsequent_mask",
"(",
"self",
",",
"size",
")",
":",
"attn_shape",
"=",
"(",
"1",
",",
"size",
",",
"size",
")",
"subsequent_mask",
"=",
"np",
".",
"triu",
"(",
"np",
".",
"ones",
"(",
"attn_shape",
")",
",",
"k",
"=",
"1",
")",
".",
"astype",
"(",
"'uint8'",
")",
"subsequent_mask",
"=",
"torch",
".",
"from_numpy",
"(",
"subsequent_mask",
")",
"return",
"subsequent_mask"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L99-L114 |
|
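For reference, running _get_attn_subsequent_mask's np.triu construction for size=4 makes the masking pattern concrete: row i has ones exactly at the columns position i may not attend to (the future positions).

import numpy as np
import torch

size = 4
subsequent_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
print(torch.from_numpy(subsequent_mask)[0])
# tensor([[0, 1, 1, 1],
#         [0, 0, 1, 1],
#         [0, 0, 0, 1],
#         [0, 0, 0, 0]], dtype=torch.uint8)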
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None,
step=None, cache=None) | return outputs, state, attns | See :obj:`onmt.modules.RNNDecoderBase.forward()` | See :obj:`onmt.modules.RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None,
step=None, cache=None):
"""
See :obj:`onmt.modules.RNNDecoderBase.forward()`
"""
src = state.src
src_words = src[:, :, 0].transpose(0, 1)
tgt_words = tgt[:, :, 0].transpose(0, 1)
src_batch, src_len = src_words.size()
tgt_batch, tgt_len = tgt_words.size()
# Initialize return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
# Run the forward pass of the TransformerDecoder.
emb = self.embeddings(tgt, step=step)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_memory_bank = memory_bank.transpose(0, 1).contiguous()
padding_idx = self.embeddings.word_padding_idx
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \
.expand(src_batch, tgt_len, src_len)
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \
.expand(tgt_batch, tgt_len, tgt_len)
if state.cache is None:
saved_inputs = []
for i in range(self.num_layers):
prev_layer_input = None
if state.cache is None:
if state.previous_input is not None:
prev_layer_input = state.previous_layer_inputs[i]
output, attn, all_input \
= self.transformer_layers[i](
output, src_memory_bank,
src_pad_mask, tgt_pad_mask,
previous_input=prev_layer_input,
layer_cache=state.cache["layer_{}".format(i)]
if state.cache is not None else None,
step=step)
if state.cache is None:
saved_inputs.append(all_input)
if state.cache is None:
saved_inputs = torch.stack(saved_inputs)
output = self.layer_norm(output)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
attn = attn.transpose(0, 1).contiguous()
attns["std"] = attn
if self._copy:
attns["copy"] = attn
if state.cache is None:
state = state.update_state(tgt, saved_inputs)
return outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
",",
"cache",
"=",
"None",
")",
":",
"src",
"=",
"state",
".",
"src",
"src_words",
"=",
"src",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"tgt_words",
"=",
"tgt",
"[",
":",
",",
":",
",",
"0",
"]",
".",
"transpose",
"(",
"0",
",",
"1",
")",
"src_batch",
",",
"src_len",
"=",
"src_words",
".",
"size",
"(",
")",
"tgt_batch",
",",
"tgt_len",
"=",
"tgt_words",
".",
"size",
"(",
")",
"# Initialize return variables.",
"outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"# Run the forward pass of the TransformerDecoder.",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
",",
"step",
"=",
"step",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"output",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"src_memory_bank",
"=",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"padding_idx",
"=",
"self",
".",
"embeddings",
".",
"word_padding_idx",
"src_pad_mask",
"=",
"src_words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"src_batch",
",",
"tgt_len",
",",
"src_len",
")",
"tgt_pad_mask",
"=",
"tgt_words",
".",
"data",
".",
"eq",
"(",
"padding_idx",
")",
".",
"unsqueeze",
"(",
"1",
")",
".",
"expand",
"(",
"tgt_batch",
",",
"tgt_len",
",",
"tgt_len",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_layers",
")",
":",
"prev_layer_input",
"=",
"None",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"prev_layer_input",
"=",
"state",
".",
"previous_layer_inputs",
"[",
"i",
"]",
"output",
",",
"attn",
",",
"all_input",
"=",
"self",
".",
"transformer_layers",
"[",
"i",
"]",
"(",
"output",
",",
"src_memory_bank",
",",
"src_pad_mask",
",",
"tgt_pad_mask",
",",
"previous_input",
"=",
"prev_layer_input",
",",
"layer_cache",
"=",
"state",
".",
"cache",
"[",
"\"layer_{}\"",
".",
"format",
"(",
"i",
")",
"]",
"if",
"state",
".",
"cache",
"is",
"not",
"None",
"else",
"None",
",",
"step",
"=",
"step",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
".",
"append",
"(",
"all_input",
")",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"saved_inputs",
"=",
"torch",
".",
"stack",
"(",
"saved_inputs",
")",
"output",
"=",
"self",
".",
"layer_norm",
"(",
"output",
")",
"# Process the result and update the attentions.",
"outputs",
"=",
"output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"attn",
"=",
"attn",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"attn",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attn",
"if",
"state",
".",
"cache",
"is",
"None",
":",
"state",
"=",
"state",
".",
"update_state",
"(",
"tgt",
",",
"saved_inputs",
")",
"return",
"outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L172-L237 |
|
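The two pad masks built in TransformerDecoder.forward above follow a single idiom: compare word ids against the padding index, then broadcast the [batch x 1 x len] result over the query dimension with expand. A small sketch of the source side (toy ids; the comparison yields bool on recent PyTorch and uint8 on the older versions this code targets):

import torch

padding_idx = 1
src_words = torch.tensor([[5, 6, 1]])  # batch=1, src_len=3; the last token is padding
tgt_len = 2
src_pad_mask = src_words.eq(padding_idx).unsqueeze(1).expand(1, tgt_len, 3)
print(src_pad_mask)
# tensor([[[False, False,  True],
#          [False, False,  True]]])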
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoder.init_decoder_state | (self, src, memory_bank, enc_hidden,
with_cache=False) | return state | Init decoder state | Init decoder state | [
"Init",
"decoder",
"state"
] | def init_decoder_state(self, src, memory_bank, enc_hidden,
with_cache=False):
""" Init decoder state """
state = TransformerDecoderState(src)
if with_cache:
state._init_cache(memory_bank, self.num_layers,
self.self_attn_type)
return state | [
"def",
"init_decoder_state",
"(",
"self",
",",
"src",
",",
"memory_bank",
",",
"enc_hidden",
",",
"with_cache",
"=",
"False",
")",
":",
"state",
"=",
"TransformerDecoderState",
"(",
"src",
")",
"if",
"with_cache",
":",
"state",
".",
"_init_cache",
"(",
"memory_bank",
",",
"self",
".",
"num_layers",
",",
"self",
".",
"self_attn_type",
")",
"return",
"state"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L239-L246 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoderState.__init__ | (self, src) | Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch). | Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch). | [
"Args",
":",
"src",
"(",
"FloatTensor",
")",
":",
"a",
"sequence",
"of",
"source",
"words",
"tensors",
"with",
"optional",
"feature",
"tensors",
"of",
"size",
"(",
"len",
"x",
"batch",
")",
"."
] | def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
self.previous_layer_inputs = None
self.cache = None | [
"def",
"__init__",
"(",
"self",
",",
"src",
")",
":",
"self",
".",
"src",
"=",
"src",
"self",
".",
"previous_input",
"=",
"None",
"self",
".",
"previous_layer_inputs",
"=",
"None",
"self",
".",
"cache",
"=",
"None"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L252-L261 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoderState._all | (self) | Contains attributes that need to be updated in self.beam_update(). | Contains attributes that need to be updated in self.beam_update(). | [
"Contains",
"attributes",
"that",
"need",
"to",
"be",
"updated",
"in",
"self",
".",
"beam_update",
"()",
"."
] | def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
if (self.previous_input is not None
and self.previous_layer_inputs is not None):
return (self.previous_input,
self.previous_layer_inputs,
self.src)
else:
return (self.src,) | [
"def",
"_all",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"previous_input",
"is",
"not",
"None",
"and",
"self",
".",
"previous_layer_inputs",
"is",
"not",
"None",
")",
":",
"return",
"(",
"self",
".",
"previous_input",
",",
"self",
".",
"previous_layer_inputs",
",",
"self",
".",
"src",
")",
"else",
":",
"return",
"(",
"self",
".",
"src",
",",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L264-L274 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/transformer.py | python | TransformerDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = self.src.data.repeat(1, beam_size, 1) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"self",
".",
"src",
"=",
"self",
".",
"src",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/transformer.py#L309-L311 |
||
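repeat_beam_size_times above tiles the batch dimension with Tensor.repeat. Note that repeat copies the whole batch block-wise ([b0, b1, b0, b1, ...]) rather than interleaving per example, which is the layout this beam implementation assumes. A quick shape check:

import torch

src = torch.arange(6).view(3, 2, 1)   # [src_len=3, batch=2, feats=1]
beam_size = 4
tiled = src.repeat(1, beam_size, 1)   # the exact call used above
print(tuple(src.shape), '->', tuple(tiled.shape))  # (3, 2, 1) -> (3, 8, 1)
print(tiled[0, :, 0])                 # tensor([0, 1, 0, 1, 0, 1, 0, 1])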
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/cnn_decoder.py | python | CNNDecoder.forward | (self, tgt, memory_bank, state, memory_lengths=None, step=None) | return outputs, state, attns | See :obj:`onmt.modules.RNNDecoderBase.forward()` | See :obj:`onmt.modules.RNNDecoderBase.forward()` | [
"See",
":",
"obj",
":",
"onmt",
".",
"modules",
".",
"RNNDecoderBase",
".",
"forward",
"()"
] | def forward(self, tgt, memory_bank, state, memory_lengths=None, step=None):
""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
# NOTE: memory_lengths is only here for compatibility reasons
# with onmt.modules.RNNDecoderBase.forward()
# CHECKS
assert isinstance(state, CNNDecoderState)
_, tgt_batch, _ = tgt.size()
_, contxt_batch, _ = memory_bank.size()
aeq(tgt_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
tgt = torch.cat([state.previous_input, tgt], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(tgt)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_memory_bank_t = memory_bank.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_memory_bank_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1)
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_memory_bank_t, src_memory_bank_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(tgt)
return outputs, state, attns | [
"def",
"forward",
"(",
"self",
",",
"tgt",
",",
"memory_bank",
",",
"state",
",",
"memory_lengths",
"=",
"None",
",",
"step",
"=",
"None",
")",
":",
"# NOTE: memory_lengths is only here for compatibility reasons",
"# with onmt.modules.RNNDecoderBase.forward()",
"# CHECKS",
"assert",
"isinstance",
"(",
"state",
",",
"CNNDecoderState",
")",
"_",
",",
"tgt_batch",
",",
"_",
"=",
"tgt",
".",
"size",
"(",
")",
"_",
",",
"contxt_batch",
",",
"_",
"=",
"memory_bank",
".",
"size",
"(",
")",
"aeq",
"(",
"tgt_batch",
",",
"contxt_batch",
")",
"# END CHECKS",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"tgt",
"=",
"torch",
".",
"cat",
"(",
"[",
"state",
".",
"previous_input",
",",
"tgt",
"]",
",",
"0",
")",
"# Initialize return variables.",
"outputs",
"=",
"[",
"]",
"attns",
"=",
"{",
"\"std\"",
":",
"[",
"]",
"}",
"assert",
"not",
"self",
".",
"_copy",
",",
"\"Copy mechanism not yet tested in conv2conv\"",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"[",
"]",
"emb",
"=",
"self",
".",
"embeddings",
"(",
"tgt",
")",
"assert",
"emb",
".",
"dim",
"(",
")",
"==",
"3",
"# len x batch x embedding_dim",
"tgt_emb",
"=",
"emb",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The output of CNNEncoder.",
"src_memory_bank_t",
"=",
"memory_bank",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# The combination of output of CNNEncoder and source embeddings.",
"src_memory_bank_c",
"=",
"state",
".",
"init_src",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"# Run the forward pass of the CNNDecoder.",
"emb_reshape",
"=",
"tgt_emb",
".",
"contiguous",
"(",
")",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
"*",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"linear_out",
"=",
"self",
".",
"linear",
"(",
"emb_reshape",
")",
"x",
"=",
"linear_out",
".",
"view",
"(",
"tgt_emb",
".",
"size",
"(",
"0",
")",
",",
"tgt_emb",
".",
"size",
"(",
"1",
")",
",",
"-",
"1",
")",
"x",
"=",
"shape_transform",
"(",
"x",
")",
"pad",
"=",
"torch",
".",
"zeros",
"(",
"x",
".",
"size",
"(",
"0",
")",
",",
"x",
".",
"size",
"(",
"1",
")",
",",
"self",
".",
"cnn_kernel_width",
"-",
"1",
",",
"1",
")",
"pad",
"=",
"pad",
".",
"type_as",
"(",
"x",
")",
"base_target_emb",
"=",
"x",
"for",
"conv",
",",
"attention",
"in",
"zip",
"(",
"self",
".",
"conv_layers",
",",
"self",
".",
"attn_layers",
")",
":",
"new_target_input",
"=",
"torch",
".",
"cat",
"(",
"[",
"pad",
",",
"x",
"]",
",",
"2",
")",
"out",
"=",
"conv",
"(",
"new_target_input",
")",
"c",
",",
"attn",
"=",
"attention",
"(",
"base_target_emb",
",",
"out",
",",
"src_memory_bank_t",
",",
"src_memory_bank_c",
")",
"x",
"=",
"(",
"x",
"+",
"(",
"c",
"+",
"out",
")",
"*",
"SCALE_WEIGHT",
")",
"*",
"SCALE_WEIGHT",
"output",
"=",
"x",
".",
"squeeze",
"(",
"3",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
"# Process the result and update the attentions.",
"outputs",
"=",
"output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
".",
"contiguous",
"(",
")",
"if",
"state",
".",
"previous_input",
"is",
"not",
"None",
":",
"outputs",
"=",
"outputs",
"[",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
"attn",
"=",
"attn",
"[",
":",
",",
"state",
".",
"previous_input",
".",
"size",
"(",
"0",
")",
":",
"]",
".",
"squeeze",
"(",
")",
"attn",
"=",
"torch",
".",
"stack",
"(",
"[",
"attn",
"]",
")",
"attns",
"[",
"\"std\"",
"]",
"=",
"attn",
"if",
"self",
".",
"_copy",
":",
"attns",
"[",
"\"copy\"",
"]",
"=",
"attn",
"# Update the state.",
"state",
".",
"update_state",
"(",
"tgt",
")",
"return",
"outputs",
",",
"state",
",",
"attns"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/cnn_decoder.py#L58-L122 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/cnn_decoder.py | python | CNNDecoder.init_decoder_state | (self, _, memory_bank, enc_hidden, with_cache=False) | return CNNDecoderState(memory_bank, enc_hidden) | Init decoder state. | Init decoder state. | [
"Init",
"decoder",
"state",
"."
] | def init_decoder_state(self, _, memory_bank, enc_hidden, with_cache=False):
"""
Init decoder state.
"""
return CNNDecoderState(memory_bank, enc_hidden) | [
"def",
"init_decoder_state",
"(",
"self",
",",
"_",
",",
"memory_bank",
",",
"enc_hidden",
",",
"with_cache",
"=",
"False",
")",
":",
"return",
"CNNDecoderState",
"(",
"memory_bank",
",",
"enc_hidden",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/cnn_decoder.py#L124-L128 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/cnn_decoder.py | python | CNNDecoderState._all | (self) | return (self.previous_input,) | Contains attributes that need to be updated in self.beam_update(). | Contains attributes that need to be updated in self.beam_update(). | [
"Contains",
"attributes",
"that",
"need",
"to",
"be",
"updated",
"in",
"self",
".",
"beam_update",
"()",
"."
] | def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,) | [
"def",
"_all",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"previous_input",
",",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/cnn_decoder.py#L141-L145 |
|
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/cnn_decoder.py | python | CNNDecoderState.update_state | (self, new_input) | Called for every decoder forward pass. | Called for every decoder forward pass. | [
"Called",
"for",
"every",
"decoder",
"forward",
"pass",
"."
] | def update_state(self, new_input):
""" Called for every decoder forward pass. """
self.previous_input = new_input | [
"def",
"update_state",
"(",
"self",
",",
"new_input",
")",
":",
"self",
".",
"previous_input",
"=",
"new_input"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/cnn_decoder.py#L150-L152 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | code/Hi_MAP/onmt/decoders/cnn_decoder.py | python | CNNDecoderState.repeat_beam_size_times | (self, beam_size) | Repeat beam_size times along batch dimension. | Repeat beam_size times along batch dimension. | [
"Repeat",
"beam_size",
"times",
"along",
"batch",
"dimension",
"."
] | def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = self.init_src.data.repeat(1, beam_size, 1) | [
"def",
"repeat_beam_size_times",
"(",
"self",
",",
"beam_size",
")",
":",
"self",
".",
"init_src",
"=",
"self",
".",
"init_src",
".",
"data",
".",
"repeat",
"(",
"1",
",",
"beam_size",
",",
"1",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/code/Hi_MAP/onmt/decoders/cnn_decoder.py#L154-L156 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments._tokenize | (self, text) | return self._en(text, disable = ["tagger", "parser", "ner", "textcat"]) | Tokenizes input using the fastest possible SpaCy configuration.
This is optional and can be disabled in the constructor. | [] | def _tokenize(self, text):
"""
Tokenizes input using the fastest possible SpaCy configuration.
This is optional and can be disabled in the constructor.
"""
return self._en(text, disable = ["tagger", "parser", "ner", "textcat"]) | [
"def",
"_tokenize",
"(",
"self",
",",
"text",
")",
":",
"return",
"self",
".",
"_en",
"(",
"text",
",",
"disable",
"=",
"[",
"\"tagger\"",
",",
"\"parser\"",
",",
"\"ner\"",
",",
"\"textcat\"",
"]",
")"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L49-L58 |
||
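The disable list in _tokenize above is the standard spaCy trick for tokenize-only speed: every pipeline component after the tokenizer is skipped. A usage sketch, assuming spaCy and an installed English model (e.g. python -m spacy download en_core_web_sm):

import spacy

nlp = spacy.load("en_core_web_sm")

# Skipping tagger/parser/NER/textcat leaves only the tokenizer, which is
# dramatically faster on long documents.
doc = nlp("The quick brown fox jumped.", disable=["tagger", "parser", "ner", "textcat"])
print([token.text for token in doc])  # ['The', 'quick', 'brown', 'fox', 'jumped', '.']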
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments._normalize | (self, tokens, case = False) | return [
str(t).lower()
if not case
else str(t)
for t in tokens
] | Lowercases and turns tokens into distinct words. | [] | def _normalize(self, tokens, case = False):
"""
Lowercases and turns tokens into distinct words.
"""
return [
str(t).lower()
if not case
else str(t)
for t in tokens
] | [
"def",
"_normalize",
"(",
"self",
",",
"tokens",
",",
"case",
"=",
"False",
")",
":",
"return",
"[",
"str",
"(",
"t",
")",
".",
"lower",
"(",
")",
"if",
"not",
"case",
"else",
"str",
"(",
"t",
")",
"for",
"t",
"in",
"tokens",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L61-L74 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.overlaps | (self) | return self._matches | Return a list of Fragments.Match objects between summary and text.
This is a list of named tuples of the form (summary, text, length):
- summary (int): the start index of the match in the summary
- text (int): the start index of the match in the reference
- length (int): the length of the extractive fragment | [] | def overlaps(self):
"""
Return a list of Fragments.Match objects between summary and text.
This is a list of named tuples of the form (summary, text, length):
- summary (int): the start index of the match in the summary
- text (int): the start index of the match in the reference
- length (int): the length of the extractive fragment
"""
return self._matches | [
"def",
"overlaps",
"(",
"self",
")",
":",
"return",
"self",
".",
"_matches"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L77-L90 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.strings | (self, min_length = 0, raw = None, summary_base = True) | return strings | Return a list of explicit match strings between the summary and reference.
Note that the output will be in the same format as the input strings. This is
important to remember if tokenization is done manually. If tokenization is
specified automatically on the raw strings, raw strings will automatically
be returned rather than SpaCy tokenized sequences.
Arguments:
- min_length (int): filter out overlaps shorter than this (default = 0)
- raw (bool): return raw input rather than stringified
- (default = False if automatic tokenization, True otherwise)
- summary_base (bool): strings are based off the summary text (default = True)
Returns:
- list of overlaps, where overlaps are strings or token sequences | [] | def strings(self, min_length = 0, raw = None, summary_base = True):
"""
Return a list of explicit match strings between the summary and reference.
Note that the output will be in the same format as the input strings. This is
important to remember if tokenization is done manually. If tokenization is
specified automatically on the raw strings, raw strings will automatically
be returned rather than SpaCy tokenized sequences.
Arguments:
- min_length (int): filter out overlaps shorter than this (default = 0)
- raw (bool): return raw input rather than stringified
- (default = False if automatic tokenization, True otherwise)
- summary_base (bool): strings are based off the summary text (default = True)
Returns:
- list of overlaps, where overlaps are strings or token sequences
"""
# Compute the strings against the summary or the text?
base = self.summary if summary_base else self.text
# Generate strings, filtering out strings below the minimum length.
strings = [
base[i : i + length]
for i, j, length
in self.overlaps()
if length > min_length
]
# By default, we just return the tokenization being used.
# But if the user wants a raw string, then we convert.
# Mostly, this will be used along with spacy.
if self._tokens and raw:
for i, s in enumerate(strings):
strings[i] = str(s)
# Return the list of strings.
return strings | [
"def",
"strings",
"(",
"self",
",",
"min_length",
"=",
"0",
",",
"raw",
"=",
"None",
",",
"summary_base",
"=",
"True",
")",
":",
"# Compute the strings against the summary or the text?",
"base",
"=",
"self",
".",
"summary",
"if",
"summary_base",
"else",
"self",
".",
"text",
"# Generate strings, filtering out strings below the minimum length.",
"strings",
"=",
"[",
"base",
"[",
"i",
":",
"i",
"+",
"length",
"]",
"for",
"i",
",",
"j",
",",
"length",
"in",
"self",
".",
"overlaps",
"(",
")",
"if",
"length",
">",
"min_length",
"]",
"# By default, we just return the tokenization being used.",
"# But if they user wants a raw string, then we convert.",
"# Mostly, this will be used along with spacy.",
"if",
"self",
".",
"_tokens",
"and",
"raw",
":",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"strings",
")",
":",
"strings",
"[",
"i",
"]",
"=",
"str",
"(",
"s",
")",
"# Return the list of strings.",
"return",
"strings"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L93-L140 |
||
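The core of strings() above is just slicing the chosen base sequence with each overlap's (start, length). A self-contained sketch with hand-made matches (the Match tuple mirrors the one documented in overlaps()):

from collections import namedtuple

Match = namedtuple("Match", ("summary", "text", "length"))

summary = "the cat sat on the mat".split()
overlaps = [Match(0, 1, 3), Match(4, 6, 2)]  # toy fragment positions

min_length = 1
fragments = [summary[m.summary : m.summary + m.length]
             for m in overlaps if m.length > min_length]
print(fragments)  # [['the', 'cat', 'sat'], ['the', 'mat']]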
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.coverage | (self, summary_base = True) | Return the COVERAGE score of the summary and text.
Arguments:
- summary_base (bool): use summary as numerator (default = True)
Returns:
- decimal COVERAGE score within [0, 1] | [] | def coverage(self, summary_base = True):
"""
Return the COVERAGE score of the summary and text.
Arguments:
- summary_base (bool): use summary as numerator (default = True)
Returns:
- decimal COVERAGE score within [0, 1]
"""
numerator = sum(o.length for o in self.overlaps())
if summary_base: denominator = len(self.summary)
else: denominator = len(self.reference)
if denominator == 0: return 0
else: return numerator / denominator | [
"def",
"coverage",
"(",
"self",
",",
"summary_base",
"=",
"True",
")",
":",
"numerator",
"=",
"sum",
"(",
"o",
".",
"length",
"for",
"o",
"in",
"self",
".",
"overlaps",
"(",
")",
")",
"if",
"summary_base",
":",
"denominator",
"=",
"len",
"(",
"self",
".",
"summary",
")",
"else",
":",
"denominator",
"=",
"len",
"(",
"self",
".",
"reference",
")",
"if",
"denominator",
"==",
"0",
":",
"return",
"0",
"else",
":",
"return",
"numerator",
"/",
"denominator"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L143-L165 |
|||
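Worked numbers for the COVERAGE formula above: it is the fraction of summary tokens that sit inside some extractive fragment.

# Two extractive fragments covering 3 + 2 of the 10 summary tokens:
overlap_lengths = [3, 2]
summary_len = 10
print(sum(overlap_lengths) / summary_len)  # 0.5 -> half the summary is copied verbatim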
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.density | (self, summary_base = True) | Return the DENSITY score of summary and text.
Arguments:
- summary_base (bool): use summary as numerator (default = True)
Returns:
- decimal DENSITY score within [0, ...] | [] | def density(self, summary_base = True):
"""
Return the DENSITY score of summary and text.
Arguments:
- summary_base (bool): use summary as numerator (default = True)
Returns:
- decimal DENSITY score within [0, ...]
"""
numerator = sum(o.length ** 2 for o in self.overlaps())
if summary_base: denominator = len(self.summary)
else: denominator = len(self.reference)
if denominator == 0: return 0
else: return numerator / denominator | [
"def",
"density",
"(",
"self",
",",
"summary_base",
"=",
"True",
")",
":",
"numerator",
"=",
"sum",
"(",
"o",
".",
"length",
"**",
"2",
"for",
"o",
"in",
"self",
".",
"overlaps",
"(",
")",
")",
"if",
"summary_base",
":",
"denominator",
"=",
"len",
"(",
"self",
".",
"summary",
")",
"else",
":",
"denominator",
"=",
"len",
"(",
"self",
".",
"reference",
")",
"if",
"denominator",
"==",
"0",
":",
"return",
"0",
"else",
":",
"return",
"numerator",
"/",
"denominator"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L168-L190 |
|||
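DENSITY differs from COVERAGE only in squaring each fragment length, so it separates summaries that copy one long span from summaries that copy the same number of tokens as scattered single words:

summary_len = 10
print(sum(l ** 2 for l in [5]) / summary_len)      # 2.5 -> one 5-token fragment
print(sum(l ** 2 for l in [1] * 5) / summary_len)  # 0.5 -> five isolated tokens
# Both summaries have COVERAGE 0.5, but very different DENSITY.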
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.compression | (self, text_to_summary = True) | Return compression ratio between summary and text.
Arguments:
- text_to_summary (bool): compute text/summary ratio (default = True)
Returns:
- decimal compression score within [0, ...] | [] | def compression(self, text_to_summary = True):
"""
Return compression ratio between summary and text.
Arguments:
- text_to_summary (bool): compute text/summary ratio (default = True)
Returns:
- decimal compression score within [0, ...]
"""
ratio = [len(self.text), len(self.summary)]
try:
if text_to_summary: return ratio[0] / ratio[1]
else: return ratio[1] / ratio[0]
except ZeroDivisionError:
return 0 | [
"def",
"compression",
"(",
"self",
",",
"text_to_summary",
"=",
"True",
")",
":",
"ratio",
"=",
"[",
"len",
"(",
"self",
".",
"text",
")",
",",
"len",
"(",
"self",
".",
"summary",
")",
"]",
"try",
":",
"if",
"text_to_summary",
":",
"return",
"ratio",
"[",
"0",
"]",
"/",
"ratio",
"[",
"1",
"]",
"else",
":",
"return",
"ratio",
"[",
"1",
"]",
"/",
"ratio",
"[",
"0",
"]",
"except",
"ZeroDivisionError",
":",
"return",
"0"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L193-L218 |
|||
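compression() above is a plain length ratio with a ZeroDivisionError guard; a quick restatement on toy lengths:

def compression(text_len, summary_len):
    # text/summary ratio, returning 0 instead of dividing by zero
    try:
        return text_len / summary_len
    except ZeroDivisionError:
        return 0

print(compression(600, 75))  # 8.0 -> the source article is 8x longer than its summary
print(compression(600, 0))   # 0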
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments._match | (self, a, b) | Raw procedure for matching summary in text, described in paper. | [] | def _match(self, a, b):
"""
Raw procedure for matching summary in text, described in paper.
"""
self._matches = []
a_start = b_start = 0
while a_start < len(a):
best_match = None
best_match_length = 0
while b_start < len(b):
if a[a_start] == b[b_start]:
a_end = a_start
b_end = b_start
while a_end < len(a) and b_end < len(b) \
and b[b_end] == a[a_end]:
b_end += 1
a_end += 1
length = a_end - a_start
if length > best_match_length:
best_match = Fragments.Match(a_start, b_start, length)
best_match_length = length
b_start = b_end
else:
b_start += 1
b_start = 0
if best_match:
if best_match_length > 0:
self._matches.append(best_match)
a_start += best_match_length
else:
a_start += 1 | [
"def",
"_match",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"self",
".",
"_matches",
"=",
"[",
"]",
"a_start",
"=",
"b_start",
"=",
"0",
"while",
"a_start",
"<",
"len",
"(",
"a",
")",
":",
"best_match",
"=",
"None",
"best_match_length",
"=",
"0",
"while",
"b_start",
"<",
"len",
"(",
"b",
")",
":",
"if",
"a",
"[",
"a_start",
"]",
"==",
"b",
"[",
"b_start",
"]",
":",
"a_end",
"=",
"a_start",
"b_end",
"=",
"b_start",
"while",
"a_end",
"<",
"len",
"(",
"a",
")",
"and",
"b_end",
"<",
"len",
"(",
"b",
")",
"and",
"b",
"[",
"b_end",
"]",
"==",
"a",
"[",
"a_end",
"]",
":",
"b_end",
"+=",
"1",
"a_end",
"+=",
"1",
"length",
"=",
"a_end",
"-",
"a_start",
"if",
"length",
">",
"best_match_length",
":",
"best_match",
"=",
"Fragments",
".",
"Match",
"(",
"a_start",
",",
"b_start",
",",
"length",
")",
"best_match_length",
"=",
"length",
"b_start",
"=",
"b_end",
"else",
":",
"b_start",
"+=",
"1",
"b_start",
"=",
"0",
"if",
"best_match",
":",
"if",
"best_match_length",
">",
"0",
":",
"self",
".",
"_matches",
".",
"append",
"(",
"best_match",
")",
"a_start",
"+=",
"best_match_length",
"else",
":",
"a_start",
"+=",
"1"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L221-L275 |
|||
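The _match loop above is the greedy extractive-fragment procedure: at each summary position take the longest common run starting there, record it, and jump past it. A self-contained re-run of the same control flow on toy token lists (a restatement for illustration, not an import from the repository):

from collections import namedtuple

Match = namedtuple("Match", ("summary", "text", "length"))

def greedy_fragments(a, b):
    # a = summary tokens, b = text tokens; same control flow as _match above.
    matches, a_start = [], 0
    while a_start < len(a):
        best, best_length, b_start = None, 0, 0
        while b_start < len(b):
            if a[a_start] == b[b_start]:
                a_end, b_end = a_start, b_start
                while a_end < len(a) and b_end < len(b) and a[a_end] == b[b_end]:
                    a_end += 1
                    b_end += 1
                if a_end - a_start > best_length:
                    best = Match(a_start, b_start, a_end - a_start)
                    best_length = a_end - a_start
                b_start = b_end   # resume scanning after the run, as above
            else:
                b_start += 1
        if best_length > 0:
            matches.append(best)
            a_start += best_length
        else:
            a_start += 1
    return matches

summary = "the cat sat on the mat".split()
text = "yesterday the cat sat on a mat in the sun".split()
print(greedy_fragments(summary, text))
# [Match(summary=0, text=1, length=4),   'the cat sat on'
#  Match(summary=4, text=1, length=1),   'the'
#  Match(summary=5, text=6, length=1)]   'mat'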
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments._htmltokens | (self, tokens) | return [
[
_html.escape(t.text).replace("\n", "<br/>"),
_html.escape(t.whitespace_).replace("\n", "<br/>")
]
for t in tokens
] | Carefully process tokens to handle whitespace and HTML characters. | [] | def _htmltokens(self, tokens):
"""
Carefully process tokens to handle whitespace and HTML characters.
"""
return [
[
_html.escape(t.text).replace("\n", "<br/>"),
_html.escape(t.whitespace_).replace("\n", "<br/>")
]
for t in tokens
] | [
"def",
"_htmltokens",
"(",
"self",
",",
"tokens",
")",
":",
"return",
"[",
"[",
"_html",
".",
"escape",
"(",
"t",
".",
"text",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"<br/>\"",
")",
",",
"_html",
".",
"escape",
"(",
"t",
".",
"whitespace_",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"<br/>\"",
")",
"]",
"for",
"t",
"in",
"tokens",
"]"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L278-L293 |
||
Alex-Fabbri/Multi-News | f6476d1f114662eb93db32e9b704b7c4fe047217 | data/scripts/fragments.py | python | Fragments.annotate | (self, min_length = 0, text_truncation = None, novel_italics = False) | return summary, text | Used to annotate fragments for website visualization.
Arguments:
- min_length (int): minimum length overlap to count (default = 0)
- text_truncation (int): truncated text length (default = None)
- novel_italics (bool): italicize novel words (default = False)
Returns:
- a tuple of strings: (summary HTML, text HTML) | [] | def annotate(self, min_length = 0, text_truncation = None, novel_italics = False):
"""
Used to annotate fragments for website visualization.
Arguments:
- min_length (int): minimum length overlap to count (default = 0)
- text_truncation (int): truncated text length (default = None)
- novel_italics (bool): italicize novel words (default = False)
Returns:
- a tuple of strings: (summary HTML, text HTML)
"""
start = """
<u
style="color: {color}; border-color: {color};"
data-ref="{ref}" title="Length: {length}"
>
""".strip()
end = """
</u>
""".strip()
# Here we tokenize carefully to preserve sane-looking whitespace.
# (This part does require text to use a SpaCy tokenization.)
summary = self._htmltokens(self.summary)
text = self._htmltokens(self.text)
# Compute novel word set, if requested.
if novel_italics:
novel = set(self._norm_summary) - set(self._norm_text)
for word_whitespace in summary:
if word_whitespace[0].lower() in novel:
word_whitespace[0] = "<em>" + word_whitespace[0] + "</em>"
# Truncate text, if requested.
# Must be careful later on with this.
if text_truncation is not None:
text = text[:text_truncation]
# March through overlaps, replacing tokens with HTML-tagged strings.
colors = self._itercolors()
for overlap in self.overlaps():
# Skip overlaps that are too short.
if overlap.length < min_length:
continue
# Reference ID for JavaScript highlighting.
# This is random, but shared between corresponding fragments.
ref = _random.randint(0, 1e10)
color = next(colors)
# Summary starting tag.
summary[overlap.summary][0] = start.format(
color = color,
ref = ref,
length = overlap.length,
) + summary[overlap.summary][0]
# Text starting tag.
text[overlap.text][0] = start.format(
color = color,
ref = ref,
length = overlap.length,
) + text[overlap.text][0]
# Summary ending tag.
summary[overlap.summary + overlap.length - 1][0] += end
# Text ending tag.
text[overlap.text + overlap.length - 1][0] += end
# Carefully join tokens and whitespace to reconstruct the string.
summary = " ".join("".join("".join(tw) for tw in summary).split())
text = " ".join("".join("".join(tw) for tw in text).split())
# Return the tuple.
return summary, text | [
"def",
"annotate",
"(",
"self",
",",
"min_length",
"=",
"0",
",",
"text_truncation",
"=",
"None",
",",
"novel_italics",
"=",
"False",
")",
":",
"start",
"=",
"\"\"\"\n <u\n style=\"color: {color}; border-color: {color};\"\n data-ref=\"{ref}\" title=\"Length: {length}\"\n >\n \"\"\"",
".",
"strip",
"(",
")",
"end",
"=",
"\"\"\"\n </u>\n \"\"\"",
".",
"strip",
"(",
")",
"# Here we tokenize carefully to preserve sane-looking whitespace.",
"# (This part does require text to use a SpaCy tokenization.)",
"summary",
"=",
"self",
".",
"_htmltokens",
"(",
"self",
".",
"summary",
")",
"text",
"=",
"self",
".",
"_htmltokens",
"(",
"self",
".",
"text",
")",
"# Compute novel word set, if requested.",
"if",
"novel_italics",
":",
"novel",
"=",
"set",
"(",
"self",
".",
"_norm_summary",
")",
"-",
"set",
"(",
"self",
".",
"_norm_text",
")",
"for",
"word_whitespace",
"in",
"summary",
":",
"if",
"word_whitespace",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"in",
"novel",
":",
"word_whitespace",
"[",
"0",
"]",
"=",
"\"<em>\"",
"+",
"word_whitespace",
"[",
"0",
"]",
"+",
"\"</em>\"",
"# Truncate text, if requested.",
"# Must be careful later on with this.",
"if",
"text_truncation",
"is",
"not",
"None",
":",
"text",
"=",
"text",
"[",
":",
"text_truncation",
"]",
"# March through overlaps, replacing tokens with HTML-tagged strings.",
"colors",
"=",
"self",
".",
"_itercolors",
"(",
")",
"for",
"overlap",
"in",
"self",
".",
"overlaps",
"(",
")",
":",
"# Skip overlaps that are too short.",
"if",
"overlap",
".",
"length",
"<",
"min_length",
":",
"continue",
"# Reference ID for JavaScript highlighting.",
"# This is random, but shared between corresponding fragments.",
"ref",
"=",
"_random",
".",
"randint",
"(",
"0",
",",
"1e10",
")",
"color",
"=",
"next",
"(",
"colors",
")",
"# Summary starting tag.",
"summary",
"[",
"overlap",
".",
"summary",
"]",
"[",
"0",
"]",
"=",
"start",
".",
"format",
"(",
"color",
"=",
"color",
",",
"ref",
"=",
"ref",
",",
"length",
"=",
"overlap",
".",
"length",
",",
")",
"+",
"summary",
"[",
"overlap",
".",
"summary",
"]",
"[",
"0",
"]",
"# Text starting tag.",
"text",
"[",
"overlap",
".",
"text",
"]",
"[",
"0",
"]",
"=",
"start",
".",
"format",
"(",
"color",
"=",
"color",
",",
"ref",
"=",
"ref",
",",
"length",
"=",
"overlap",
".",
"length",
",",
")",
"+",
"text",
"[",
"overlap",
".",
"text",
"]",
"[",
"0",
"]",
"# Summary ending tag.",
"summary",
"[",
"overlap",
".",
"summary",
"+",
"overlap",
".",
"length",
"-",
"1",
"]",
"[",
"0",
"]",
"+=",
"end",
"# Text ending tag.",
"text",
"[",
"overlap",
".",
"text",
"+",
"overlap",
".",
"length",
"-",
"1",
"]",
"[",
"0",
"]",
"+=",
"end",
"# Carefully join tokens and whitespace to reconstruct the string.",
"summary",
"=",
"\" \"",
".",
"join",
"(",
"\"\"",
".",
"join",
"(",
"\"\"",
".",
"join",
"(",
"tw",
")",
"for",
"tw",
"in",
"summary",
")",
".",
"split",
"(",
")",
")",
"text",
"=",
"\" \"",
".",
"join",
"(",
"\"\"",
".",
"join",
"(",
"\"\"",
".",
"join",
"(",
"tw",
")",
"for",
"tw",
"in",
"text",
")",
".",
"split",
"(",
")",
")",
"# Return the tuple.",
"return",
"summary",
",",
"text"
] | https://github.com/Alex-Fabbri/Multi-News/blob/f6476d1f114662eb93db32e9b704b7c4fe047217/data/scripts/fragments.py#L296-L396 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/proxy.py | python | GetIPPOOLS | (num) | | return IPPOOL | # IPs fetched from my own proxy pool
IPPOOLS1=urllib.request.urlopen("http://127.0.0.1:8000/?types=0&count=20&country=%E5%9B%BD%E5%86%85").read().decode("utf-8",'ignore')
IPPOOLS2=re.findall('\"(\d+\.\d+\.\d+\.\d+\"\,\s*\d+)',IPPOOLS1)
IPPOOL=[i.replace('", ',':') for i in IPPOOLS2] | # IPs fetched from my own proxy pool
IPPOOLS1=urllib.request.urlopen("http://127.0.0.1:8000/?types=0&count=20&country=%E5%9B%BD%E5%86%85").read().decode("utf-8",'ignore')
IPPOOLS2=re.findall('\"(\d+\.\d+\.\d+\.\d+\"\,\s*\d+)',IPPOOLS1)
IPPOOL=[i.replace('", ',':') for i in IPPOOLS2] | [
"#自己获取的ip",
"IPPOOLS1",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"http",
":",
"//",
"127",
".",
"0",
".",
"0",
".",
"1",
":",
"8000",
"/",
"?types",
"=",
"0&count",
"=",
"20&country",
"=",
"%E5%9B%BD%E5%86%85",
")",
".",
"read",
"()",
".",
"decode",
"(",
"utf",
"-",
"8",
"ignore",
")",
"IPPOOLS2",
"=",
"re",
".",
"findall",
"(",
"\\",
"(",
"\\",
"d",
"+",
"\\",
".",
"\\",
"d",
"+",
"\\",
".",
"\\",
"d",
"+",
"\\",
".",
"\\",
"d",
"+",
"\\",
"\\",
"\\",
"s",
"*",
"\\",
"d",
"+",
")",
"IPPOOLS1",
")",
"IPPOOL",
"=",
"[",
"i",
".",
"replace",
"(",
":",
")",
"for",
"i",
"in",
"IPPOOLS2",
"]"
] | def GetIPPOOLS(num):
# IPs bought from the Daxiang proxy service, 20000 for 5 yuan; roughly one in ten works
IPPOOL=urllib.request.urlopen("http://tpv.daxiangdaili.com/ip/?tid=559480480576119&num="+str(num)+"&operator=1&filter=on&protocol=http&category=2&delay=1").read().decode("utf-8","ignore").split('\r\n')
'''
# IPs fetched from my own proxy pool
IPPOOLS1=urllib.request.urlopen("http://127.0.0.1:8000/?types=0&count=20&country=%E5%9B%BD%E5%86%85").read().decode("utf-8",'ignore')
IPPOOLS2=re.findall('\"(\d+\.\d+\.\d+\.\d+\"\,\s*\d+)',IPPOOLS1)
IPPOOL=[i.replace('", ',':') for i in IPPOOLS2]
'''
return IPPOOL | [
"def",
"GetIPPOOLS",
"(",
"num",
")",
":",
"#大象代理买的ip,5元20000个,每十个差不多有一个能用",
"IPPOOL",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"\"http://tpv.daxiangdaili.com/ip/?tid=559480480576119&num=\"",
"+",
"str",
"(",
"num",
")",
"+",
"\"&operator=1&filter=on&protocol=http&category=2&delay=1\"",
")",
".",
"read",
"(",
")",
".",
"decode",
"(",
"\"utf-8\"",
",",
"\"ignore\"",
")",
".",
"split",
"(",
"'\\r\\n'",
")",
"return",
"IPPOOL"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/proxy.py#L17-L26 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/proxy.py | python | initIPPOOLS | (rconn) | | | Store valid IPs into the REDIS database | Store valid IPs into the REDIS database | [
"把有效的IP存入",
"REDIS数据库"
] | def initIPPOOLS(rconn):
"""把有效的IP存入 REDIS数据库"""
ipNum=len(rconn.keys('IP*'))
if ipNum<IPPOOLNUM:
IPPOOLS=GetIPPOOLS(IPPOOLNUM)
for ipall in IPPOOLS:
try:
ip=ipall.split(':')[0]
port=ipall.split(':')[1]
telnetlib.Telnet(ip,port=port,timeout=2) # check whether the proxy IP is usable
except:
logger.warning("The ip is not available !( IP:%s )" % ipall)
else:
logger.warning("Get ip Success!( IP:%s )" % ipall)
rconn.set("IP:%s:10"%(ipall),ipall) #10 is status
else:
logger.warning("The number of the IP is %s!" % str(ipNum)) | [
"def",
"initIPPOOLS",
"(",
"rconn",
")",
":",
"ipNum",
"=",
"len",
"(",
"rconn",
".",
"keys",
"(",
"'IP*'",
")",
")",
"if",
"ipNum",
"<",
"IPPOOLNUM",
":",
"IPPOOLS",
"=",
"GetIPPOOLS",
"(",
"IPPOOLNUM",
")",
"for",
"ipall",
"in",
"IPPOOLS",
":",
"try",
":",
"ip",
"=",
"ipall",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"port",
"=",
"ipall",
".",
"split",
"(",
"':'",
")",
"[",
"1",
"]",
"telnetlib",
".",
"Telnet",
"(",
"ip",
",",
"port",
"=",
"port",
",",
"timeout",
"=",
"2",
")",
"#检验代理ip是否有效",
"except",
":",
"logger",
".",
"warning",
"(",
"\"The ip is not available !( IP:%s )\"",
"%",
"ipall",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Get ip Success!( IP:%s )\"",
"%",
"ipall",
")",
"rconn",
".",
"set",
"(",
"\"IP:%s:10\"",
"%",
"(",
"ipall",
")",
",",
"ipall",
")",
"#10 is status",
"else",
":",
"logger",
".",
"warning",
"(",
"\"The number of the IP is %s!\"",
"%",
"str",
"(",
"ipNum",
")",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/proxy.py#L28-L45 |
||
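The validity probe in initIPPOOLS above is nothing more than attempting a TCP connection with telnetlib and treating any exception as a dead proxy. The same check isolated into a helper (note telnetlib was removed in Python 3.13; this mirrors the older-Python code above):

import telnetlib

def proxy_is_alive(ipport, timeout=2):
    # Same check as above: a successful Telnet connect means the proxy answers.
    ip, port = ipport.split(':')
    try:
        telnetlib.Telnet(ip, port=port, timeout=timeout)
        return True
    except Exception:
        return False

print(proxy_is_alive('127.0.0.1:80'))  # False unless something is listening locally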
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/proxy.py | python | updateIPPOOLS | (rconn,ip,status,flag=0) | update status | update status | [
"update",
"status"
] | def updateIPPOOLS(rconn,ip,status,flag=0): # 0 means decrement status by 1, -1 decrement by 2, 1 increment by 1
if int(status) < 1:
removeIPPOOLS(rconn,ip,status)
return
'''update status'''
if flag == 1: #+status
if int(status) < 10:
rconn.delete('IP:'+ ip + ':' + status)
status = int(status) + 1
rconn.set("IP:%s:%s"%(ip,str(status)),ip)
elif flag == -1:
rconn.delete('IP:'+ ip + ':' + status)
status = int(status) - 2
rconn.set("IP:%s:%s"%(ip,str(status)),ip)
else:
rconn.delete('IP:'+ ip + ':' + status)
status = int(status) - 1
rconn.set("IP:%s:%s"%(ip,str(status)),ip) | [
"def",
"updateIPPOOLS",
"(",
"rconn",
",",
"ip",
",",
"status",
",",
"flag",
"=",
"0",
")",
":",
"# 0代表对status减一,-1代表减2,1代表加1",
"if",
"int",
"(",
"status",
")",
"<",
"1",
":",
"removeIPPOOLS",
"(",
"rconn",
",",
"ip",
",",
"status",
")",
"return",
"if",
"flag",
"==",
"1",
":",
"#+status",
"if",
"int",
"(",
"status",
")",
"<",
"10",
":",
"rconn",
".",
"delete",
"(",
"'IP:'",
"+",
"ip",
"+",
"':'",
"+",
"status",
")",
"status",
"=",
"int",
"(",
"status",
")",
"+",
"1",
"rconn",
".",
"set",
"(",
"\"IP:%s:%s\"",
"%",
"(",
"ip",
",",
"str",
"(",
"status",
")",
")",
",",
"ip",
")",
"elif",
"flag",
"==",
"-",
"1",
":",
"rconn",
".",
"delete",
"(",
"'IP:'",
"+",
"ip",
"+",
"':'",
"+",
"status",
")",
"status",
"=",
"int",
"(",
"status",
")",
"-",
"2",
"rconn",
".",
"set",
"(",
"\"IP:%s:%s\"",
"%",
"(",
"ip",
",",
"str",
"(",
"status",
")",
")",
",",
"ip",
")",
"else",
":",
"rconn",
".",
"delete",
"(",
"'IP:'",
"+",
"ip",
"+",
"':'",
"+",
"status",
")",
"status",
"=",
"int",
"(",
"status",
")",
"-",
"1",
"rconn",
".",
"set",
"(",
"\"IP:%s:%s\"",
"%",
"(",
"ip",
",",
"str",
"(",
"status",
")",
")",
",",
"ip",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/proxy.py#L47-L64 |
||
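The Redis scheme used by initIPPOOLS and updateIPPOOLS above encodes health in the key itself, "IP:<address:port>:<status>", so a score change means deleting the old key and setting a new one. The arithmetic can be restated as a pure function (illustrative names, not from the source):

def next_status(status, flag=0):
    # flag 1: reward, +1 capped at 10; flag -1: hard failure, -2; flag 0: soft failure, -1.
    if flag == 1:
        return min(status + 1, 10)
    return status - (2 if flag == -1 else 1)

status = 10
for outcome in (0, 0, -1, 1):   # two soft failures, a hard failure, then a success
    status = next_status(status, outcome)
print(status)  # 7; updateIPPOOLS removes the key entirely once status drops below 1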
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/cookie.py | python | initCookie | (rconn, spiderName) | | | Fetch the cookies of every account and store them in Redis. If Redis already has a cookie for the account, do not fetch it again. | Fetch the cookies of every account and store them in Redis. If Redis already has a cookie for the account, do not fetch it again. | [
"获取所有账号的Cookies,存入Redis。如果Redis已有该账号的Cookie,则不再获取。"
] | def initCookie(rconn, spiderName):
""" 获取所有账号的Cookies,存入Redis。如果Redis已有该账号的Cookie,则不再获取。 """
for zhihu in myZhiHu:
if rconn.get("%s:Cookies:%s--%s" % (spiderName, zhihu[0], zhihu[1])) is None: # 'zhihuspider:Cookies:account--password'; None means the key does not exist.
cookie = getCookie(zhihu[0], zhihu[1],zhihu[2])
if len(cookie) > 0:
rconn.set("%s:Cookies:%s--%s" % (spiderName, zhihu[0], zhihu[1]), cookie)
cookieNum = str(rconn.keys()).count("zhihuspider:Cookies")
logger.warning("The num of the cookies is %s" % cookieNum)
if cookieNum == 0:
logger.warning('Stopping...')
os.system("pause") | [
"def",
"initCookie",
"(",
"rconn",
",",
"spiderName",
")",
":",
"for",
"zhihu",
"in",
"myZhiHu",
":",
"if",
"rconn",
".",
"get",
"(",
"\"%s:Cookies:%s--%s\"",
"%",
"(",
"spiderName",
",",
"zhihu",
"[",
"0",
"]",
",",
"zhihu",
"[",
"1",
"]",
")",
")",
"is",
"None",
":",
"# 'zhihuspider:Cookies:账号--密码',为None即不存在。",
"cookie",
"=",
"getCookie",
"(",
"zhihu",
"[",
"0",
"]",
",",
"zhihu",
"[",
"1",
"]",
",",
"zhihu",
"[",
"2",
"]",
")",
"if",
"len",
"(",
"cookie",
")",
">",
"0",
":",
"rconn",
".",
"set",
"(",
"\"%s:Cookies:%s--%s\"",
"%",
"(",
"spiderName",
",",
"zhihu",
"[",
"0",
"]",
",",
"zhihu",
"[",
"1",
"]",
")",
",",
"cookie",
")",
"cookieNum",
"=",
"str",
"(",
"rconn",
".",
"keys",
"(",
")",
")",
".",
"count",
"(",
"\"zhihuspider:Cookies\"",
")",
"logger",
".",
"warning",
"(",
"\"The num of the cookies is %s\"",
"%",
"cookieNum",
")",
"if",
"cookieNum",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"'Stopping...'",
")",
"os",
".",
"system",
"(",
"\"pause\"",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/cookie.py#L145-L156 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/cookie.py | python | updateCookie | (accountText, rconn, spiderName, cookie) | | | Update the cookie of a single account | Update the cookie of a single account | [
"更新一个账号的Cookie"
] | def updateCookie(accountText, rconn, spiderName, cookie):
""" 更新一个账号的Cookie """
account = accountText.split("--")[0]
#pdb.set_trace()
new_cookie = UpdateCookie(account, cookie)
if len(new_cookie) > 0:
logger.warning("The cookie of %s has been updated successfully!" % account)
rconn.set("%s:Cookies:%s" % (spiderName, accountText), new_cookie)
else:
logger.warning("The cookie of %s updated failed! Remove it!" % accountText)
removeCookie(accountText, rconn, spiderName) | [
"def",
"updateCookie",
"(",
"accountText",
",",
"rconn",
",",
"spiderName",
",",
"cookie",
")",
":",
"account",
"=",
"accountText",
".",
"split",
"(",
"\"--\"",
")",
"[",
"0",
"]",
"#pdb.set_trace()",
"new_cookie",
"=",
"UpdateCookie",
"(",
"account",
",",
"cookie",
")",
"if",
"len",
"(",
"new_cookie",
")",
">",
"0",
":",
"logger",
".",
"warning",
"(",
"\"The cookie of %s has been updated successfully!\"",
"%",
"account",
")",
"rconn",
".",
"set",
"(",
"\"%s:Cookies:%s\"",
"%",
"(",
"spiderName",
",",
"accountText",
")",
",",
"new_cookie",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"The cookie of %s updated failed! Remove it!\"",
"%",
"accountText",
")",
"removeCookie",
"(",
"accountText",
",",
"rconn",
",",
"spiderName",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/cookie.py#L158-L168 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/cookie.py | python | removeCookie | (accountText, rconn, spiderName) | | | Delete the cookie of a given account | Delete the cookie of a given account | [
"删除某个账号的Cookie"
] | def removeCookie(accountText, rconn, spiderName):
""" 删除某个账号的Cookie """
rconn.delete("%s:Cookies:%s" % (spiderName, accountText))
cookieNum = str(rconn.keys()).count("zhihuspider:Cookies")
logger.warning("The num of the cookies left is %s" % cookieNum)
if cookieNum == 0:
logger.warning("Stopping...")
os.system("pause") | [
"def",
"removeCookie",
"(",
"accountText",
",",
"rconn",
",",
"spiderName",
")",
":",
"rconn",
".",
"delete",
"(",
"\"%s:Cookies:%s\"",
"%",
"(",
"spiderName",
",",
"accountText",
")",
")",
"cookieNum",
"=",
"str",
"(",
"rconn",
".",
"keys",
"(",
")",
")",
".",
"count",
"(",
"\"zhihuspider:Cookies\"",
")",
"logger",
".",
"warning",
"(",
"\"The num of the cookies left is %s\"",
"%",
"cookieNum",
")",
"if",
"cookieNum",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Stopping...\"",
")",
"os",
".",
"system",
"(",
"\"pause\"",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/cookie.py#L170-L177 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/scheduler.py | python | Scheduler.__init__ | (self, server,
persist=False,
flush_on_start=False,
queue_key=defaults.SCHEDULER_QUEUE_KEY,
queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
idle_before_close=0,
serializer=None) | Initialize scheduler.
Parameters
----------
server : Redis
The redis server instance.
persist : bool
Whether to flush requests when closing. Default is False.
flush_on_start : bool
Whether to flush requests on start. Default is False.
queue_key : str
Requests queue key.
queue_cls : str
Importable path to the queue class.
dupefilter_key : str
Duplicates filter key.
dupefilter_cls : str
Importable path to the dupefilter class.
idle_before_close : int
Timeout before giving up. | Initialize scheduler. | [
"Initialize",
"scheduler",
"."
] | def __init__(self, server,
persist=False,
flush_on_start=False,
queue_key=defaults.SCHEDULER_QUEUE_KEY,
queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
idle_before_close=0,
serializer=None):
"""Initialize scheduler.
Parameters
----------
server : Redis
The redis server instance.
persist : bool
Whether to flush requests when closing. Default is False.
flush_on_start : bool
Whether to flush requests on start. Default is False.
queue_key : str
Requests queue key.
queue_cls : str
Importable path to the queue class.
dupefilter_key : str
Duplicates filter key.
dupefilter_cls : str
Importable path to the dupefilter class.
idle_before_close : int
Timeout before giving up.
"""
if idle_before_close < 0:
raise TypeError("idle_before_close cannot be negative")
self.server = server
self.persist = persist
self.flush_on_start = flush_on_start
self.queue_key = queue_key
self.queue_cls = queue_cls
self.dupefilter_cls = dupefilter_cls
self.dupefilter_key = dupefilter_key
self.idle_before_close = idle_before_close
self.serializer = serializer
self.stats = None | [
"def",
"__init__",
"(",
"self",
",",
"server",
",",
"persist",
"=",
"False",
",",
"flush_on_start",
"=",
"False",
",",
"queue_key",
"=",
"defaults",
".",
"SCHEDULER_QUEUE_KEY",
",",
"queue_cls",
"=",
"defaults",
".",
"SCHEDULER_QUEUE_CLASS",
",",
"dupefilter_key",
"=",
"defaults",
".",
"SCHEDULER_DUPEFILTER_KEY",
",",
"dupefilter_cls",
"=",
"defaults",
".",
"SCHEDULER_DUPEFILTER_CLASS",
",",
"idle_before_close",
"=",
"0",
",",
"serializer",
"=",
"None",
")",
":",
"if",
"idle_before_close",
"<",
"0",
":",
"raise",
"TypeError",
"(",
"\"idle_before_close cannot be negative\"",
")",
"self",
".",
"server",
"=",
"server",
"self",
".",
"persist",
"=",
"persist",
"self",
".",
"flush_on_start",
"=",
"flush_on_start",
"self",
".",
"queue_key",
"=",
"queue_key",
"self",
".",
"queue_cls",
"=",
"queue_cls",
"self",
".",
"dupefilter_cls",
"=",
"dupefilter_cls",
"self",
".",
"dupefilter_key",
"=",
"dupefilter_key",
"self",
".",
"idle_before_close",
"=",
"idle_before_close",
"self",
".",
"serializer",
"=",
"serializer",
"self",
".",
"stats",
"=",
"None"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/scheduler.py#L34-L77 |
||
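A quick sketch of constructing this scheduler by hand, assuming a local Redis instance and the upstream scrapy-redis import layout; in normal use Scrapy builds it through `from_settings`/`from_crawler`, so this is illustrative only:

import redis
from scrapy_redis.scheduler import Scheduler

server = redis.StrictRedis(host='localhost', port=6379)
# persist=True keeps the queue and dupefilter across runs
scheduler = Scheduler(server, persist=True, idle_before_close=10)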
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/pipelines.py | python | RedisPipeline.__init__ | (self, server,
key=defaults.PIPELINE_KEY,
serialize_func=default_serialize) | Initialize pipeline.
Parameters
----------
server : StrictRedis
Redis client instance.
key : str
Redis key where to store items.
serialize_func : callable
Items serializer function. | Initialize pipeline. | [
"Initialize",
"pipeline",
"."
] | def __init__(self, server,
key=defaults.PIPELINE_KEY,
serialize_func=default_serialize):
"""Initialize pipeline.
Parameters
----------
server : StrictRedis
Redis client instance.
key : str
Redis key where to store items.
serialize_func : callable
Items serializer function.
"""
self.server = server
self.key = key
self.serialize = serialize_func | [
"def",
"__init__",
"(",
"self",
",",
"server",
",",
"key",
"=",
"defaults",
".",
"PIPELINE_KEY",
",",
"serialize_func",
"=",
"default_serialize",
")",
":",
"self",
".",
"server",
"=",
"server",
"self",
".",
"key",
"=",
"key",
"self",
".",
"serialize",
"=",
"serialize_func"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/pipelines.py#L23-L40 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/pipelines.py | python | RedisPipeline.item_key | (self, item, spider) | return self.key % {'spider': spider.name} | Returns redis key based on given spider.
Override this function to use a different key depending on the item
and/or spider. | Returns redis key based on given spider. | [
"Returns",
"redis",
"key",
"based",
"on",
"given",
"spider",
"."
] | def item_key(self, item, spider):
"""Returns redis key based on given spider.
Override this function to use a different key depending on the item
and/or spider.
"""
return self.key % {'spider': spider.name} | [
"def",
"item_key",
"(",
"self",
",",
"item",
",",
"spider",
")",
":",
"return",
"self",
".",
"key",
"%",
"{",
"'spider'",
":",
"spider",
".",
"name",
"}"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/pipelines.py#L69-L76 |
|
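Because `item_key` receives both the item and the spider, a subclass can shard scraped items by type instead of by spider alone; `TypedRedisPipeline` below is a hypothetical name, not part of the repo:

class TypedRedisPipeline(RedisPipeline):
    def item_key(self, item, spider):
        # e.g. 'zhihu:UserItem:items' -- one Redis list per item class
        return '%s:%s:items' % (spider.name, type(item).__name__)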
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base.__init__ | (self, server, spider, key, serializer=None) | Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods. | Initialize per-spider redis queue. | [
"Initialize",
"per",
"-",
"spider",
"redis",
"queue",
"."
] | def __init__(self, server, spider, key, serializer=None):
"""Initialize per-spider redis queue.
Parameters
----------
server : StrictRedis
Redis client instance.
spider : Spider
Scrapy spider instance.
key: str
Redis key where to put and get messages.
serializer : object
Serializer object with ``loads`` and ``dumps`` methods.
"""
if serializer is None:
# Backward compatibility.
# TODO: deprecate pickle.
serializer = picklecompat
if not hasattr(serializer, 'loads'):
raise TypeError("serializer does not implement 'loads' function: %r"
% serializer)
if not hasattr(serializer, 'dumps'):
raise TypeError("serializer '%s' does not implement 'dumps' function: %r"
% serializer)
self.server = server
self.spider = spider
self.key = key % {'spider': spider.name}
self.serializer = serializer | [
"def",
"__init__",
"(",
"self",
",",
"server",
",",
"spider",
",",
"key",
",",
"serializer",
"=",
"None",
")",
":",
"if",
"serializer",
"is",
"None",
":",
"# Backward compatibility.",
"# TODO: deprecate pickle.",
"serializer",
"=",
"picklecompat",
"if",
"not",
"hasattr",
"(",
"serializer",
",",
"'loads'",
")",
":",
"raise",
"TypeError",
"(",
"\"serializer does not implement 'loads' function: %r\"",
"%",
"serializer",
")",
"if",
"not",
"hasattr",
"(",
"serializer",
",",
"'dumps'",
")",
":",
"raise",
"TypeError",
"(",
"\"serializer '%s' does not implement 'dumps' function: %r\"",
"%",
"serializer",
")",
"self",
".",
"server",
"=",
"server",
"self",
".",
"spider",
"=",
"spider",
"self",
".",
"key",
"=",
"key",
"%",
"{",
"'spider'",
":",
"spider",
".",
"name",
"}",
"self",
".",
"serializer",
"=",
"serializer"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L9-L38 |
||
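The two `hasattr` checks mean any object exposing `loads` and `dumps` works as a serializer; the stdlib `json` module satisfies that interface, for instance, though it cannot round-trip the bytes values pickled request dicts may carry, so treat this as a sketch of the duck typing rather than a drop-in replacement:

import json

# json has loads/dumps, so it passes Base.__init__'s checks
q = FifoQueue(server, spider, '%(spider)s:requests', serializer=json)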
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base._encode_request | (self, request) | return self.serializer.dumps(obj) | Encode a request object | Encode a request object | [
"Encode",
"a",
"request",
"object"
] | def _encode_request(self, request):
"""Encode a request object"""
obj = request_to_dict(request, self.spider)
return self.serializer.dumps(obj) | [
"def",
"_encode_request",
"(",
"self",
",",
"request",
")",
":",
"obj",
"=",
"request_to_dict",
"(",
"request",
",",
"self",
".",
"spider",
")",
"return",
"self",
".",
"serializer",
".",
"dumps",
"(",
"obj",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L40-L43 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base._decode_request | (self, encoded_request) | return request_from_dict(obj, self.spider) | Decode a request previously encoded | Decode a request previously encoded | [
"Decode",
"an",
"request",
"previously",
"encoded"
] | def _decode_request(self, encoded_request):
"""Decode an request previously encoded"""
obj = self.serializer.loads(encoded_request)
return request_from_dict(obj, self.spider) | [
"def",
"_decode_request",
"(",
"self",
",",
"encoded_request",
")",
":",
"obj",
"=",
"self",
".",
"serializer",
".",
"loads",
"(",
"encoded_request",
")",
"return",
"request_from_dict",
"(",
"obj",
",",
"self",
".",
"spider",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L45-L48 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base.__len__ | (self) | Return the length of the queue | Return the length of the queue | [
"Return",
"the",
"length",
"of",
"the",
"queue"
] | def __len__(self):
"""Return the length of the queue"""
raise NotImplementedError | [
"def",
"__len__",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L50-L52 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base.push | (self, request) | Push a request | Push a request | [
"Push",
"a",
"request"
] | def push(self, request):
"""Push a request"""
raise NotImplementedError | [
"def",
"push",
"(",
"self",
",",
"request",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L54-L56 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base.pop | (self, timeout=0) | Pop a request | Pop a request | [
"Pop",
"a",
"request"
] | def pop(self, timeout=0):
"""Pop a request"""
raise NotImplementedError | [
"def",
"pop",
"(",
"self",
",",
"timeout",
"=",
"0",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L58-L60 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | Base.clear | (self) | Clear queue/stack | Clear queue/stack | [
"Clear",
"queue",
"/",
"stack"
] | def clear(self):
"""Clear queue/stack"""
self.server.delete(self.key) | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"server",
".",
"delete",
"(",
"self",
".",
"key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L62-L64 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | FifoQueue.__len__ | (self) | return self.server.llen(self.key) | Return the length of the queue | Return the length of the queue | [
"Return",
"the",
"length",
"of",
"the",
"queue"
] | def __len__(self):
"""Return the length of the queue"""
return self.server.llen(self.key) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"self",
".",
"server",
".",
"llen",
"(",
"self",
".",
"key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L70-L72 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | FifoQueue.push | (self, request) | Push a request | Push a request | [
"Push",
"a",
"request"
] | def push(self, request):
"""Push a request"""
self.server.lpush(self.key, self._encode_request(request)) | [
"def",
"push",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"server",
".",
"lpush",
"(",
"self",
".",
"key",
",",
"self",
".",
"_encode_request",
"(",
"request",
")",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L74-L76 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | FifoQueue.pop | (self, timeout=0) | Pop a request | Pop a request | [
"Pop",
"a",
"request"
] | def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = self.server.brpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.rpop(self.key)
if data:
return self._decode_request(data) | [
"def",
"pop",
"(",
"self",
",",
"timeout",
"=",
"0",
")",
":",
"if",
"timeout",
">",
"0",
":",
"data",
"=",
"self",
".",
"server",
".",
"brpop",
"(",
"self",
".",
"key",
",",
"timeout",
")",
"if",
"isinstance",
"(",
"data",
",",
"tuple",
")",
":",
"data",
"=",
"data",
"[",
"1",
"]",
"else",
":",
"data",
"=",
"self",
".",
"server",
".",
"rpop",
"(",
"self",
".",
"key",
")",
"if",
"data",
":",
"return",
"self",
".",
"_decode_request",
"(",
"data",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L78-L87 |
||
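The FIFO ordering comes from pushing on the left and popping on the right of the same Redis list; the same pattern with bare strings, assuming a redis server on localhost:

import redis

r = redis.StrictRedis()
r.lpush('demo:q', 'first')
r.lpush('demo:q', 'second')
assert r.rpop('demo:q') == b'first'  # oldest element comes out first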
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | PriorityQueue.__len__ | (self) | return self.server.zcard(self.key) | Return the length of the queue | Return the length of the queue | [
"Return",
"the",
"length",
"of",
"the",
"queue"
] | def __len__(self):
"""Return the length of the queue"""
return self.server.zcard(self.key) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"self",
".",
"server",
".",
"zcard",
"(",
"self",
".",
"key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L93-L95 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | PriorityQueue.push | (self, request) | Push a request | Push a request | [
"Push",
"a",
"request"
] | def push(self, request):
"""Push a request"""
data = self._encode_request(request)
score = -request.priority
# We don't use zadd method as the order of arguments change depending on
# whether the class is Redis or StrictRedis, and the option of using
# kwargs only accepts strings, not bytes.
self.server.execute_command('ZADD', self.key, score, data) | [
"def",
"push",
"(",
"self",
",",
"request",
")",
":",
"data",
"=",
"self",
".",
"_encode_request",
"(",
"request",
")",
"score",
"=",
"-",
"request",
".",
"priority",
"# We don't use zadd method as the order of arguments change depending on",
"# whether the class is Redis or StrictRedis, and the option of using",
"# kwargs only accepts strings, not bytes.",
"self",
".",
"server",
".",
"execute_command",
"(",
"'ZADD'",
",",
"self",
".",
"key",
",",
"score",
",",
"data",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L97-L104 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | PriorityQueue.pop | (self, timeout=0) | Pop a request
timeout not supported in this queue class | Pop a request
timeout not supported in this queue class | [
"Pop",
"a",
"request",
"timeout",
"not",
"support",
"in",
"this",
"queue",
"class"
] | def pop(self, timeout=0):
"""
Pop a request
timeout not supported in this queue class
"""
# use atomic range/remove using multi/exec
pipe = self.server.pipeline()
pipe.multi()
pipe.zrange(self.key, 0, 0).zremrangebyrank(self.key, 0, 0)
results, count = pipe.execute()
if results:
return self._decode_request(results[0]) | [
"def",
"pop",
"(",
"self",
",",
"timeout",
"=",
"0",
")",
":",
"# use atomic range/remove using multi/exec",
"pipe",
"=",
"self",
".",
"server",
".",
"pipeline",
"(",
")",
"pipe",
".",
"multi",
"(",
")",
"pipe",
".",
"zrange",
"(",
"self",
".",
"key",
",",
"0",
",",
"0",
")",
".",
"zremrangebyrank",
"(",
"self",
".",
"key",
",",
"0",
",",
"0",
")",
"results",
",",
"count",
"=",
"pipe",
".",
"execute",
"(",
")",
"if",
"results",
":",
"return",
"self",
".",
"_decode_request",
"(",
"results",
"[",
"0",
"]",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L106-L117 |
||
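The MULTI/EXEC pipeline makes the read-then-delete atomic, so two workers can never pop the same member; the same idiom in isolation, with scores negated as in `push` so the highest priority sorts first:

import redis

r = redis.StrictRedis()
r.execute_command('ZADD', 'demo:pq', -10, 'urgent')
r.execute_command('ZADD', 'demo:pq', -1, 'normal')

pipe = r.pipeline()
pipe.multi()
pipe.zrange('demo:pq', 0, 0).zremrangebyrank('demo:pq', 0, 0)
results, count = pipe.execute()
assert results == [b'urgent']  # lowest score == highest request priority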
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | LifoQueue.__len__ | (self) | return self.server.llen(self.key) | Return the length of the stack | Return the length of the stack | [
"Return",
"the",
"length",
"of",
"the",
"stack"
] | def __len__(self):
"""Return the length of the stack"""
return self.server.llen(self.key) | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"self",
".",
"server",
".",
"llen",
"(",
"self",
".",
"key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L123-L125 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | LifoQueue.push | (self, request) | Push a request | Push a request | [
"Push",
"a",
"request"
] | def push(self, request):
"""Push a request"""
self.server.lpush(self.key, self._encode_request(request)) | [
"def",
"push",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"server",
".",
"lpush",
"(",
"self",
".",
"key",
",",
"self",
".",
"_encode_request",
"(",
"request",
")",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L127-L129 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/queue.py | python | LifoQueue.pop | (self, timeout=0) | Pop a request | Pop a request | [
"Pop",
"a",
"request"
] | def pop(self, timeout=0):
"""Pop a request"""
if timeout > 0:
data = self.server.blpop(self.key, timeout)
if isinstance(data, tuple):
data = data[1]
else:
data = self.server.lpop(self.key)
if data:
return self._decode_request(data) | [
"def",
"pop",
"(",
"self",
",",
"timeout",
"=",
"0",
")",
":",
"if",
"timeout",
">",
"0",
":",
"data",
"=",
"self",
".",
"server",
".",
"blpop",
"(",
"self",
".",
"key",
",",
"timeout",
")",
"if",
"isinstance",
"(",
"data",
",",
"tuple",
")",
":",
"data",
"=",
"data",
"[",
"1",
"]",
"else",
":",
"data",
"=",
"self",
".",
"server",
".",
"lpop",
"(",
"self",
".",
"key",
")",
"if",
"data",
":",
"return",
"self",
".",
"_decode_request",
"(",
"data",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/queue.py#L131-L141 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/connection.py | python | get_redis_from_settings | (settings) | return get_redis(**params) | Returns a redis client instance from given Scrapy settings object.
This function uses ``get_redis`` to instantiate the client and uses
the ``defaults.REDIS_PARAMS`` global as default values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters. | Returns a redis client instance from given Scrapy settings object. | [
"Returns",
"a",
"redis",
"client",
"instance",
"from",
"given",
"Scrapy",
"settings",
"object",
"."
] | def get_redis_from_settings(settings):
"""Returns a redis client instance from given Scrapy settings object.
This function uses ``get_client`` to instantiate the client and uses
``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_PARAMS : dict, optional
Additional client parameters.
"""
params = defaults.REDIS_PARAMS.copy()
params.update(settings.getdict('REDIS_PARAMS'))
# XXX: Deprecate REDIS_* settings.
for source, dest in SETTINGS_PARAMS_MAP.items():
val = settings.get(source)
if val:
params[dest] = val
# Allow ``redis_cls`` to be a path to a class.
if isinstance(params.get('redis_cls'), six.string_types):
params['redis_cls'] = load_object(params['redis_cls'])
return get_redis(**params) | [
"def",
"get_redis_from_settings",
"(",
"settings",
")",
":",
"params",
"=",
"defaults",
".",
"REDIS_PARAMS",
".",
"copy",
"(",
")",
"params",
".",
"update",
"(",
"settings",
".",
"getdict",
"(",
"'REDIS_PARAMS'",
")",
")",
"# XXX: Deprecate REDIS_* settings.",
"for",
"source",
",",
"dest",
"in",
"SETTINGS_PARAMS_MAP",
".",
"items",
"(",
")",
":",
"val",
"=",
"settings",
".",
"get",
"(",
"source",
")",
"if",
"val",
":",
"params",
"[",
"dest",
"]",
"=",
"val",
"# Allow ``redis_cls`` to be a path to a class.",
"if",
"isinstance",
"(",
"params",
".",
"get",
"(",
"'redis_cls'",
")",
",",
"six",
".",
"string_types",
")",
":",
"params",
"[",
"'redis_cls'",
"]",
"=",
"load_object",
"(",
"params",
"[",
"'redis_cls'",
"]",
")",
"return",
"get_redis",
"(",
"*",
"*",
"params",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/connection.py#L17-L60 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/connection.py | python | get_redis | (**kwargs) | Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance. | Returns a redis client instance. | [
"Returns",
"a",
"redis",
"client",
"instance",
"."
] | def get_redis(**kwargs):
"""Returns a redis client instance.
Parameters
----------
redis_cls : class, optional
Defaults to ``redis.StrictRedis``.
url : str, optional
If given, ``redis_cls.from_url`` is used to instantiate the class.
**kwargs
Extra parameters to be passed to the ``redis_cls`` class.
Returns
-------
server
Redis client instance.
"""
redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS)
url = kwargs.pop('url', None)
if url:
return redis_cls.from_url(url, **kwargs)
else:
return redis_cls(**kwargs) | [
"def",
"get_redis",
"(",
"*",
"*",
"kwargs",
")",
":",
"redis_cls",
"=",
"kwargs",
".",
"pop",
"(",
"'redis_cls'",
",",
"defaults",
".",
"REDIS_CLS",
")",
"url",
"=",
"kwargs",
".",
"pop",
"(",
"'url'",
",",
"None",
")",
"if",
"url",
":",
"return",
"redis_cls",
".",
"from_url",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"redis_cls",
"(",
"*",
"*",
"kwargs",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/connection.py#L67-L90 |
||
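Both construction paths in one place; everything besides `url` and `redis_cls` is forwarded to the client constructor, and the URL form takes precedence:

server = get_redis(url='redis://localhost:6379/0')
server = get_redis(host='localhost', port=6379, db=0)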
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.start_requests | (self) | return self.next_requests() | Returns a batch of start requests from redis. | Returns a batch of start requests from redis. | [
"Returns",
"a",
"batch",
"of",
"start",
"requests",
"from",
"redis",
"."
] | def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests() | [
"def",
"start_requests",
"(",
"self",
")",
":",
"return",
"self",
".",
"next_requests",
"(",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L18-L20 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.setup_redis | (self, crawler=None) | Setup redis connection and idle signal.
This should be called after the spider has set its crawler object. | Setup redis connection and idle signal. | [
"Setup",
"redis",
"connection",
"and",
"idle",
"signal",
"."
] | def setup_redis(self, crawler=None):
"""Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle) | [
"def",
"setup_redis",
"(",
"self",
",",
"crawler",
"=",
"None",
")",
":",
"if",
"self",
".",
"server",
"is",
"not",
"None",
":",
"return",
"if",
"crawler",
"is",
"None",
":",
"# We allow optional crawler argument to keep backwards",
"# compatibility.",
"# XXX: Raise a deprecation warning.",
"crawler",
"=",
"getattr",
"(",
"self",
",",
"'crawler'",
",",
"None",
")",
"if",
"crawler",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"crawler is required\"",
")",
"settings",
"=",
"crawler",
".",
"settings",
"if",
"self",
".",
"redis_key",
"is",
"None",
":",
"self",
".",
"redis_key",
"=",
"settings",
".",
"get",
"(",
"'REDIS_START_URLS_KEY'",
",",
"defaults",
".",
"START_URLS_KEY",
",",
")",
"self",
".",
"redis_key",
"=",
"self",
".",
"redis_key",
"%",
"{",
"'name'",
":",
"self",
".",
"name",
"}",
"if",
"not",
"self",
".",
"redis_key",
".",
"strip",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"redis_key must not be empty\"",
")",
"if",
"self",
".",
"redis_batch_size",
"is",
"None",
":",
"# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).",
"self",
".",
"redis_batch_size",
"=",
"settings",
".",
"getint",
"(",
"'REDIS_START_URLS_BATCH_SIZE'",
",",
"settings",
".",
"getint",
"(",
"'CONCURRENT_REQUESTS'",
")",
",",
")",
"try",
":",
"self",
".",
"redis_batch_size",
"=",
"int",
"(",
"self",
".",
"redis_batch_size",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"\"redis_batch_size must be an integer\"",
")",
"if",
"self",
".",
"redis_encoding",
"is",
"None",
":",
"self",
".",
"redis_encoding",
"=",
"settings",
".",
"get",
"(",
"'REDIS_ENCODING'",
",",
"defaults",
".",
"REDIS_ENCODING",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Reading start URLs from redis key '%(redis_key)s' \"",
"\"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s\"",
",",
"self",
".",
"__dict__",
")",
"self",
".",
"server",
"=",
"connection",
".",
"from_settings",
"(",
"crawler",
".",
"settings",
")",
"# The idle signal is called when the spider has no requests left,",
"# that's when we will schedule new requests from redis queue",
"crawler",
".",
"signals",
".",
"connect",
"(",
"self",
".",
"spider_idle",
",",
"signal",
"=",
"signals",
".",
"spider_idle",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L22-L73 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.next_requests | (self) | | Returns a request to be scheduled or None. | Returns a request to be scheduled or None. | [
"Returns",
"a",
"request",
"to",
"be",
"scheduled",
"or",
"none",
"."
] | def next_requests(self):
"""Returns a request to be scheduled or none."""
use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
fetch_one = self.server.spop if use_set else self.server.lpop
# XXX: Do we need to use a timeout here?
found = 0
# TODO: Use redis pipeline execution.
while found < self.redis_batch_size:
data = fetch_one(self.redis_key)
if not data:
# Queue empty.
break
req = self.make_request_from_data(data)
if req:
yield req
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key) | [
"def",
"next_requests",
"(",
"self",
")",
":",
"use_set",
"=",
"self",
".",
"settings",
".",
"getbool",
"(",
"'REDIS_START_URLS_AS_SET'",
",",
"defaults",
".",
"START_URLS_AS_SET",
")",
"fetch_one",
"=",
"self",
".",
"server",
".",
"spop",
"if",
"use_set",
"else",
"self",
".",
"server",
".",
"lpop",
"# XXX: Do we need to use a timeout here?",
"found",
"=",
"0",
"# TODO: Use redis pipeline execution.",
"while",
"found",
"<",
"self",
".",
"redis_batch_size",
":",
"data",
"=",
"fetch_one",
"(",
"self",
".",
"redis_key",
")",
"if",
"not",
"data",
":",
"# Queue empty.",
"break",
"req",
"=",
"self",
".",
"make_request_from_data",
"(",
"data",
")",
"if",
"req",
":",
"yield",
"req",
"found",
"+=",
"1",
"else",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Request not made from data: %r\"",
",",
"data",
")",
"if",
"found",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Read %s requests from '%s'\"",
",",
"found",
",",
"self",
".",
"redis_key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L75-L95 |
||
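The spider only consumes what an external producer pushes under `redis_key`, so seeding start URLs is a plain `lpush` (or `sadd` when `REDIS_START_URLS_AS_SET` is on); the key and URL below are hypothetical examples of the default `<spider>:start_urls` pattern:

import redis

r = redis.StrictRedis()
r.lpush('zhihu:start_urls', 'https://www.zhihu.com/people/some-user')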
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.make_request_from_data | (self, data) | return self.make_requests_from_url(url) | Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis. | Returns a Request instance from data coming from Redis. | [
"Returns",
"a",
"Request",
"instance",
"from",
"data",
"coming",
"from",
"Redis",
"."
] | def make_request_from_data(self, data):
"""Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url) | [
"def",
"make_request_from_data",
"(",
"self",
",",
"data",
")",
":",
"url",
"=",
"bytes_to_str",
"(",
"data",
",",
"self",
".",
"redis_encoding",
")",
"return",
"self",
".",
"make_requests_from_url",
"(",
"url",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L97-L110 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.schedule_next_requests | (self) | Schedules a request if available | Schedules a request if available | [
"Schedules",
"a",
"request",
"if",
"available"
] | def schedule_next_requests(self):
"""Schedules a request if available"""
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self) | [
"def",
"schedule_next_requests",
"(",
"self",
")",
":",
"# TODO: While there is capacity, schedule a batch of redis requests.",
"for",
"req",
"in",
"self",
".",
"next_requests",
"(",
")",
":",
"self",
".",
"crawler",
".",
"engine",
".",
"crawl",
"(",
"req",
",",
"spider",
"=",
"self",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L112-L116 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/spiders.py | python | RedisMixin.spider_idle | (self) | Schedules a request if available, otherwise waits. | Schedules a request if available, otherwise waits. | [
"Schedules",
"a",
"request",
"if",
"available",
"otherwise",
"waits",
"."
] | def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider | [
"def",
"spider_idle",
"(",
"self",
")",
":",
"# XXX: Handle a sentinel to close the spider.",
"self",
".",
"schedule_next_requests",
"(",
")",
"raise",
"DontCloseSpider"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/spiders.py#L118-L122 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/utils.py | python | bytes_to_str | (s, encoding='utf-8') | return s | Returns a str if a bytes object is given. | Returns a str if a bytes object is given. | [
"Returns",
"a",
"str",
"if",
"a",
"bytes",
"object",
"is",
"given",
"."
] | def bytes_to_str(s, encoding='utf-8'):
"""Returns a str if a bytes object is given."""
if six.PY3 and isinstance(s, bytes):
return s.decode(encoding)
return s | [
"def",
"bytes_to_str",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"six",
".",
"PY3",
"and",
"isinstance",
"(",
"s",
",",
"bytes",
")",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
")",
"return",
"s"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/utils.py#L4-L8 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.__init__ | (self, server, key, debug=False) | Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
Redis key where to store fingerprints.
debug : bool, optional
Whether to log filtered requests. | Initialize the duplicates filter. | [
"Initialize",
"the",
"duplicates",
"filter",
"."
] | def __init__(self, server, key, debug=False):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
Redis key where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.bf = BloomFilter(server, key, blockNum=1) # you can increase blockNum if you are filtering too many URLs
self.logdupes = True | [
"def",
"__init__",
"(",
"self",
",",
"server",
",",
"key",
",",
"debug",
"=",
"False",
")",
":",
"self",
".",
"server",
"=",
"server",
"self",
".",
"key",
"=",
"key",
"self",
".",
"debug",
"=",
"debug",
"self",
".",
"bf",
"=",
"BloomFilter",
"(",
"server",
",",
"key",
",",
"blockNum",
"=",
"1",
")",
"# you can increase blockNum if your are filtering too many urls",
"self",
".",
"logdupes",
"=",
"True"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L25-L42 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.from_settings | (cls, settings) | return cls(server, key=key, debug=debug) | Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance. | Returns an instance from given settings. | [
"Returns",
"an",
"instance",
"from",
"given",
"settings",
"."
] | def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
# XXX: This creates one-time key. needed to support to use this
# class as standalone dupefilter with scrapy's default scheduler
# if scrapy passes spider on open() method this wouldn't be needed
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug) | [
"def",
"from_settings",
"(",
"cls",
",",
"settings",
")",
":",
"server",
"=",
"get_redis_from_settings",
"(",
"settings",
")",
"# XXX: This creates one-time key. needed to support to use this",
"# class as standalone dupefilter with scrapy's default scheduler",
"# if scrapy passes spider on open() method this wouldn't be needed",
"# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.",
"key",
"=",
"defaults",
".",
"DUPEFILTER_KEY",
"%",
"{",
"'timestamp'",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"}",
"debug",
"=",
"settings",
".",
"getbool",
"(",
"'DUPEFILTER_DEBUG'",
")",
"return",
"cls",
"(",
"server",
",",
"key",
"=",
"key",
",",
"debug",
"=",
"debug",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L45-L70 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.from_crawler | (cls, crawler) | return cls.from_settings(crawler.settings) | Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter. | Returns instance from crawler. | [
"Returns",
"instance",
"from",
"crawler",
"."
] | def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings) | [
"def",
"from_crawler",
"(",
"cls",
",",
"crawler",
")",
":",
"return",
"cls",
".",
"from_settings",
"(",
"crawler",
".",
"settings",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L73-L86 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.request_seen | (self, request) | Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool | Returns True if request was already seen. | [
"Returns",
"True",
"if",
"request",
"was",
"already",
"seen",
"."
] | def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = request_fingerprint(request)
if self.bf.isContains(fp):
return True
else:
self.bf.insert(fp)
return False | [
"def",
"request_seen",
"(",
"self",
",",
"request",
")",
":",
"fp",
"=",
"request_fingerprint",
"(",
"request",
")",
"if",
"self",
".",
"bf",
".",
"isContains",
"(",
"fp",
")",
":",
"return",
"True",
"else",
":",
"self",
".",
"bf",
".",
"insert",
"(",
"fp",
")",
"return",
"False"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L88-L105 |
||
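`request_seen` replaces scrapy-redis's usual exact SADD membership test with the repo's Bloom filter, trading a small false-positive rate for constant memory; the check-then-insert pattern generalizes to any filter exposing the same two methods:

def seen_before(bf, fingerprint):
    # True for (probable) duplicates; new fingerprints are recorded
    if bf.isContains(fingerprint):
        return True
    bf.insert(fingerprint)
    return False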
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.request_fingerprint | (self, request) | return request_fingerprint(request) | Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str | Returns a fingerprint for a given request. | [
"Returns",
"a",
"fingerprint",
"for",
"a",
"given",
"request",
"."
] | def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request) | [
"def",
"request_fingerprint",
"(",
"self",
",",
"request",
")",
":",
"return",
"request_fingerprint",
"(",
"request",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L107-L119 |
|
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.close | (self, reason='') | Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional | Delete data on close. Called by Scrapy's scheduler. | [
"Delete",
"data",
"on",
"close",
".",
"Called",
"by",
"Scrapy",
"s",
"scheduler",
"."
] | def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear() | [
"def",
"close",
"(",
"self",
",",
"reason",
"=",
"''",
")",
":",
"self",
".",
"clear",
"(",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L121-L129 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.clear | (self) | Clears fingerprints data. | Clears fingerprints data. | [
"Clears",
"fingerprints",
"data",
"."
] | def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key) | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"server",
".",
"delete",
"(",
"self",
".",
"key",
")"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L131-L133 |
||
AlexTan-b-z/ZhihuSpider | 7f35d157fa7f3a7ac8545b386e98286ee2764462 | zhihu/zhihu/scrapy_redis/dupefilter.py | python | RFPDupeFilter.log | (self, request, spider) | Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider | Logs given request. | [
"Logs",
"given",
"request",
"."
] | def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False | [
"def",
"log",
"(",
"self",
",",
"request",
",",
"spider",
")",
":",
"if",
"self",
".",
"debug",
":",
"msg",
"=",
"\"Filtered duplicate request: %(request)s\"",
"self",
".",
"logger",
".",
"debug",
"(",
"msg",
",",
"{",
"'request'",
":",
"request",
"}",
",",
"extra",
"=",
"{",
"'spider'",
":",
"spider",
"}",
")",
"elif",
"self",
".",
"logdupes",
":",
"msg",
"=",
"(",
"\"Filtered duplicate request %(request)s\"",
"\" - no more duplicates will be shown\"",
"\" (see DUPEFILTER_DEBUG to show all duplicates)\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"msg",
",",
"{",
"'request'",
":",
"request",
"}",
",",
"extra",
"=",
"{",
"'spider'",
":",
"spider",
"}",
")",
"self",
".",
"logdupes",
"=",
"False"
] | https://github.com/AlexTan-b-z/ZhihuSpider/blob/7f35d157fa7f3a7ac8545b386e98286ee2764462/zhihu/zhihu/scrapy_redis/dupefilter.py#L135-L152 |
||
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_asr.py | python | Solver.fetch_data | (self, data) | return feat, feat_len, txt, txt_len | Move data to device and compute text seq. length | Move data to device and compute text seq. length | [
"Move",
"data",
"to",
"device",
"and",
"compute",
"text",
"seq",
".",
"length"
] | def fetch_data(self, data):
''' Move data to device and compute text seq. length'''
_, feat, feat_len, txt = data
feat = feat.to(self.device)
feat_len = feat_len.to(self.device)
txt = txt.to(self.device)
txt_len = torch.sum(txt != 0, dim=-1)
return feat, feat_len, txt, txt_len | [
"def",
"fetch_data",
"(",
"self",
",",
"data",
")",
":",
"_",
",",
"feat",
",",
"feat_len",
",",
"txt",
"=",
"data",
"feat",
"=",
"feat",
".",
"to",
"(",
"self",
".",
"device",
")",
"feat_len",
"=",
"feat_len",
".",
"to",
"(",
"self",
".",
"device",
")",
"txt",
"=",
"txt",
".",
"to",
"(",
"self",
".",
"device",
")",
"txt_len",
"=",
"torch",
".",
"sum",
"(",
"txt",
"!=",
"0",
",",
"dim",
"=",
"-",
"1",
")",
"return",
"feat",
",",
"feat_len",
",",
"txt",
",",
"txt_len"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_asr.py#L20-L28 |
|
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_asr.py | python | Solver.load_data | (self) | Load data for training/validation, store tokenizer and input/output shape | Load data for training/validation, store tokenizer and input/output shape | [
"Load",
"data",
"for",
"training",
"/",
"validation",
"store",
"tokenizer",
"and",
"input",
"/",
"output",
"shape"
] | def load_data(self):
''' Load data for training/validation, store tokenizer and input/output shape'''
self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
self.curriculum > 0, **self.config['data'])
self.verbose(msg) | [
"def",
"load_data",
"(",
"self",
")",
":",
"self",
".",
"tr_set",
",",
"self",
".",
"dv_set",
",",
"self",
".",
"feat_dim",
",",
"self",
".",
"vocab_size",
",",
"self",
".",
"tokenizer",
",",
"msg",
"=",
"load_dataset",
"(",
"self",
".",
"paras",
".",
"njobs",
",",
"self",
".",
"paras",
".",
"gpu",
",",
"self",
".",
"paras",
".",
"pin_memory",
",",
"self",
".",
"curriculum",
">",
"0",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'data'",
"]",
")",
"self",
".",
"verbose",
"(",
"msg",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_asr.py#L30-L35 |
||
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_asr.py | python | Solver.set_model | (self) | Setup ASR model and optimizer | Setup ASR model and optimizer | [
"Setup",
"ASR",
"model",
"and",
"optimizer"
] | def set_model(self):
''' Setup ASR model and optimizer '''
# Model
init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'
self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **
self.config['model']).to(self.device)
self.verbose(self.model.create_msg())
model_paras = [{'params': self.model.parameters()}]
# Losses
self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
# Note: zero_infinity=False is unstable?
self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)
# Plug-ins
self.emb_fuse = False
self.emb_reg = ('emb' in self.config) and (
self.config['emb']['enable'])
if self.emb_reg:
from src.plugin import EmbeddingRegularizer
self.emb_decoder = EmbeddingRegularizer(
self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)
model_paras.append({'params': self.emb_decoder.parameters()})
self.emb_fuse = self.emb_decoder.apply_fuse
if self.emb_fuse:
self.seq_loss = torch.nn.NLLLoss(ignore_index=0)
self.verbose(self.emb_decoder.create_msg())
# Optimizer
self.optimizer = Optimizer(model_paras, **self.config['hparas'])
self.verbose(self.optimizer.create_msg())
# Enable AMP if needed
self.enable_apex()
# Automatically load pre-trained model if self.paras.load is given
self.load_ckpt() | [
"def",
"set_model",
"(",
"self",
")",
":",
"# Model",
"init_adadelta",
"=",
"self",
".",
"config",
"[",
"'hparas'",
"]",
"[",
"'optimizer'",
"]",
"==",
"'Adadelta'",
"self",
".",
"model",
"=",
"ASR",
"(",
"self",
".",
"feat_dim",
",",
"self",
".",
"vocab_size",
",",
"init_adadelta",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'model'",
"]",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"self",
".",
"verbose",
"(",
"self",
".",
"model",
".",
"create_msg",
"(",
")",
")",
"model_paras",
"=",
"[",
"{",
"'params'",
":",
"self",
".",
"model",
".",
"parameters",
"(",
")",
"}",
"]",
"# Losses",
"self",
".",
"seq_loss",
"=",
"torch",
".",
"nn",
".",
"CrossEntropyLoss",
"(",
"ignore_index",
"=",
"0",
")",
"# Note: zero_infinity=False is unstable?",
"self",
".",
"ctc_loss",
"=",
"torch",
".",
"nn",
".",
"CTCLoss",
"(",
"blank",
"=",
"0",
",",
"zero_infinity",
"=",
"False",
")",
"# Plug-ins",
"self",
".",
"emb_fuse",
"=",
"False",
"self",
".",
"emb_reg",
"=",
"(",
"'emb'",
"in",
"self",
".",
"config",
")",
"and",
"(",
"self",
".",
"config",
"[",
"'emb'",
"]",
"[",
"'enable'",
"]",
")",
"if",
"self",
".",
"emb_reg",
":",
"from",
"src",
".",
"plugin",
"import",
"EmbeddingRegularizer",
"self",
".",
"emb_decoder",
"=",
"EmbeddingRegularizer",
"(",
"self",
".",
"tokenizer",
",",
"self",
".",
"model",
".",
"dec_dim",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'emb'",
"]",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"model_paras",
".",
"append",
"(",
"{",
"'params'",
":",
"self",
".",
"emb_decoder",
".",
"parameters",
"(",
")",
"}",
")",
"self",
".",
"emb_fuse",
"=",
"self",
".",
"emb_decoder",
".",
"apply_fuse",
"if",
"self",
".",
"emb_fuse",
":",
"self",
".",
"seq_loss",
"=",
"torch",
".",
"nn",
".",
"NLLLoss",
"(",
"ignore_index",
"=",
"0",
")",
"self",
".",
"verbose",
"(",
"self",
".",
"emb_decoder",
".",
"create_msg",
"(",
")",
")",
"# Optimizer",
"self",
".",
"optimizer",
"=",
"Optimizer",
"(",
"model_paras",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'hparas'",
"]",
")",
"self",
".",
"verbose",
"(",
"self",
".",
"optimizer",
".",
"create_msg",
"(",
")",
")",
"# Enable AMP if needed",
"self",
".",
"enable_apex",
"(",
")",
"# Automatically load pre-trained model if self.paras.load is given",
"self",
".",
"load_ckpt",
"(",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_asr.py#L37-L73 |
||
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_asr.py | python | Solver.exec | (self) | Training End-to-end ASR system | Training End-to-end ASR system | [
"Training",
"End",
"-",
"to",
"-",
"end",
"ASR",
"system"
] | def exec(self):
''' Training End-to-end ASR system '''
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
ctc_loss, att_loss, emb_loss = None, None, None
n_epochs = 0
self.timer.set()
while self.step < self.max_step:
# Renew dataloader to enable random sampling
if self.curriculum > 0 and n_epochs == self.curriculum:
self.verbose(
'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))
self.tr_set, _, _, _, _, _ = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
False, **self.config['data'])
for data in self.tr_set:
# Pre-step : update tf_rate/lr_rate and do zero_grad
tf_rate = self.optimizer.pre_step(self.step)
total_loss = 0
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
# Forward model
# Note: txt should NOT start w/ <sos>
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,
teacher=txt, get_dec_state=self.emb_reg)
# Plugins
if self.emb_reg:
emb_loss, fuse_output = self.emb_decoder(
dec_state, att_output, label=txt)
total_loss += self.emb_decoder.weight*emb_loss
# Compute all objectives
if ctc_output is not None:
if self.paras.cudnn_ctc:
ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),
txt.to_sparse().values().to(device='cpu', dtype=torch.int32),
[ctc_output.shape[1]] *
len(ctc_output),
txt_len.cpu().tolist())
else:
ctc_loss = self.ctc_loss(ctc_output.transpose(
0, 1), txt, encode_len, txt_len)
total_loss += ctc_loss*self.model.ctc_weight
if att_output is not None:
b, t, _ = att_output.shape
att_output = fuse_output if self.emb_fuse else att_output
att_loss = self.seq_loss(
att_output.view(b*t, -1), txt.view(-1))
total_loss += att_loss*(1-self.model.ctc_weight)
self.timer.cnt('fw')
# Backprop
grad_norm = self.backward(total_loss)
self.step += 1
# Logger
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(total_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log(
'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})
self.write_log('emb_loss', {'tr': emb_loss})
self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),
'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})
if self.emb_fuse:
if self.emb_decoder.fuse_learnable:
self.write_log('fuse_lambda', {
'emb': self.emb_decoder.get_weight()})
self.write_log(
'fuse_temp', {'temp': self.emb_decoder.get_temp()})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354
torch.cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close() | [
"def",
"exec",
"(",
"self",
")",
":",
"self",
".",
"verbose",
"(",
"'Total training steps {}.'",
".",
"format",
"(",
"human_format",
"(",
"self",
".",
"max_step",
")",
")",
")",
"ctc_loss",
",",
"att_loss",
",",
"emb_loss",
"=",
"None",
",",
"None",
",",
"None",
"n_epochs",
"=",
"0",
"self",
".",
"timer",
".",
"set",
"(",
")",
"while",
"self",
".",
"step",
"<",
"self",
".",
"max_step",
":",
"# Renew dataloader to enable random sampling",
"if",
"self",
".",
"curriculum",
">",
"0",
"and",
"n_epochs",
"==",
"self",
".",
"curriculum",
":",
"self",
".",
"verbose",
"(",
"'Curriculum learning ends after {} epochs, starting random sampling.'",
".",
"format",
"(",
"n_epochs",
")",
")",
"self",
".",
"tr_set",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
",",
"_",
"=",
"load_dataset",
"(",
"self",
".",
"paras",
".",
"njobs",
",",
"self",
".",
"paras",
".",
"gpu",
",",
"self",
".",
"paras",
".",
"pin_memory",
",",
"False",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'data'",
"]",
")",
"for",
"data",
"in",
"self",
".",
"tr_set",
":",
"# Pre-step : update tf_rate/lr_rate and do zero_grad",
"tf_rate",
"=",
"self",
".",
"optimizer",
".",
"pre_step",
"(",
"self",
".",
"step",
")",
"total_loss",
"=",
"0",
"# Fetch data",
"feat",
",",
"feat_len",
",",
"txt",
",",
"txt_len",
"=",
"self",
".",
"fetch_data",
"(",
"data",
")",
"self",
".",
"timer",
".",
"cnt",
"(",
"'rd'",
")",
"# Forward model",
"# Note: txt should NOT start w/ <sos>",
"ctc_output",
",",
"encode_len",
",",
"att_output",
",",
"att_align",
",",
"dec_state",
"=",
"self",
".",
"model",
"(",
"feat",
",",
"feat_len",
",",
"max",
"(",
"txt_len",
")",
",",
"tf_rate",
"=",
"tf_rate",
",",
"teacher",
"=",
"txt",
",",
"get_dec_state",
"=",
"self",
".",
"emb_reg",
")",
"# Plugins",
"if",
"self",
".",
"emb_reg",
":",
"emb_loss",
",",
"fuse_output",
"=",
"self",
".",
"emb_decoder",
"(",
"dec_state",
",",
"att_output",
",",
"label",
"=",
"txt",
")",
"total_loss",
"+=",
"self",
".",
"emb_decoder",
".",
"weight",
"*",
"emb_loss",
"# Compute all objectives",
"if",
"ctc_output",
"is",
"not",
"None",
":",
"if",
"self",
".",
"paras",
".",
"cudnn_ctc",
":",
"ctc_loss",
"=",
"self",
".",
"ctc_loss",
"(",
"ctc_output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"txt",
".",
"to_sparse",
"(",
")",
".",
"values",
"(",
")",
".",
"to",
"(",
"device",
"=",
"'cpu'",
",",
"dtype",
"=",
"torch",
".",
"int32",
")",
",",
"[",
"ctc_output",
".",
"shape",
"[",
"1",
"]",
"]",
"*",
"len",
"(",
"ctc_output",
")",
",",
"txt_len",
".",
"cpu",
"(",
")",
".",
"tolist",
"(",
")",
")",
"else",
":",
"ctc_loss",
"=",
"self",
".",
"ctc_loss",
"(",
"ctc_output",
".",
"transpose",
"(",
"0",
",",
"1",
")",
",",
"txt",
",",
"encode_len",
",",
"txt_len",
")",
"total_loss",
"+=",
"ctc_loss",
"*",
"self",
".",
"model",
".",
"ctc_weight",
"if",
"att_output",
"is",
"not",
"None",
":",
"b",
",",
"t",
",",
"_",
"=",
"att_output",
".",
"shape",
"att_output",
"=",
"fuse_output",
"if",
"self",
".",
"emb_fuse",
"else",
"att_output",
"att_loss",
"=",
"self",
".",
"seq_loss",
"(",
"att_output",
".",
"view",
"(",
"b",
"*",
"t",
",",
"-",
"1",
")",
",",
"txt",
".",
"view",
"(",
"-",
"1",
")",
")",
"total_loss",
"+=",
"att_loss",
"*",
"(",
"1",
"-",
"self",
".",
"model",
".",
"ctc_weight",
")",
"self",
".",
"timer",
".",
"cnt",
"(",
"'fw'",
")",
"# Backprop",
"grad_norm",
"=",
"self",
".",
"backward",
"(",
"total_loss",
")",
"self",
".",
"step",
"+=",
"1",
"# Logger",
"if",
"(",
"self",
".",
"step",
"==",
"1",
")",
"or",
"(",
"self",
".",
"step",
"%",
"self",
".",
"PROGRESS_STEP",
"==",
"0",
")",
":",
"self",
".",
"progress",
"(",
"'Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'",
".",
"format",
"(",
"total_loss",
".",
"cpu",
"(",
")",
".",
"item",
"(",
")",
",",
"grad_norm",
",",
"self",
".",
"timer",
".",
"show",
"(",
")",
")",
")",
"self",
".",
"write_log",
"(",
"'loss'",
",",
"{",
"'tr_ctc'",
":",
"ctc_loss",
",",
"'tr_att'",
":",
"att_loss",
"}",
")",
"self",
".",
"write_log",
"(",
"'emb_loss'",
",",
"{",
"'tr'",
":",
"emb_loss",
"}",
")",
"self",
".",
"write_log",
"(",
"'wer'",
",",
"{",
"'tr_att'",
":",
"cal_er",
"(",
"self",
".",
"tokenizer",
",",
"att_output",
",",
"txt",
")",
",",
"'tr_ctc'",
":",
"cal_er",
"(",
"self",
".",
"tokenizer",
",",
"ctc_output",
",",
"txt",
",",
"ctc",
"=",
"True",
")",
"}",
")",
"if",
"self",
".",
"emb_fuse",
":",
"if",
"self",
".",
"emb_decoder",
".",
"fuse_learnable",
":",
"self",
".",
"write_log",
"(",
"'fuse_lambda'",
",",
"{",
"'emb'",
":",
"self",
".",
"emb_decoder",
".",
"get_weight",
"(",
")",
"}",
")",
"self",
".",
"write_log",
"(",
"'fuse_temp'",
",",
"{",
"'temp'",
":",
"self",
".",
"emb_decoder",
".",
"get_temp",
"(",
")",
"}",
")",
"# Validation",
"if",
"(",
"self",
".",
"step",
"==",
"1",
")",
"or",
"(",
"self",
".",
"step",
"%",
"self",
".",
"valid_step",
"==",
"0",
")",
":",
"self",
".",
"validate",
"(",
")",
"# End of step",
"# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")",
"self",
".",
"timer",
".",
"set",
"(",
")",
"if",
"self",
".",
"step",
">",
"self",
".",
"max_step",
":",
"break",
"n_epochs",
"+=",
"1",
"self",
".",
"log",
".",
"close",
"(",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_asr.py#L77-L167 |
||
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_lm.py | python | Solver.fetch_data | (self, data) | return txt, txt_len | Move data to device, insert <sos> and compute text seq. length | Move data to device, insert <sos> and compute text seq. length | [
"Move",
"data",
"to",
"device",
"insert",
"<sos",
">",
"and",
"compute",
"text",
"seq",
".",
"length"
] | def fetch_data(self, data):
''' Move data to device, insert <sos> and compute text seq. length'''
txt = torch.cat(
(torch.zeros((data.shape[0], 1), dtype=torch.long), data), dim=1).to(self.device)
txt_len = torch.sum(data != 0, dim=-1)
return txt, txt_len | [
"def",
"fetch_data",
"(",
"self",
",",
"data",
")",
":",
"txt",
"=",
"torch",
".",
"cat",
"(",
"(",
"torch",
".",
"zeros",
"(",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
",",
"dtype",
"=",
"torch",
".",
"long",
")",
",",
"data",
")",
",",
"dim",
"=",
"1",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"txt_len",
"=",
"torch",
".",
"sum",
"(",
"data",
"!=",
"0",
",",
"dim",
"=",
"-",
"1",
")",
"return",
"txt",
",",
"txt_len"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_lm.py#L18-L23 |
|
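A runnable toy version of the <sos> insertion and length computation in fetch_data (index 0 doubles as padding and the prepended start symbol, matching the zeros used above):

    import torch

    data = torch.tensor([[5, 6, 0], [7, 8, 9]])   # 0 = padding
    txt = torch.cat(
        (torch.zeros((data.shape[0], 1), dtype=torch.long), data), dim=1)
    txt_len = torch.sum(data != 0, dim=-1)
    print(txt)      # [[0, 5, 6, 0], [0, 7, 8, 9]]
    print(txt_len)  # [2, 3]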
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_lm.py | python | Solver.load_data | (self) | Load data for training/validation, store tokenizer and input/output shape | Load data for training/validation, store tokenizer and input/output shape | [
"Load",
"data",
"for",
"training",
"/",
"validation",
"store",
"tokenizer",
"and",
"input",
"/",
"output",
"shape"
] | def load_data(self):
''' Load data for training/validation, store tokenizer and input/output shape'''
self.tr_set, self.dv_set, self.vocab_size, self.tokenizer, msg = \
load_textset(self.paras.njobs, self.paras.gpu,
self.paras.pin_memory, **self.config['data'])
self.verbose(msg) | [
"def",
"load_data",
"(",
"self",
")",
":",
"self",
".",
"tr_set",
",",
"self",
".",
"dv_set",
",",
"self",
".",
"vocab_size",
",",
"self",
".",
"tokenizer",
",",
"msg",
"=",
"load_textset",
"(",
"self",
".",
"paras",
".",
"njobs",
",",
"self",
".",
"paras",
".",
"gpu",
",",
"self",
".",
"paras",
".",
"pin_memory",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'data'",
"]",
")",
"self",
".",
"verbose",
"(",
"msg",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_lm.py#L25-L30 |
||
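load_data forwards `self.config['data']` to load_textset via dict unpacking. A self-contained illustration of that pattern (the keys and the local load_textset below are made up for the demo; the real schema lives in the repo's YAML configs):

    # Hypothetical keys, for illustrating **config['data'] only.
    config = {'data': {'batch_size': 32, 'path': 'corpus/'}}

    def load_textset(njobs, gpu, pin_memory, batch_size, path):
        # Stand-in for the repo's loader; returns a log message here.
        return '{} x{} ({} workers)'.format(path, batch_size, njobs)

    print(load_textset(4, True, True, **config['data']))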
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_lm.py | python | Solver.set_model | (self) | | Setup language model and optimizer | Setup language model and optimizer | [
"Setup",
"ASR",
"model",
"and",
"optimizer"
] | def set_model(self):
''' Setup language model and optimizer '''
# Model
self.model = RNNLM(self.vocab_size, **
self.config['model']).to(self.device)
self.verbose(self.model.create_msg())
# Losses
self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
# Optimizer
self.optimizer = Optimizer(
self.model.parameters(), **self.config['hparas'])
# Enable AMP if needed
self.enable_apex()
# load pre-trained model
if self.paras.load:
self.load_ckpt()
ckpt = torch.load(self.paras.load, map_location=self.device)
self.model.load_state_dict(ckpt['model'])
self.optimizer.load_opt_state_dict(ckpt['optimizer'])
self.step = ckpt['global_step']
self.verbose('Load ckpt from {}, restarting at step {}'.format(
self.paras.load, self.step)) | [
"def",
"set_model",
"(",
"self",
")",
":",
"# Model",
"self",
".",
"model",
"=",
"RNNLM",
"(",
"self",
".",
"vocab_size",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'model'",
"]",
")",
".",
"to",
"(",
"self",
".",
"device",
")",
"self",
".",
"verbose",
"(",
"self",
".",
"model",
".",
"create_msg",
"(",
")",
")",
"# Losses",
"self",
".",
"seq_loss",
"=",
"torch",
".",
"nn",
".",
"CrossEntropyLoss",
"(",
"ignore_index",
"=",
"0",
")",
"# Optimizer",
"self",
".",
"optimizer",
"=",
"Optimizer",
"(",
"self",
".",
"model",
".",
"parameters",
"(",
")",
",",
"*",
"*",
"self",
".",
"config",
"[",
"'hparas'",
"]",
")",
"# Enable AMP if needed",
"self",
".",
"enable_apex",
"(",
")",
"# load pre-trained model",
"if",
"self",
".",
"paras",
".",
"load",
":",
"self",
".",
"load_ckpt",
"(",
")",
"ckpt",
"=",
"torch",
".",
"load",
"(",
"self",
".",
"paras",
".",
"load",
",",
"map_location",
"=",
"self",
".",
"device",
")",
"self",
".",
"model",
".",
"load_state_dict",
"(",
"ckpt",
"[",
"'model'",
"]",
")",
"self",
".",
"optimizer",
".",
"load_opt_state_dict",
"(",
"ckpt",
"[",
"'optimizer'",
"]",
")",
"self",
".",
"step",
"=",
"ckpt",
"[",
"'global_step'",
"]",
"self",
".",
"verbose",
"(",
"'Load ckpt from {}, restarting at step {}'",
".",
"format",
"(",
"self",
".",
"paras",
".",
"load",
",",
"self",
".",
"step",
")",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_lm.py#L32-L54 |
||
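The loads in set_model fix the checkpoint layout: a dict with 'model', 'optimizer', and 'global_step' entries. A sketch of writing a compatible file (a toy module stands in for RNNLM, and the filename is a placeholder):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 4)                    # stand-in for RNNLM
    optim = torch.optim.Adam(model.parameters())
    ckpt = {
        'model': model.state_dict(),           # read back via load_state_dict
        'optimizer': optim.state_dict(),       # read back via load_opt_state_dict
        'global_step': 1000,                   # restored into self.step
    }
    torch.save(ckpt, 'rnnlm_ckpt.pth')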
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | bin/train_lm.py | python | Solver.exec | (self) | | Training RNN language model | Training RNN language model | [
"Training",
"End",
"-",
"to",
"-",
"end",
"ASR",
"system"
] | def exec(self):
''' Training RNN language model '''
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
self.timer.set()
while self.step < self.max_step:
for data in self.tr_set:
# Pre-step : update tf_rate/lr_rate and do zero_grad
self.optimizer.pre_step(self.step)
# Fetch data
txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
# Forward model
pred, _ = self.model(txt[:, :-1], txt_len)
# Compute all objectives
lm_loss = self.seq_loss(
pred.view(-1, self.vocab_size), txt[:, 1:].reshape(-1))
self.timer.cnt('fw')
# Backprop
grad_norm = self.backward(lm_loss)
self.step += 1
# Logger
if self.step % self.PROGRESS_STEP == 0:
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(lm_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log('entropy', {'tr': lm_loss})
self.write_log(
'perplexity', {'tr': torch.exp(lm_loss).cpu().item()})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
self.timer.set()
if self.step > self.max_step:
break
self.log.close() | [
"def",
"exec",
"(",
"self",
")",
":",
"self",
".",
"verbose",
"(",
"'Total training steps {}.'",
".",
"format",
"(",
"human_format",
"(",
"self",
".",
"max_step",
")",
")",
")",
"self",
".",
"timer",
".",
"set",
"(",
")",
"while",
"self",
".",
"step",
"<",
"self",
".",
"max_step",
":",
"for",
"data",
"in",
"self",
".",
"tr_set",
":",
"# Pre-step : update tf_rate/lr_rate and do zero_grad",
"self",
".",
"optimizer",
".",
"pre_step",
"(",
"self",
".",
"step",
")",
"# Fetch data",
"txt",
",",
"txt_len",
"=",
"self",
".",
"fetch_data",
"(",
"data",
")",
"self",
".",
"timer",
".",
"cnt",
"(",
"'rd'",
")",
"# Forward model",
"pred",
",",
"_",
"=",
"self",
".",
"model",
"(",
"txt",
"[",
":",
",",
":",
"-",
"1",
"]",
",",
"txt_len",
")",
"# Compute all objectives",
"lm_loss",
"=",
"self",
".",
"seq_loss",
"(",
"pred",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"vocab_size",
")",
",",
"txt",
"[",
":",
",",
"1",
":",
"]",
".",
"reshape",
"(",
"-",
"1",
")",
")",
"self",
".",
"timer",
".",
"cnt",
"(",
"'fw'",
")",
"# Backprop",
"grad_norm",
"=",
"self",
".",
"backward",
"(",
"lm_loss",
")",
"self",
".",
"step",
"+=",
"1",
"# Logger",
"if",
"self",
".",
"step",
"%",
"self",
".",
"PROGRESS_STEP",
"==",
"0",
":",
"self",
".",
"progress",
"(",
"'Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'",
".",
"format",
"(",
"lm_loss",
".",
"cpu",
"(",
")",
".",
"item",
"(",
")",
",",
"grad_norm",
",",
"self",
".",
"timer",
".",
"show",
"(",
")",
")",
")",
"self",
".",
"write_log",
"(",
"'entropy'",
",",
"{",
"'tr'",
":",
"lm_loss",
"}",
")",
"self",
".",
"write_log",
"(",
"'perplexity'",
",",
"{",
"'tr'",
":",
"torch",
".",
"exp",
"(",
"lm_loss",
")",
".",
"cpu",
"(",
")",
".",
"item",
"(",
")",
"}",
")",
"# Validation",
"if",
"(",
"self",
".",
"step",
"==",
"1",
")",
"or",
"(",
"self",
".",
"step",
"%",
"self",
".",
"valid_step",
"==",
"0",
")",
":",
"self",
".",
"validate",
"(",
")",
"# End of step",
"self",
".",
"timer",
".",
"set",
"(",
")",
"if",
"self",
".",
"step",
">",
"self",
".",
"max_step",
":",
"break",
"self",
".",
"log",
".",
"close",
"(",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/bin/train_lm.py#L56-L99 |
||
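The loop above trains next-token prediction: the model reads `txt[:, :-1]` and is scored against `txt[:, 1:]`, and perplexity is just the exponential of the logged cross-entropy. A toy rendering of both:

    import torch

    txt = torch.tensor([[0, 5, 6, 7]])          # leading 0 is the prepended <sos>
    inputs, targets = txt[:, :-1], txt[:, 1:]   # predict token t+1 from tokens <= t
    lm_loss = torch.tensor(3.0)                 # stand-in cross-entropy value
    perplexity = torch.exp(lm_loss)             # ~20.09, as written to the log above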
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | corpus/librispeech.py | python | read_text | (file) | Get transcription of target wave file,
it's somewhat redundant for accessing each txt multiple times,
but it works fine with multi-thread | Get transcription of target wave file,
it's somewhat redundant for accessing each txt multiple times,
but it works fine with multi-thread | [
"Get",
"transcription",
"of",
"target",
"wave",
"file",
"it",
"s",
"somewhat",
"redundant",
"for",
"accessing",
"each",
"txt",
"multiplt",
"times",
"but",
"it",
"works",
"fine",
"with",
"multi",
"-",
"thread"
] | def read_text(file):
'''Get transcription of target wave file,
it's somewhat redundant for accessing each txt multiple times,
but it works fine with multi-thread'''
src_file = '-'.join(file.split('-')[:-1])+'.trans.txt'
idx = file.split('/')[-1].split('.')[0]
with open(src_file, 'r') as fp:
for line in fp:
if idx == line.split(' ')[0]:
return line[:-1].split(' ', 1)[1] | [
"def",
"read_text",
"(",
"file",
")",
":",
"src_file",
"=",
"'-'",
".",
"join",
"(",
"file",
".",
"split",
"(",
"'-'",
")",
"[",
":",
"-",
"1",
"]",
")",
"+",
"'.trans.txt'",
"idx",
"=",
"file",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"with",
"open",
"(",
"src_file",
",",
"'r'",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"if",
"idx",
"==",
"line",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
":",
"return",
"line",
"[",
":",
"-",
"1",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"1",
"]"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/corpus/librispeech.py#L15-L25 |
||
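The path arithmetic in read_text assumes the standard LibriSpeech layout, where each chapter directory carries one `<speaker>-<chapter>.trans.txt` listing all its utterances. A standalone trace (the ids below are illustrative):

    # Each line of the trans file looks like: '84-121123-0001 GO DO YOU HEAR'
    file = 'LibriSpeech/train-clean-100/84/121123/84-121123-0001.flac'
    src_file = '-'.join(file.split('-')[:-1]) + '.trans.txt'
    idx = file.split('/')[-1].split('.')[0]
    print(src_file)  # LibriSpeech/train-clean-100/84/121123/84-121123.trans.txt
    print(idx)       # 84-121123-0001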
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/bert_embedding.py | python | generate_embedding | (bert_model, labels) | return embedding | Generate bert's embedding from fine-tuned model. | Generate bert's embedding from fine-tuned model. | [
"Generate",
"bert",
"s",
"embedding",
"from",
"fine",
"-",
"tuned",
"model",
"."
] | def generate_embedding(bert_model, labels):
"""Generate bert's embedding from fine-tuned model."""
batch_size, time = labels.shape
cls_ids = torch.full(
(batch_size, 1), bert_model.bert_text_encoder.cls_idx, dtype=labels.dtype, device=labels.device)
bert_labels = torch.cat([cls_ids, labels], 1)
# replace eos with sep
eos_idx = bert_model.bert_text_encoder.eos_idx
sep_idx = bert_model.bert_text_encoder.sep_idx
bert_labels[bert_labels == eos_idx] = sep_idx
embedding, _ = bert_model.bert(bert_labels, output_all_encoded_layers=True)
# sum over all layers embedding
embedding = torch.stack(embedding).sum(0)
# get rid of cls
embedding = embedding[:, 1:]
assert labels.shape == embedding.shape[:-1]
return embedding | [
"def",
"generate_embedding",
"(",
"bert_model",
",",
"labels",
")",
":",
"batch_size",
",",
"time",
"=",
"labels",
".",
"shape",
"cls_ids",
"=",
"torch",
".",
"full",
"(",
"(",
"batch_size",
",",
"1",
")",
",",
"bert_model",
".",
"bert_text_encoder",
".",
"cls_idx",
",",
"dtype",
"=",
"labels",
".",
"dtype",
",",
"device",
"=",
"labels",
".",
"device",
")",
"bert_labels",
"=",
"torch",
".",
"cat",
"(",
"[",
"cls_ids",
",",
"labels",
"]",
",",
"1",
")",
"# replace eos with sep",
"eos_idx",
"=",
"bert_model",
".",
"bert_text_encoder",
".",
"eos_idx",
"sep_idx",
"=",
"bert_model",
".",
"bert_text_encoder",
".",
"sep_idx",
"bert_labels",
"[",
"bert_labels",
"==",
"eos_idx",
"]",
"=",
"sep_idx",
"embedding",
",",
"_",
"=",
"bert_model",
".",
"bert",
"(",
"bert_labels",
",",
"output_all_encoded_layers",
"=",
"True",
")",
"# sum over all layers embedding",
"embedding",
"=",
"torch",
".",
"stack",
"(",
"embedding",
")",
".",
"sum",
"(",
"0",
")",
"# get rid of cls",
"embedding",
"=",
"embedding",
"[",
":",
",",
"1",
":",
"]",
"assert",
"labels",
".",
"shape",
"==",
"embedding",
".",
"shape",
"[",
":",
"-",
"1",
"]",
"return",
"embedding"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/bert_embedding.py#L38-L58 |
|
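The assert at the end of generate_embedding pins down the shapes: for labels of shape (B, T), a [CLS] column is prepended before BERT and stripped afterwards, so the summed embedding returns as (B, T, H), one vector per target token. A toy shape check with random tensors (no BERT involved):

    import torch

    B, T, H = 2, 5, 768
    labels = torch.zeros(B, T, dtype=torch.long)
    embedding = torch.randn(B, T + 1, H)[:, 1:]   # drop the [CLS] position
    assert labels.shape == embedding.shape[:-1]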
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/bert_embedding.py | python | load_fine_tuned_model | (bert_model, text_encoder, path) | return model | Load fine-tuned bert model given text encoder and checkpoint path. | Load fine-tuned bert model given text encoder and checkpoint path. | [
"Load",
"fine",
"-",
"tuned",
"bert",
"model",
"given",
"text",
"encoder",
"and",
"checkpoint",
"path",
"."
] | def load_fine_tuned_model(bert_model, text_encoder, path):
"""Load fine-tuned bert model given text encoder and checkpoint path."""
bert_text_encoder = BertLikeSentencePieceTextEncoder(text_encoder)
model = BertForMaskedLM.from_pretrained(bert_model)
model.bert_text_encoder = bert_text_encoder
model.bert.embeddings.word_embeddings = nn.Embedding(
bert_text_encoder.vocab_size, model.bert.embeddings.word_embeddings.weight.shape[1])
model.config.vocab_size = bert_text_encoder.vocab_size
model.cls = BertOnlyMLMHead(
model.config, model.bert.embeddings.word_embeddings.weight)
model.load_state_dict(torch.load(path))
return model | [
"def",
"load_fine_tuned_model",
"(",
"bert_model",
",",
"text_encoder",
",",
"path",
")",
":",
"bert_text_encoder",
"=",
"BertLikeSentencePieceTextEncoder",
"(",
"text_encoder",
")",
"model",
"=",
"BertForMaskedLM",
".",
"from_pretrained",
"(",
"bert_model",
")",
"model",
".",
"bert_text_encoder",
"=",
"bert_text_encoder",
"model",
".",
"bert",
".",
"embeddings",
".",
"word_embeddings",
"=",
"nn",
".",
"Embedding",
"(",
"bert_text_encoder",
".",
"vocab_size",
",",
"model",
".",
"bert",
".",
"embeddings",
".",
"word_embeddings",
".",
"weight",
".",
"shape",
"[",
"1",
"]",
")",
"model",
".",
"config",
".",
"vocab_size",
"=",
"bert_text_encoder",
".",
"vocab_size",
"model",
".",
"cls",
"=",
"BertOnlyMLMHead",
"(",
"model",
".",
"config",
",",
"model",
".",
"bert",
".",
"embeddings",
".",
"word_embeddings",
".",
"weight",
")",
"model",
".",
"load_state_dict",
"(",
"torch",
".",
"load",
"(",
"path",
")",
")",
"return",
"model"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/bert_embedding.py#L61-L75 |
|
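A hypothetical call, for orientation only; the model name and checkpoint path are placeholders, and `text_encoder` is assumed to be the repo's SentencePiece text encoder instance:

    # Placeholder arguments; adjust to your tokenizer and checkpoint.
    bert = load_fine_tuned_model('bert-base-uncased', text_encoder, 'bert_mlm.pth')
    bert.eval()   # then feed it to generate_embedding(bert, labels)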
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/solver.py | python | BaseSolver.backward | (self, loss) | return grad_norm | Standard backward step with self.timer and debugger
Arguments
loss - the loss to perform loss.backward() | Standard backward step with self.timer and debugger
Arguments
loss - the loss to perform loss.backward() | [
"Standard",
"backward",
"step",
"with",
"self",
".",
"timer",
"and",
"debugger",
"Arguments",
"loss",
"-",
"the",
"loss",
"to",
"perform",
"loss",
".",
"backward",
"()"
] | def backward(self, loss):
'''
Standard backward step with self.timer and debugger
Arguments
loss - the loss to perform loss.backward()
'''
self.timer.set()
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.GRAD_CLIP)
if math.isnan(grad_norm):
self.verbose('Error : grad norm is NaN @ step '+str(self.step))
else:
self.optimizer.step()
self.timer.cnt('bw')
return grad_norm | [
"def",
"backward",
"(",
"self",
",",
"loss",
")",
":",
"self",
".",
"timer",
".",
"set",
"(",
")",
"loss",
".",
"backward",
"(",
")",
"grad_norm",
"=",
"torch",
".",
"nn",
".",
"utils",
".",
"clip_grad_norm_",
"(",
"self",
".",
"model",
".",
"parameters",
"(",
")",
",",
"self",
".",
"GRAD_CLIP",
")",
"if",
"math",
".",
"isnan",
"(",
"grad_norm",
")",
":",
"self",
".",
"verbose",
"(",
"'Error : grad norm is NaN @ step '",
"+",
"str",
"(",
"self",
".",
"step",
")",
")",
"else",
":",
"self",
".",
"optimizer",
".",
"step",
"(",
")",
"self",
".",
"timer",
".",
"cnt",
"(",
"'bw'",
")",
"return",
"grad_norm"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/solver.py#L76-L91 |
|
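The NaN guard in backward can be reproduced standalone; a minimal version of its clip-then-check sequence (toy model, a GRAD_CLIP of 5.0 assumed):

    import math
    import torch

    model = torch.nn.Linear(4, 1)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
    if math.isnan(grad_norm):
        print('NaN grad norm: skip the optimizer step')  # mirrors the guard above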
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/solver.py | python | BaseSolver.load_ckpt | (self) | Load ckpt if --load option is specified | Load ckpt if --load option is specified | [
"Load",
"ckpt",
"if",
"--",
"load",
"option",
"is",
"specified"
] | def load_ckpt(self):
''' Load ckpt if --load option is specified '''
if self.paras.load:
# Load weights
ckpt = torch.load(
self.paras.load, map_location=self.device if self.mode == 'train' else 'cpu')
self.model.load_state_dict(ckpt['model'])
if self.emb_decoder is not None:
self.emb_decoder.load_state_dict(ckpt['emb_decoder'])
# if self.amp:
# amp.load_state_dict(ckpt['amp'])
# Load task-dependent items
metric = "None"
score = 0.0
for k, v in ckpt.items():
if type(v) is float:
metric, score = k, v
if self.mode == 'train':
self.step = ckpt['global_step']
self.optimizer.load_opt_state_dict(ckpt['optimizer'])
self.verbose('Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'.format(
self.paras.load, self.step, metric, score))
else:
self.model.eval()
if self.emb_decoder is not None:
self.emb_decoder.eval()
self.verbose('Evaluation target = {} (recorded {} = {:.2f} %)'.format(self.paras.load, metric, score)) | [
"def",
"load_ckpt",
"(",
"self",
")",
":",
"if",
"self",
".",
"paras",
".",
"load",
":",
"# Load weights",
"ckpt",
"=",
"torch",
".",
"load",
"(",
"self",
".",
"paras",
".",
"load",
",",
"map_location",
"=",
"self",
".",
"device",
"if",
"self",
".",
"mode",
"==",
"'train'",
"else",
"'cpu'",
")",
"self",
".",
"model",
".",
"load_state_dict",
"(",
"ckpt",
"[",
"'model'",
"]",
")",
"if",
"self",
".",
"emb_decoder",
"is",
"not",
"None",
":",
"self",
".",
"emb_decoder",
".",
"load_state_dict",
"(",
"ckpt",
"[",
"'emb_decoder'",
"]",
")",
"# if self.amp:",
"# amp.load_state_dict(ckpt['amp'])",
"# Load task-dependent items",
"metric",
"=",
"\"None\"",
"score",
"=",
"0.0",
"for",
"k",
",",
"v",
"in",
"ckpt",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"v",
")",
"is",
"float",
":",
"metric",
",",
"score",
"=",
"k",
",",
"v",
"if",
"self",
".",
"mode",
"==",
"'train'",
":",
"self",
".",
"step",
"=",
"ckpt",
"[",
"'global_step'",
"]",
"self",
".",
"optimizer",
".",
"load_opt_state_dict",
"(",
"ckpt",
"[",
"'optimizer'",
"]",
")",
"self",
".",
"verbose",
"(",
"'Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'",
".",
"format",
"(",
"self",
".",
"paras",
".",
"load",
",",
"self",
".",
"step",
",",
"metric",
",",
"score",
")",
")",
"else",
":",
"self",
".",
"model",
".",
"eval",
"(",
")",
"if",
"self",
".",
"emb_decoder",
"is",
"not",
"None",
":",
"self",
".",
"emb_decoder",
".",
"eval",
"(",
")",
"self",
".",
"verbose",
"(",
"'Evaluation target = {} (recorded {} = {:.2f} %)'",
".",
"format",
"(",
"self",
".",
"paras",
".",
"load",
",",
"metric",
",",
"score",
")",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/solver.py#L93-L119 |
||
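The `type(v) is float` scan in load_ckpt implies each checkpoint carries exactly one float-valued entry naming the recorded metric. A sketch of a compatible dict (the metric name 'wer' is an illustrative assumption):

    ckpt = {
        'model': {},           # state-dict placeholder; skipped by the float scan
        'optimizer': {},
        'global_step': 20000,  # int, also skipped
        'wer': 12.34,          # the single float entry -> (metric, score)
    }
    metric, score = next((k, v) for k, v in ckpt.items() if type(v) is float)
    print(metric, score)       # wer 12.34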
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/solver.py | python | BaseSolver.verbose | (self, msg) | | Verbose function for printing information to stdout | Verbose function for printing information to stdout | [
"Verbose",
"function",
"for",
"print",
"information",
"to",
"stdout"
] | def verbose(self, msg):
''' Verbose function for printing information to stdout'''
if self.paras.verbose:
if type(msg) == list:
for m in msg:
print('[INFO]', m.ljust(100))
else:
print('[INFO]', msg.ljust(100)) | [
"def",
"verbose",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"paras",
".",
"verbose",
":",
"if",
"type",
"(",
"msg",
")",
"==",
"list",
":",
"for",
"m",
"in",
"msg",
":",
"print",
"(",
"'[INFO]'",
",",
"m",
".",
"ljust",
"(",
"100",
")",
")",
"else",
":",
"print",
"(",
"'[INFO]'",
",",
"msg",
".",
"ljust",
"(",
"100",
")",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/solver.py#L121-L128 |
||
Alexander-H-Liu/End-to-end-ASR-Pytorch | 1103d144423e8e692f1d18cd9db27a96cb49fb9d | src/solver.py | python | BaseSolver.progress | (self, msg) | Verbose function for updating progress on stdout (do not include newline) | Verbose function for updating progress on stdout (do not include newline) | [
"Verbose",
"function",
"for",
"updating",
"progress",
"on",
"stdout",
"(",
"do",
"not",
"include",
"newline",
")"
] | def progress(self, msg):
''' Verbose function for updating progress on stdout (do not include newline) '''
if self.paras.verbose:
sys.stdout.write("\033[K") # Clear line
print('[{}] {}'.format(human_format(self.step), msg), end='\r') | [
"def",
"progress",
"(",
"self",
",",
"msg",
")",
":",
"if",
"self",
".",
"paras",
".",
"verbose",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\033[K\"",
")",
"# Clear line",
"print",
"(",
"'[{}] {}'",
".",
"format",
"(",
"human_format",
"(",
"self",
".",
"step",
")",
",",
"msg",
")",
",",
"end",
"=",
"'\\r'",
")"
] | https://github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch/blob/1103d144423e8e692f1d18cd9db27a96cb49fb9d/src/solver.py#L130-L134 |
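In progress, `\033[K` is the ANSI "erase to end of line" control; combined with `end='\r'` it keeps rewriting one status line in place. A standalone demo:

    import sys
    import time

    for step in range(3):
        sys.stdout.write('\033[K')                      # clear the current line
        print('[{}] working...'.format(step), end='\r')
        time.sleep(0.2)
    print()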