zip (string, lengths 19-109) | filename (string, lengths 4-185) | contents (string, lengths 0-30.1M) | type_annotations (sequence, lengths 0-1.97k) | type_annotation_starts (sequence, lengths 0-1.97k) | type_annotation_ends (sequence, lengths 0-1.97k)
---|---|---|---|---|---
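Each row below pairs an archive and file path with the file's full text and three parallel sequences describing type annotations found in that file. As a minimal sketch of how such a row could be consumed (the Hugging Face `datasets` loader and the dataset identifier below are assumptions, not given in this dump):

```python
# Minimal sketch, assuming this table is served as a Hugging Face dataset;
# the dataset identifier is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-type-annotations", split="train")  # hypothetical name
for row in ds:
    # The three annotation columns are parallel sequences of equal length.
    print(row["zip"], row["filename"], len(row["contents"]), len(row["type_annotations"]))
```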
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_average.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import sockeye.average as average
@pytest.mark.parametrize(
"test_points, expected_top_n, size, maximize", [
([(1.1, 3), (2.2, 2), (3.3, 1)], [(3.3, 1), (2.2, 2), (1.1, 3)], 3, True),
([(1.1, 3), (2.2, 2), (3.3, 1)], [(1.1, 3), (2.2, 2), (3.3, 1)], 3, False),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [(4.4, 1), (3.3, 2), (2.2, 3)], 3, True),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [(4.4, 1), (3.3, 2), (2.2, 3), (1.1, 4)], 5, True)
])
def test_strategy_best(test_points, expected_top_n, size, maximize):
result = average._strategy_best(test_points, size, maximize)
assert result == expected_top_n
@pytest.mark.parametrize(
"test_points, expected_top_n, size, maximize", [
([(1.1, 3), (2.2, 2), (3.3, 1)], [(1.1, 3), (2.2, 2), (3.3, 1)], 3, True),
([(1.1, 3), (2.2, 2), (3.3, 1)], [(1.1, 3)], 3, False),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [(2.2, 3), (3.3, 2), (4.4, 1)], 3, True),
([(2.2, 4), (1.1, 3), (3.3, 2), (4.4, 1)], [(2.2, 4), (1.1, 3)], 3, False),
([(2.2, 4), (1.1, 3), (3.3, 2), (4.4, 1)], [(1.1, 3)], 1, False),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], 5, True)
])
def test_strategy_last(test_points, expected_top_n, size, maximize):
result = average._strategy_last(test_points, size, maximize)
assert result == expected_top_n
@pytest.mark.parametrize(
"test_points, expected_top_n, size, maximize", [
([(1.1, 3), (2.2, 2), (3.3, 1)], [[0, 3.3, 1], [0, 2.2, 2], [0, 1.1, 3]], 3, True),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [[0, 4.4, 1], [0, 3.3, 2], [0, 2.2, 3]], 3, True),
([(3.3, 3), (2.2, 2), (1.1, 1)], [[2, 3.3, 3], [0, 2.2, 2], [0, 1.1, 1]], 3, True),
([(3.3, 3), (2.2, 2), (1.1, 1)], [[0, 1.1, 1], [0, 2.2, 2], [0, 3.3, 3]], 3, False),
([(2.2, 4), (1.1, 3), (3.3, 2), (4.4, 1)], [[1, 2.2, 4], [0, 4.4, 1], [0, 3.3, 2]], 3, True),
([(2.2, 4), (1.1, 3), (3.3, 2), (4.4, 1)], [[2, 1.1, 3]], 1, False),
([(1.1, 4), (2.2, 3), (3.3, 2), (4.4, 1)], [[3, 1.1, 4], [0, 2.2, 3], [0, 3.3, 2], [0, 4.4, 1]], 5, False)
])
def test_strategy_lifespan(test_points, expected_top_n, size, maximize):
result = average._strategy_lifespan(test_points, size, maximize)
assert result == expected_top_n
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_bleu.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import namedtuple
import pytest
from contrib import sacrebleu
EPSILON = 1e-8
Statistics = namedtuple('Statistics', ['common', 'total'])
test_cases = [(["this is a test", "another test"], ["ref1", "ref2"], 0.003799178428257963),
(["this is a test"], ["this is a test"], 1.0),
(["this is a fest"], ["this is a test"], 0.223606797749979)]
test_case_offset = [("am I am a character sequence", "I am a symbol string sequence a a", 0.1555722182, 0)]
# statistic structure:
# - common counts
# - total counts
# - hyp_count
# - ref_count
test_case_statistics = [("am I am a character sequence", "I am a symbol string sequence a a",
Statistics([4, 2, 1, 0], [6, 5, 4, 3]))]
test_case_scoring = [((Statistics([9, 7, 5, 3], [10, 8, 6, 4]), 11, 11), 0.8375922397)]
test_case_effective_order = [(["test"], ["a test"], 0.3678794411714425),
(["a test"], ["a test"], 1.0),
(["a little test"], ["a test"], 0.03218297948685433)]
# testing that right score is returned for null statistics and different offsets
# format: stat, offset, expected score
test_case_degenerate_stats = [((Statistics([0, 0, 0, 0], [4, 4, 2, 1]), 0, 1), 0.0, 0.0),
((Statistics([0, 0, 0, 0], [10, 11, 12, 0]), 14, 10), 0.0, 0.0),
((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 0, 0), 0.0, 0.0),
((Statistics([6, 5, 4, 0], [6, 5, 4, 3]), 6, 6), 0.0, 0.0),
((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 0, 0), 0.1, 0.0),
((Statistics([0, 0, 0, 0], [0, 0, 0, 0]), 1, 5), 0.01, 0.0)]
test_cases_uneven = [(["I am one sentence"], ["But I", "am two"]),
(["And I", "am a number of sentences", "three actually"], ["Compared to just one reference"])]
@pytest.mark.parametrize("hypotheses, references, expected_bleu", test_cases)
def test_bleu(hypotheses, references, expected_bleu):
bleu = sacrebleu.raw_corpus_bleu(hypotheses, [references], .01).score / 100
assert abs(bleu - expected_bleu) < EPSILON
@pytest.mark.parametrize("hypotheses, references, expected_bleu", test_case_effective_order)
def test_effective_order(hypotheses, references, expected_bleu):
bleu = sacrebleu.raw_corpus_bleu(hypotheses, [references], .01).score / 100
assert abs(bleu - expected_bleu) < EPSILON
@pytest.mark.parametrize("hypothesis, reference, expected_stat", test_case_statistics)
def test_statistics(hypothesis, reference, expected_stat):
result = sacrebleu.raw_corpus_bleu(hypothesis, reference, .01)
stat = Statistics(result.counts, result.totals)
assert stat == expected_stat
@pytest.mark.parametrize("statistics, expected_score", test_case_scoring)
def test_scoring(statistics, expected_score):
score = sacrebleu.compute_bleu(statistics[0].common, statistics[0].total, statistics[1], statistics[2]).score / 100
assert abs(score - expected_score) < EPSILON
@pytest.mark.parametrize("hypothesis, reference, expected_with_offset, expected_without_offset",
test_case_offset)
def test_offset(hypothesis, reference, expected_with_offset, expected_without_offset):
score_without_offset = sacrebleu.raw_corpus_bleu(hypothesis, reference, 0.0).score / 100
assert abs(expected_without_offset - score_without_offset) < EPSILON
score_with_offset = sacrebleu.raw_corpus_bleu(hypothesis, reference, 0.1).score / 100
assert abs(expected_with_offset - score_with_offset) < EPSILON
@pytest.mark.parametrize("statistics, offset, expected_score", test_case_degenerate_stats)
def test_degenerate_statistics(statistics, offset, expected_score):
score = sacrebleu.compute_bleu(statistics[0].common, statistics[0].total, statistics[1], statistics[2],
smooth='floor', smooth_floor=offset).score / 100
assert score == expected_score
@pytest.mark.parametrize("hypotheses, references", test_cases_uneven)
def test_degenerate_uneven(hypotheses, references):
with pytest.raises(EOFError, match=r'.*stream.*'):
sacrebleu.raw_corpus_bleu(hypotheses, references)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_callback.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Tests sockeye.callback.TrainingMonitor optimization logic
"""
import os
import tempfile
import numpy as np
import pytest
from sockeye import callback
from sockeye import constants as C
from sockeye import utils
test_constants = [('perplexity', np.inf,
[{'perplexity': 100.0, '_': 42}, {'perplexity': 50.0}, {'perplexity': 60.0}, {'perplexity': 80.0}],
[{'perplexity': 200.0}, {'perplexity': 100.0}, {'perplexity': 100.001}, {'perplexity': 99.99}],
[True, True, False, True]),
('accuracy', 0.0,
[{'accuracy': 100.0}, {'accuracy': 50.0}, {'accuracy': 60.0}, {'accuracy': 80.0}],
[{'accuracy': 200.0}, {'accuracy': 100.0}, {'accuracy': 100.001}, {'accuracy': 99.99}],
[True, False, False, False])]
class DummyMetric:
def __init__(self, metric_dict):
self.metric_dict = metric_dict
def get_name_value(self):
for metric_name, value in self.metric_dict.items():
yield metric_name, value
@pytest.mark.parametrize("optimized_metric, initial_best, train_metrics, eval_metrics, improved_seq",
test_constants)
def test_callback(optimized_metric, initial_best, train_metrics, eval_metrics, improved_seq):
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 32
monitor = callback.TrainingMonitor(batch_size=batch_size,
output_folder=tmpdir,
optimized_metric=optimized_metric)
assert monitor.optimized_metric == optimized_metric
assert monitor.get_best_validation_score() == initial_best
metrics_fname = os.path.join(tmpdir, C.METRICS_NAME)
for checkpoint, (train_metric, eval_metric, expected_improved) in enumerate(
zip(train_metrics, eval_metrics, improved_seq), 1):
monitor.checkpoint_callback(checkpoint, train_metric)
assert len(monitor.metrics) == checkpoint
assert monitor.metrics[-1] == {k + "-train": v for k, v in train_metric.items()}
improved, best_checkpoint = monitor.eval_end_callback(checkpoint, DummyMetric(eval_metric))
assert {k + "-val" for k in eval_metric.keys()} <= monitor.metrics[-1].keys()
assert improved == expected_improved
assert os.path.exists(metrics_fname)
metrics = utils.read_metrics_file(metrics_fname)
_compare_metrics(metrics, monitor.metrics)
def _compare_metrics(a, b):
assert len(a) == len(b)
for x, y in zip(a, b):
assert len(x.items()) == len(y.items())
for (xk, xv), (yk, yv) in zip(sorted(x.items()), sorted(y.items())):
assert xk == yk
assert xv == pytest.approx(yv)
def test_bleu_requires_checkpoint_decoder():
with pytest.raises(utils.SockeyeError) as e, tempfile.TemporaryDirectory() as tmpdir:
callback.TrainingMonitor(batch_size=1,
output_folder=tmpdir,
optimized_metric='bleu',
cp_decoder=None)
assert "bleu requires CheckpointDecoder" == str(e.value)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_chrf.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import numpy as np
import sockeye.chrf as chrf
@pytest.mark.parametrize("hypothesis, reference, expected_chrf",
[("a b c", "a b c", 1.0),
("a b c", "abc", 1.0),
("", "c", 0.0)])
def test_sentence_chrf(hypothesis, reference, expected_chrf):
value = chrf.sentence_chrf(hypothesis, reference)
assert np.isclose(value, expected_chrf)
@pytest.mark.parametrize("hypotheses, references, expected_chrf",
[(["a b c"], ["a b c"], 1.0),
(["a b c"], ["abc"], 1.0),
([""], ["c"], 0.0),
(["a", "b"], ["a", "c"], 0.5)])
def test_corpus_chrf(hypotheses, references, expected_chrf):
value = chrf.corpus_chrf(hypotheses, references)
assert np.isclose(value, expected_chrf)
| [] | [] | []
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_config.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import tempfile
import os
import pytest
from sockeye import config
class ConfigTest(config.Config):
yaml_tag = "!ConfigTest"
def __init__(self, param, config=None):
super().__init__()
self.param = param
self.config = config
def test_base_freeze():
c = config.Config()
c.param = 1
assert c.param == 1
c.freeze()
with pytest.raises(AttributeError) as e:
c.param = 2
assert str(e.value) == "Cannot set 'param' in frozen config"
def test_freeze():
c1 = ConfigTest(param=1)
c2 = ConfigTest(param=3)
c1.param = 2
assert c1.param == 2
c1.config = c2
assert c2 == c1.config
c1.config.param = 2
assert c1.config.param == 2
c1.freeze()
assert c1.config._frozen # pylint: disable= no-member
assert c2._frozen # pylint: disable= no-member
with pytest.raises(AttributeError) as e:
c1.param = 3
assert str(e.value) == "Cannot set 'param' in frozen config"
with pytest.raises(AttributeError) as e:
c1.config.param = 3
assert str(e.value) == "Cannot set 'param' in frozen config"
def test_config_repr():
c1 = ConfigTest(param=1, config=ConfigTest(param=3))
c1.config.freeze()
assert str(c1) == "Config[_frozen=False, config=Config[_frozen=True, config=None, param=3], param=1]"
def test_eq():
basic_c = config.Config()
c1 = ConfigTest(param=1)
c1_other = ConfigTest(param=1)
c2 = ConfigTest(param=2)
c_nested = ConfigTest(param=1, config=c1)
c_nested_other = ConfigTest(param=1, config=c1_other)
c_nested_c2 = ConfigTest(param=1, config=c2)
assert c1 != "OTHER_TYPE"
assert c1 != basic_c
assert c1 == c1_other
assert c1 != c2
assert c_nested == c_nested_other
assert c_nested != c_nested_c2
def test_no_self_attribute():
c1 = ConfigTest(param=1)
with pytest.raises(AttributeError) as e:
c1.config = c1
assert str(e.value) == "Cannot set self as attribute"
def test_serialization():
c1 = ConfigTest(param=1, config=ConfigTest(param=2))
expected_serialization = """!ConfigTest
config: !ConfigTest
config: null
param: 2
param: 1
"""
with tempfile.TemporaryDirectory() as tmp_dir:
fname = os.path.join(tmp_dir, "config")
c1.freeze()
c1.save(fname)
assert os.path.exists(fname)
with open(fname) as f:
assert f.read() == expected_serialization
c2 = config.Config.load(fname)
assert c2.param == c1.param
assert c2.config.param == c1.config.param
assert not c2._frozen
def test_copy():
c1 = ConfigTest(param=1)
copy_c1 = c1.copy()
# should be a different object that is equal to the original object
assert c1 is not copy_c1
assert c1 == copy_c1
# optionally you can modify attributes when copying:
mod_c1 = ConfigTest(param=5)
mod_copy_c1 = c1.copy(param=5)
assert mod_c1 is not mod_copy_c1
assert mod_c1 == mod_copy_c1
assert c1 != mod_copy_c1
class ConfigWithMissingAttributes(config.Config):
def __init__(self, existing_attribute, new_attribute="new_attribute"):
super().__init__()
self.existing_attribute = existing_attribute
self.new_attribute = new_attribute
def test_missing_attributes_filled_with_default():
# When we load a configuration object that does not contain all the attributes of the current version of the
# configuration object, we expect the missing attributes to be filled with the default values taken from the
# __init__ method.
config_obj = config.Config.load("test/data/config_with_missing_attributes.yaml")
assert config_obj.new_attribute == "new_attribute"
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_coverage.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from unittest.mock import patch
import mxnet as mx
import numpy as np
import pytest
import sockeye.coverage
from test.common import gaussian_vector, integer_vector, uniform_vector
activation_types = ["tanh", "sigmoid", "relu", "softrelu"]
def setup_module():
# Store a reference to the original MXNet sequence mask function.
_mask_with_one.original_sequence_mask = mx.sym.SequenceMask
@pytest.mark.parametrize("act_type", activation_types)
def test_activation_coverage(act_type):
# Before running our test we patch MXNet's sequence mask function with a custom implementation. Our custom function
# will call the built in masking operation, but ensure the masking value is the number one. This masking value
# allows for clear test assertions.
_patch_sequence_mask(lambda: _test_activation_coverage(act_type))
def test_gru_coverage():
# Before running our test we patch MXNet's sequence mask function with a custom implementation. Our custom function
# will call the built in masking operation, but ensure the masking value is the number one. This masking value
# allows for clear test assertions.
_patch_sequence_mask(lambda: _test_gru_coverage())
def _test_activation_coverage(act_type):
config_coverage = sockeye.coverage.CoverageConfig(type=act_type, num_hidden=2, layer_normalization=False)
encoder_num_hidden, decoder_num_hidden, source_seq_len, batch_size = 5, 5, 10, 4
# source: (batch_size, source_seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
# prev_hidden: (batch_size, decoder_num_hidden)
prev_hidden = mx.sym.Variable("prev_hidden")
# prev_coverage: (batch_size, source_seq_len, coverage_num_hidden)
prev_coverage = mx.sym.Variable("prev_coverage")
# attention_scores: (batch_size, source_seq_len)
attention_scores = mx.sym.Variable("attention_scores")
source_shape = (batch_size, source_seq_len, encoder_num_hidden)
source_length_shape = (batch_size,)
prev_hidden_shape = (batch_size, decoder_num_hidden)
attention_scores_shape = (batch_size, source_seq_len)
prev_coverage_shape = (batch_size, source_seq_len, config_coverage.num_hidden)
source_data = gaussian_vector(shape=source_shape)
source_length_data = integer_vector(shape=source_length_shape, max_value=source_seq_len)
prev_hidden_data = gaussian_vector(shape=prev_hidden_shape)
prev_coverage_data = gaussian_vector(shape=prev_coverage_shape)
attention_scores_data = uniform_vector(shape=attention_scores_shape)
attention_scores_data = attention_scores_data / np.sum(attention_scores_data)
coverage = sockeye.coverage.get_coverage(config_coverage)
coverage_func = coverage.on(source, source_length, source_seq_len)
updated_coverage = coverage_func(prev_hidden, attention_scores, prev_coverage)
executor = updated_coverage.simple_bind(ctx=mx.cpu(),
source=source_shape,
source_length=source_length_shape,
prev_hidden=prev_hidden_shape,
prev_coverage=prev_coverage_shape,
attention_scores=attention_scores_shape)
executor.arg_dict["source"][:] = source_data
executor.arg_dict["source_length"][:] = source_length_data
executor.arg_dict["prev_hidden"][:] = prev_hidden_data
executor.arg_dict["prev_coverage"][:] = prev_coverage_data
executor.arg_dict["attention_scores"][:] = attention_scores_data
result = executor.forward()
# this is needed to modulate the 0 input. The output changes according to the activation type used.
activation = mx.sym.Activation(name="activation", act_type=act_type)
modulated = activation.eval(ctx=mx.cpu(), activation_data=mx.nd.zeros((1,1)))[0].asnumpy()
new_coverage = result[0].asnumpy()
assert new_coverage.shape == prev_coverage_shape
assert (np.sum(np.sum(new_coverage == modulated, axis=2) != 0, axis=1) == source_length_data).all()
def _test_gru_coverage():
config_coverage = sockeye.coverage.CoverageConfig(type="gru", num_hidden=2, layer_normalization=False)
encoder_num_hidden, decoder_num_hidden, source_seq_len, batch_size = 5, 5, 10, 4
# source: (batch_size, source_seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
# prev_hidden: (batch_size, decoder_num_hidden)
prev_hidden = mx.sym.Variable("prev_hidden")
# prev_coverage: (batch_size, source_seq_len, coverage_num_hidden)
prev_coverage = mx.sym.Variable("prev_coverage")
# attention_scores: (batch_size, source_seq_len)
attention_scores = mx.sym.Variable("attention_scores")
source_shape = (batch_size, source_seq_len, encoder_num_hidden)
source_length_shape = (batch_size,)
prev_hidden_shape = (batch_size, decoder_num_hidden)
attention_scores_shape = (batch_size, source_seq_len)
prev_coverage_shape = (batch_size, source_seq_len, config_coverage.num_hidden)
source_data = gaussian_vector(shape=source_shape)
source_length_data = integer_vector(shape=source_length_shape, max_value=source_seq_len)
prev_hidden_data = gaussian_vector(shape=prev_hidden_shape)
prev_coverage_data = gaussian_vector(shape=prev_coverage_shape)
attention_scores_data = uniform_vector(shape=attention_scores_shape)
attention_scores_data = attention_scores_data / np.sum(attention_scores_data)
coverage = sockeye.coverage.get_coverage(config_coverage)
coverage_func = coverage.on(source, source_length, source_seq_len)
updated_coverage = coverage_func(prev_hidden, attention_scores, prev_coverage)
executor = updated_coverage.simple_bind(ctx=mx.cpu(),
source=source_shape,
source_length=source_length_shape,
prev_hidden=prev_hidden_shape,
prev_coverage=prev_coverage_shape,
attention_scores=attention_scores_shape)
executor.arg_dict["source"][:] = source_data
executor.arg_dict["source_length"][:] = source_length_data
executor.arg_dict["prev_hidden"][:] = prev_hidden_data
executor.arg_dict["prev_coverage"][:] = prev_coverage_data
executor.arg_dict["attention_scores"][:] = attention_scores_data
result = executor.forward()
new_coverage = result[0].asnumpy()
assert new_coverage.shape == prev_coverage_shape
assert (np.sum(np.sum(new_coverage != 1, axis=2) != 0, axis=1) == source_length_data).all()
def _mask_with_one(data, use_sequence_length, sequence_length):
return _mask_with_one.original_sequence_mask(data=data, use_sequence_length=use_sequence_length,
sequence_length=sequence_length, value=1)
def _patch_sequence_mask(test):
# Wrap mx.sym to make it easily patchable. All un-patched methods fall back to their default implementation.
with patch.object(mx, 'sym', wraps=mx.sym) as mxnet_mock:
# Patch Sequence Mask to use ones for padding.
mxnet_mock.SequenceMask = _mask_with_one
test()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_data_io.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import random
from tempfile import TemporaryDirectory
from typing import Optional, List, Tuple
import mxnet as mx
import numpy as np
import pytest
from sockeye import constants as C
from sockeye import data_io
from sockeye import vocab
from sockeye.utils import SockeyeError, get_tokens, seedRNGs
from test.common import tmp_digits_dataset
seedRNGs(12)
define_bucket_tests = [(50, 10, [10, 20, 30, 40, 50]),
(50, 20, [20, 40, 50]),
(50, 50, [50]),
(5, 10, [5]),
(11, 5, [5, 10, 11]),
(19, 10, [10, 19])]
@pytest.mark.parametrize("max_seq_len, step, expected_buckets", define_bucket_tests)
def test_define_buckets(max_seq_len, step, expected_buckets):
buckets = data_io.define_buckets(max_seq_len, step=step)
assert buckets == expected_buckets
define_parallel_bucket_tests = [(50, 50, 10, 1.0, [(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)]),
(50, 50, 10, 0.5,
[(10, 5), (20, 10), (30, 15), (40, 20), (50, 25), (50, 30), (50, 35), (50, 40),
(50, 45), (50, 50)]),
(10, 10, 10, 0.1,
[(10, 2), (10, 3), (10, 4), (10, 5), (10, 6), (10, 7), (10, 8), (10, 9), (10, 10)]),
(10, 5, 10, 0.01, [(10, 2), (10, 3), (10, 4), (10, 5)]),
(50, 50, 10, 2.0,
[(5, 10), (10, 20), (15, 30), (20, 40), (25, 50), (30, 50), (35, 50), (40, 50),
(45, 50), (50, 50)]),
(5, 10, 10, 10.0, [(2, 10), (3, 10), (4, 10), (5, 10)]),
(5, 10, 10, 11.0, [(2, 10), (3, 10), (4, 10), (5, 10)]),
(50, 50, 50, 0.5, [(50, 25), (50, 50)]),
(50, 50, 50, 1.5, [(33, 50), (50, 50)]),
(75, 75, 50, 1.5, [(33, 50), (66, 75), (75, 75)])]
@pytest.mark.parametrize("max_seq_len_source, max_seq_len_target, bucket_width, length_ratio, expected_buckets",
define_parallel_bucket_tests)
def test_define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width, length_ratio, expected_buckets):
buckets = data_io.define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width=bucket_width,
length_ratio=length_ratio)
assert buckets == expected_buckets
get_bucket_tests = [([10, 20, 30, 40, 50], 50, 50),
([10, 20, 30, 40, 50], 11, 20),
([10, 20, 30, 40, 50], 9, 10),
([10, 20, 30, 40, 50], 51, None),
([10, 20, 30, 40, 50], 1, 10),
([10, 20, 30, 40, 50], 0, 10),
([], 50, None)]
@pytest.mark.parametrize("buckets, length, expected_bucket",
get_bucket_tests)
def test_get_bucket(buckets, length, expected_bucket):
bucket = data_io.get_bucket(length, buckets)
assert bucket == expected_bucket
tokens2ids_tests = [(["a", "b", "c"], {"a": 1, "b": 0, "c": 300, C.UNK_SYMBOL: 12}, [1, 0, 300]),
(["a", "x", "c"], {"a": 1, "b": 0, "c": 300, C.UNK_SYMBOL: 12}, [1, 12, 300])]
@pytest.mark.parametrize("tokens, vocab, expected_ids", tokens2ids_tests)
def test_tokens2ids(tokens, vocab, expected_ids):
ids = data_io.tokens2ids(tokens, vocab)
assert ids == expected_ids
@pytest.mark.parametrize("tokens, expected_ids", [(["1", "2", "3", "0"], [1, 2, 3, 0]), ([], [])])
def test_strids2ids(tokens, expected_ids):
ids = data_io.strids2ids(tokens)
assert ids == expected_ids
@pytest.mark.parametrize("ids, expected_string", [([1, 2, 3, 0], "1 2 3 0"), ([], "")])
def test_ids2strids(ids, expected_string):
string = data_io.ids2strids(ids)
assert string == expected_string
sequence_reader_tests = [(["1 2 3", "2", "14", "2 2 2"], False, False),
(["a b c", "c"], True, False),
(["a b c", "c"], True, True)]
@pytest.mark.parametrize("sequences, use_vocab, add_bos", sequence_reader_tests)
def test_sequence_reader(sequences, use_vocab, add_bos):
with TemporaryDirectory() as work_dir:
path = os.path.join(work_dir, 'input')
with open(path, 'w') as f:
for sequence in sequences:
f.write(sequence + "\n")
vocabulary = vocab.build_vocab(sequences) if use_vocab else None
reader = data_io.SequenceReader(path, vocab=vocabulary, add_bos=add_bos)
read_sequences = [s for s in reader]
assert reader.is_done()
assert len(read_sequences) == reader.count
if vocabulary is None:
with pytest.raises(SockeyeError) as e:
_ = data_io.SequenceReader(path, vocab=vocabulary, add_bos=True)
assert str(e.value) == "Adding a BOS symbol requires a vocabulary"
expected_sequences = [data_io.strids2ids(get_tokens(s)) for s in sequences]
assert read_sequences == expected_sequences
else:
expected_sequences = [data_io.tokens2ids(get_tokens(s), vocabulary) for s in sequences]
if add_bos:
expected_sequences = [[vocabulary[C.BOS_SYMBOL]] + s for s in expected_sequences]
assert read_sequences == expected_sequences
# check raise for multiple concurrent iters
_ = iter(reader)
with pytest.raises(SockeyeError) as e:
iter(reader)
assert str(e.value) == "Can not iterate multiple times simultaneously."
def test_sample_based_define_bucket_batch_sizes():
batch_by_words = False
batch_size = 32
max_seq_len = 100
buckets = data_io.define_parallel_buckets(max_seq_len, max_seq_len, 10, 1.5)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets=buckets,
batch_size=batch_size,
batch_by_words=batch_by_words,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
for bbs in bucket_batch_sizes:
assert bbs.batch_size == batch_size
assert bbs.average_words_per_batch == bbs.bucket[1] * batch_size
def test_word_based_define_bucket_batch_sizes():
batch_by_words = True
batch_num_devices = 1
batch_size = 200
max_seq_len = 100
buckets = data_io.define_parallel_buckets(max_seq_len, max_seq_len, 10, 1.5)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets=buckets,
batch_size=batch_size,
batch_by_words=batch_by_words,
batch_num_devices=batch_num_devices,
data_target_average_len=[None] * len(buckets))
# last bucket batch size is different
for bbs in bucket_batch_sizes[:-1]:
expected_batch_size = round((batch_size / bbs.bucket[1]) / batch_num_devices)
assert bbs.batch_size == expected_batch_size
expected_average_words_per_batch = expected_batch_size * bbs.bucket[1]
assert bbs.average_words_per_batch == expected_average_words_per_batch
def _get_random_bucketed_data(buckets: List[Tuple[int, int]],
min_count: int,
max_count: int,
bucket_counts: Optional[List[Optional[int]]] = None):
"""
Get random bucket data.
:param buckets: The list of buckets.
:param min_count: The minimum number of samples that will be sampled if no exact count is given.
:param max_count: The maximum number of samples that will be sampled if no exact count is given.
:param bucket_counts: For each bucket an optional exact example count can be given. If it is not given it will be
sampled.
:return: The random source, target and label arrays.
"""
if bucket_counts is None:
bucket_counts = [None for _ in buckets]
bucket_counts = [random.randint(min_count, max_count) if given_count is None else given_count
for given_count in bucket_counts]
source = [mx.nd.array(np.random.randint(0, 10, (count, random.randint(1, bucket[0])))) for count, bucket in
zip(bucket_counts, buckets)]
target = [mx.nd.array(np.random.randint(0, 10, (count, random.randint(1, bucket[1])))) for count, bucket in
zip(bucket_counts, buckets)]
label = target
return source, target, label
def test_parallel_data_set():
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
source, target, label = _get_random_bucketed_data(buckets, min_count=0, max_count=5)
def check_equal(arrays1, arrays2):
assert len(arrays1) == len(arrays2)
for a1, a2 in zip(arrays1, arrays2):
assert np.array_equal(a1.asnumpy(), a2.asnumpy())
with TemporaryDirectory() as work_dir:
dataset = data_io.ParallelDataSet(source, target, label)
fname = os.path.join(work_dir, 'dataset')
dataset.save(fname)
dataset_loaded = data_io.ParallelDataSet.load(fname)
check_equal(dataset.source, dataset_loaded.source)
check_equal(dataset.target, dataset_loaded.target)
check_equal(dataset.label, dataset_loaded.label)
def test_parallel_data_set_fill_up():
batch_size = 32
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=1, max_count=5))
dataset_filled_up = dataset.fill_up(bucket_batch_sizes, 'replicate')
assert len(dataset_filled_up.source) == len(dataset.source)
assert len(dataset_filled_up.target) == len(dataset.target)
assert len(dataset_filled_up.label) == len(dataset.label)
for bidx in range(len(dataset)):
bucket_batch_size = bucket_batch_sizes[bidx].batch_size
assert dataset_filled_up.source[bidx].shape[0] == bucket_batch_size
assert dataset_filled_up.target[bidx].shape[0] == bucket_batch_size
assert dataset_filled_up.label[bidx].shape[0] == bucket_batch_size
def test_get_permutations():
data = [list(range(3)), list(range(1)), list(range(7)), []]
bucket_counts = [len(d) for d in data]
permutation, inverse_permutation = data_io.get_permutations(bucket_counts)
assert len(permutation) == len(inverse_permutation) == len(bucket_counts) == len(data)
for d, p, pi in zip(data, permutation, inverse_permutation):
p = p.asnumpy().astype(np.int)
pi = pi.asnumpy().astype(np.int)
p_set = set(p)
pi_set = set(pi)
assert len(p_set) == len(p)
assert len(pi_set) == len(pi)
assert p_set - pi_set == set()
if d:
d = np.array(d)
assert (d[p][pi] == d).all()
else:
assert len(p_set) == 1
def test_parallel_data_set_permute():
batch_size = 5
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5)).fill_up(
bucket_batch_sizes, 'replicate')
permutations, inverse_permutations = data_io.get_permutations(dataset.get_bucket_counts())
assert len(permutations) == len(inverse_permutations) == len(dataset)
dataset_restored = dataset.permute(permutations).permute(inverse_permutations)
assert len(dataset) == len(dataset_restored)
for buck_idx in range(len(dataset)):
num_samples = dataset.source[buck_idx].shape[0]
if num_samples:
assert (dataset.source[buck_idx] == dataset_restored.source[buck_idx]).asnumpy().all()
assert (dataset.target[buck_idx] == dataset_restored.target[buck_idx]).asnumpy().all()
assert (dataset.label[buck_idx] == dataset_restored.label[buck_idx]).asnumpy().all()
else:
assert not dataset_restored.source[buck_idx]
assert not dataset_restored.target[buck_idx]
assert not dataset_restored.label[buck_idx]
def test_get_batch_indices():
max_bucket_size = 50
batch_size = 10
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets=buckets,
min_count=1,
max_count=max_bucket_size))
indices = data_io.get_batch_indices(dataset, bucket_batch_sizes=bucket_batch_sizes)
# check for valid indices
for buck_idx, start_pos in indices:
assert 0 <= buck_idx < len(dataset)
assert 0 <= start_pos < len(dataset.source[buck_idx]) - batch_size + 1
# check that all indices are used for a filled-up dataset
dataset = dataset.fill_up(bucket_batch_sizes, fill_up='replicate')
indices = data_io.get_batch_indices(dataset, bucket_batch_sizes=bucket_batch_sizes)
all_bucket_indices = set(list(range(len(dataset))))
computed_bucket_indices = set([i for i, j in indices])
assert not all_bucket_indices - computed_bucket_indices
@pytest.mark.parametrize("buckets, expected_default_bucket_key",
[([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], (50, 50)),
([(5, 10), (10, 20), (15, 30), (25, 50), (20, 40)], (25, 50))])
def test_get_default_bucket_key(buckets, expected_default_bucket_key):
default_bucket_key = data_io.get_default_bucket_key(buckets)
assert default_bucket_key == expected_default_bucket_key
get_parallel_bucket_tests = [([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 50, 50, 4, (50, 50)),
([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 50, 10, 4, (50, 50)),
([(10, 10), (20, 20), (30, 30), (40, 40), (50, 50)], 20, 10, 1, (20, 20)),
([(10, 10)], 20, 10, None, None),
([], 20, 10, None, None),
([(10, 11)], 11, 10, None, None),
([(11, 10)], 11, 10, 0, (11, 10))]
@pytest.mark.parametrize("buckets, source_length, target_length, expected_bucket_index, expected_bucket",
get_parallel_bucket_tests)
def test_get_parallel_bucket(buckets, source_length, target_length, expected_bucket_index, expected_bucket):
bucket_index, bucket = data_io.get_parallel_bucket(buckets, source_length, target_length)
assert bucket_index == expected_bucket_index
assert bucket == expected_bucket
@pytest.mark.parametrize("source, target, expected_num_sents, expected_mean, expected_std",
[([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
[[1, 1, 1], [2, 2, 2], [3, 3, 3]], 3, 1.0, 0.0),
([[1, 1], [2, 2], [3, 3]],
[[1, 1, 1], [2, 2, 2], [3, 3, 3]], 3, 1.5, 0.0),
([[1, 1, 1], [2, 2], [3, 3, 3, 3, 3, 3, 3]],
[[1, 1, 1], [2], [3, 3, 3]], 2, 0.75, 0.25)])
def test_calculate_length_statistics(source, target, expected_num_sents, expected_mean, expected_std):
length_statistics = data_io.calculate_length_statistics(source, target, 5, 5)
assert len(source) == len(target)
assert length_statistics.num_sents == expected_num_sents
assert np.isclose(length_statistics.length_ratio_mean, expected_mean)
assert np.isclose(length_statistics.length_ratio_std, expected_std)
def test_get_training_data_iters():
train_line_count = 100
train_max_length = 30
dev_line_count = 20
dev_max_length = 30
expected_mean = 1.0
expected_std = 0.0
test_line_count = 20
test_line_count_empty = 0
test_max_length = 30
batch_size = 5
with tmp_digits_dataset("tmp_corpus",
train_line_count, train_max_length, dev_line_count, dev_max_length,
test_line_count, test_line_count_empty, test_max_length) as data:
# tmp common vocab
vcb = vocab.build_from_paths([data['source'], data['target']])
train_iter, val_iter, config_data = data_io.get_training_data_iters(data['source'], data['target'],
data['validation_source'],
data['validation_target'],
vocab_source=vcb,
vocab_target=vcb,
vocab_source_path=None,
vocab_target_path=None,
shared_vocab=True,
batch_size=batch_size,
batch_by_words=False,
batch_num_devices=1,
fill_up="replicate",
max_seq_len_source=train_max_length,
max_seq_len_target=train_max_length,
bucketing=True,
bucket_width=10)
assert isinstance(train_iter, data_io.ParallelSampleIter)
assert isinstance(val_iter, data_io.ParallelSampleIter)
assert isinstance(config_data, data_io.DataConfig)
assert config_data.source == data['source']
assert config_data.target == data['target']
assert config_data.vocab_source is None
assert config_data.vocab_target is None
assert config_data.data_statistics.max_observed_len_source == train_max_length - 1
assert config_data.data_statistics.max_observed_len_target == train_max_length
assert np.isclose(config_data.data_statistics.length_ratio_mean, expected_mean)
assert np.isclose(config_data.data_statistics.length_ratio_std, expected_std)
assert train_iter.batch_size == batch_size
assert val_iter.batch_size == batch_size
assert train_iter.default_bucket_key == (train_max_length, train_max_length)
assert val_iter.default_bucket_key == (dev_max_length, dev_max_length)
assert train_iter.dtype == 'float32'
# test some batches
bos_id = vcb[C.BOS_SYMBOL]
expected_first_target_symbols = np.full((batch_size,), bos_id, dtype='float32')
for epoch in range(2):
while train_iter.iter_next():
batch = train_iter.next()
assert len(batch.data) == 2
assert len(batch.label) == 1
assert batch.bucket_key in train_iter.buckets
source = batch.data[0].asnumpy()
target = batch.data[1].asnumpy()
label = batch.label[0].asnumpy()
assert source.shape[0] == target.shape[0] == label.shape[0] == batch_size
# target first symbol should be BOS
assert np.array_equal(target[:, 0], expected_first_target_symbols)
# label first symbol should be 2nd target symbol
assert np.array_equal(label[:, 0], target[:, 1])
# each label sequence contains one EOS symbol
assert np.sum(label == vcb[C.EOS_SYMBOL]) == batch_size
train_iter.reset()
def _data_batches_equal(db1, db2):
# We just compare the data, should probably be enough
equal = True
for data1, data2 in zip(db1.data, db2.data):
equal = equal and np.allclose(data1.asnumpy(), data2.asnumpy())
return equal
def test_parallel_sample_iter():
batch_size = 2
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
# The first bucket is going to be empty:
bucket_counts = [0] + [None] * (len(buckets) - 1)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
it = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
with TemporaryDirectory() as work_dir:
# Test 1
it.next()
expected_batch = it.next()
fname = os.path.join(work_dir, "saved_iter")
it.save_state(fname)
it_loaded = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
# Test 2
it.reset()
expected_batch = it.next()
it.save_state(fname)
it_loaded = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
# Test 3
it.reset()
expected_batch = it.next()
it.save_state(fname)
it_loaded = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
while it.iter_next():
it.next()
it_loaded.next()
assert not it_loaded.iter_next()
def test_sharded_parallel_sample_iter():
batch_size = 2
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
# The first bucket is going to be empty:
bucket_counts = [0] + [None] * (len(buckets) - 1)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset1 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
dataset2 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
with TemporaryDirectory() as work_dir:
shard1_fname = os.path.join(work_dir, 'shard1')
shard2_fname = os.path.join(work_dir, 'shard2')
dataset1.save(shard1_fname)
dataset2.save(shard2_fname)
shard_fnames = [shard1_fname, shard2_fname]
it = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes, 'replicate')
with TemporaryDirectory() as work_dir:
# Test 1
it.next()
expected_batch = it.next()
fname = os.path.join(work_dir, "saved_iter")
it.save_state(fname)
it_loaded = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes,
'replicate')
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
# Test 2
it.reset()
expected_batch = it.next()
it.save_state(fname)
it_loaded = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes,
'replicate')
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
# Test 3
it.reset()
expected_batch = it.next()
it.save_state(fname)
it_loaded = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes,
'replicate')
it_loaded.reset()
it_loaded.load_state(fname)
loaded_batch = it_loaded.next()
assert _data_batches_equal(expected_batch, loaded_batch)
while it.iter_next():
it.next()
it_loaded.next()
assert not it_loaded.iter_next()
def test_sharded_parallel_sample_iter_num_batches():
num_shards = 2
batch_size = 2
num_batches_per_bucket = 10
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
bucket_counts = [batch_size * num_batches_per_bucket for _ in buckets]
num_batches_per_shard = num_batches_per_bucket * len(buckets)
num_batches = num_shards * num_batches_per_shard
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset1 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
dataset2 = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
with TemporaryDirectory() as work_dir:
shard1_fname = os.path.join(work_dir, 'shard1')
shard2_fname = os.path.join(work_dir, 'shard2')
dataset1.save(shard1_fname)
dataset2.save(shard2_fname)
shard_fnames = [shard1_fname, shard2_fname]
it = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes,
'replicate')
num_batches_seen = 0
while it.iter_next():
it.next()
num_batches_seen += 1
assert num_batches_seen == num_batches
def test_sharded_and_parallel_iter_same_num_batches():
""" Tests that a sharded data iterator with just a single shard produces as many shards as an iterator directly
using the same dataset. """
batch_size = 2
num_batches_per_bucket = 10
buckets = data_io.define_parallel_buckets(100, 100, 10, 1.0)
bucket_counts = [batch_size * num_batches_per_bucket for _ in buckets]
num_batches = num_batches_per_bucket * len(buckets)
bucket_batch_sizes = data_io.define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words=False,
batch_num_devices=1,
data_target_average_len=[None] * len(buckets))
dataset = data_io.ParallelDataSet(*_get_random_bucketed_data(buckets, min_count=0, max_count=5,
bucket_counts=bucket_counts))
with TemporaryDirectory() as work_dir:
shard_fname = os.path.join(work_dir, 'shard1')
dataset.save(shard_fname)
shard_fnames = [shard_fname]
it_sharded = data_io.ShardedParallelSampleIter(shard_fnames, buckets, batch_size, bucket_batch_sizes,
'replicate')
it_parallel = data_io.ParallelSampleIter(dataset, buckets, batch_size, bucket_batch_sizes)
num_batches_seen = 0
while it_parallel.iter_next():
assert it_sharded.iter_next()
it_parallel.next()
it_sharded.next()
num_batches_seen += 1
assert num_batches_seen == num_batches
print("Resetting...")
it_sharded.reset()
it_parallel.reset()
num_batches_seen = 0
while it_parallel.iter_next():
assert it_sharded.iter_next()
it_parallel.next()
it_sharded.next()
num_batches_seen += 1
assert num_batches_seen == num_batches
| ["List[Tuple[int, int]]", "int", "int"] | [8196, 8260, 8306] | [8217, 8263, 8309] |
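The non-empty annotation columns in the row above suggest that `type_annotation_starts` and `type_annotation_ends` are character offsets into `contents`: each `end - start` equals the length of the corresponding annotation string (for example, 8217 - 8196 = 21 = len("List[Tuple[int, int]]")). A small sanity check under that assumption:

```python
# Sketch of an offset check; assumes *_starts/*_ends index characters in `contents`.
def check_row(row):
    for ann, start, end in zip(row["type_annotations"],
                               row["type_annotation_starts"],
                               row["type_annotation_ends"]):
        # Each span of the file contents should reproduce the recorded annotation text.
        assert row["contents"][start:end] == ann, (ann, start, end)
```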
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_decoder.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import pytest
import sockeye.rnn_attention
import sockeye.rnn
import sockeye.constants as C
import sockeye.coverage
import sockeye.decoder
from test.common import gaussian_vector, integer_vector
step_tests = [(C.GRU_TYPE, True), (C.LSTM_TYPE, False)]
@pytest.mark.parametrize("cell_type, context_gating", step_tests)
def test_step(cell_type, context_gating,
num_embed=2,
encoder_num_hidden=5,
decoder_num_hidden=5):
vocab_size, batch_size, source_seq_len = 10, 10, 7,
# (batch_size, source_seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
source_shape = (batch_size, source_seq_len, encoder_num_hidden)
# (batch_size,)
source_length = mx.sym.Variable("source_length")
source_length_shape = (batch_size,)
# (batch_size, num_embed)
word_vec_prev = mx.sym.Variable("word_vec_prev")
word_vec_prev_shape = (batch_size, num_embed)
# (batch_size, decoder_num_hidden)
hidden_prev = mx.sym.Variable("hidden_prev")
hidden_prev_shape = (batch_size, decoder_num_hidden)
# List(mx.sym.Symbol(batch_size, decoder_num_hidden)
states_shape = (batch_size, decoder_num_hidden)
config_coverage = sockeye.coverage.CoverageConfig(type="tanh",
num_hidden=2,
layer_normalization=False)
config_attention = sockeye.rnn_attention.AttentionConfig(type="coverage",
num_hidden=2,
input_previous_word=False,
source_num_hidden=decoder_num_hidden,
query_num_hidden=decoder_num_hidden,
layer_normalization=False,
config_coverage=config_coverage)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
config_rnn = sockeye.rnn.RNNConfig(cell_type=cell_type,
num_hidden=decoder_num_hidden,
num_layers=1,
dropout_inputs=0.,
dropout_states=0.,
residual=False,
forget_bias=0.)
config_decoder = sockeye.decoder.RecurrentDecoderConfig(max_seq_len_source=source_seq_len,
rnn_config=config_rnn,
attention_config=config_attention,
context_gating=context_gating)
decoder = sockeye.decoder.RecurrentDecoder(config=config_decoder)
if cell_type == C.GRU_TYPE:
layer_states = [gaussian_vector(shape=states_shape, return_symbol=True) for _ in range(config_rnn.num_layers)]
elif cell_type == C.LSTM_TYPE:
layer_states = [gaussian_vector(shape=states_shape, return_symbol=True) for _ in range(config_rnn.num_layers*2)]
else:
raise ValueError
state, attention_state = decoder._step(word_vec_prev=word_vec_prev,
state=sockeye.decoder.RecurrentDecoderState(hidden_prev, layer_states),
attention_func=attention_func,
attention_state=attention_state)
sym = mx.sym.Group([state.hidden, attention_state.probs, attention_state.dynamic_source])
executor = sym.simple_bind(ctx=mx.cpu(),
source=source_shape,
source_length=source_length_shape,
word_vec_prev=word_vec_prev_shape,
hidden_prev=hidden_prev_shape)
executor.arg_dict["source"][:] = gaussian_vector(source_shape)
executor.arg_dict["source_length"][:] = integer_vector(source_length_shape, source_seq_len)
executor.arg_dict["word_vec_prev"][:] = gaussian_vector(word_vec_prev_shape)
executor.arg_dict["hidden_prev"][:] = gaussian_vector(hidden_prev_shape)
executor.arg_dict["states"] = layer_states
hidden_result, attention_probs_result, attention_dynamic_source_result = executor.forward()
assert hidden_result.shape == hidden_prev_shape
assert attention_probs_result.shape == (batch_size, source_seq_len)
assert attention_dynamic_source_result.shape == (batch_size, source_seq_len, config_coverage.num_hidden)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_encoder.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import mxnet as mx
import numpy as np
import sockeye.encoder
_BATCH_SIZE = 8
_SEQ_LEN = 10
_NUM_EMBED = 8
_DATA_LENGTH_ND = mx.nd.array([1, 2, 3, 4, 5, 6, 7, 8])
@pytest.mark.parametrize("config, out_data_shape, out_data_length, out_seq_len", [
(sockeye.encoder.ConvolutionalEmbeddingConfig(num_embed=_NUM_EMBED,
output_dim=None,
max_filter_width=3,
num_filters=[8, 16, 16],
pool_stride=4,
num_highway_layers=2,
dropout=0,
add_positional_encoding=False),
(8, 3, 40),
[1, 1, 1, 1, 2, 2, 2, 2],
3),
(sockeye.encoder.ConvolutionalEmbeddingConfig(num_embed=_NUM_EMBED,
output_dim=32,
max_filter_width=2,
num_filters=[8, 16],
pool_stride=3,
num_highway_layers=0,
dropout=0.1,
add_positional_encoding=True),
(8, 4, 32),
[1, 1, 1, 2, 2, 2, 3, 3],
4),
])
def test_convolutional_embedding_encoder(config, out_data_shape, out_data_length, out_seq_len):
conv_embed = sockeye.encoder.ConvolutionalEmbeddingEncoder(config)
data_nd = mx.nd.random_normal(shape=(_BATCH_SIZE, _SEQ_LEN, _NUM_EMBED))
data = mx.sym.Variable("data", shape=data_nd.shape)
data_length = mx.sym.Variable("data_length", shape=_DATA_LENGTH_ND.shape)
(encoded_data,
encoded_data_length,
encoded_seq_len) = conv_embed.encode(data=data, data_length=data_length, seq_len=_SEQ_LEN)
exe = encoded_data.simple_bind(mx.cpu(), data=data_nd.shape)
exe.forward(data=data_nd)
assert exe.outputs[0].shape == out_data_shape
exe = encoded_data_length.simple_bind(mx.cpu(), data_length=_DATA_LENGTH_ND.shape)
exe.forward(data_length=_DATA_LENGTH_ND)
assert np.equal(exe.outputs[0].asnumpy(), np.asarray(out_data_length)).all()
assert encoded_seq_len == out_seq_len
def test_sincos_positional_embeddings():
# Test that .encode() and .encode_positions() return the same values:
data = mx.sym.Variable("data")
positions = mx.sym.Variable("positions")
pos_encoder = sockeye.encoder.AddSinCosPositionalEmbeddings(num_embed=_NUM_EMBED,
scale_up_input=False,
scale_down_positions=False,
prefix="test")
encoded, _, __ = pos_encoder.encode(data, None, _SEQ_LEN)
nd_encoded = encoded.eval(data=mx.nd.zeros((_BATCH_SIZE, _SEQ_LEN, _NUM_EMBED)))[0]
# Take the first element in the batch to get (seq_len, num_embed)
nd_encoded = nd_encoded[0]
encoded_positions = pos_encoder.encode_positions(positions, data)
# Explicitly encode all positions from 0 to _SEQ_LEN
nd_encoded_positions = encoded_positions.eval(positions=mx.nd.arange(0, _SEQ_LEN),
data=mx.nd.zeros((_SEQ_LEN, _NUM_EMBED)))[0]
assert np.isclose(nd_encoded.asnumpy(), nd_encoded_positions.asnumpy()).all()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_inference.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
import sockeye.inference
_BOS = 0
_EOS = -1
def test_concat_translations():
expected_target_ids = [0, 1, 2, 8, 9, 3, 4, 5, -1]
NUM_SRC = 7
def length_penalty(length):
return 1. / length
expected_score = (1 + 2 + 3) / length_penalty(len(expected_target_ids))
translations = [sockeye.inference.Translation([0, 1, 2, -1], np.zeros((4, NUM_SRC)), 1.0 / length_penalty(4)),
# Translation without EOS
sockeye.inference.Translation([0, 8, 9], np.zeros((3, NUM_SRC)), 2.0 / length_penalty(3)),
sockeye.inference.Translation([0, 3, 4, 5, -1], np.zeros((5, NUM_SRC)), 3.0 / length_penalty(5))]
combined = sockeye.inference._concat_translations(translations, start_id=_BOS, stop_ids={_EOS},
length_penalty=length_penalty)
assert combined.target_ids == expected_target_ids
assert combined.attention_matrix.shape == (len(expected_target_ids), len(translations) * NUM_SRC)
assert np.isclose(combined.score, expected_score)
def test_length_penalty_default():
lengths = mx.nd.array([[1], [2], [3]])
length_penalty = sockeye.inference.LengthPenalty(1.0, 0.0)
expected_lp = np.array([[1.0], [2.], [3.]])
assert np.isclose(length_penalty(lengths).asnumpy(), expected_lp).all()
def test_length_penalty():
lengths = mx.nd.array([[1], [2], [3]])
length_penalty = sockeye.inference.LengthPenalty(.2, 5.0)
expected_lp = np.array([[6 ** 0.2 / 6 ** 0.2], [7 ** 0.2 / 6 ** 0.2], [8 ** 0.2 / 6 ** 0.2]])
assert np.isclose(length_penalty(lengths).asnumpy(), expected_lp).all()
def test_length_penalty_int_input():
length = 1
length_penalty = sockeye.inference.LengthPenalty(.2, 5.0)
expected_lp = [6 ** 0.2 / 6 ** 0.2]
assert np.isclose(np.asarray([length_penalty(length)]),
np.asarray(expected_lp)).all()
@pytest.mark.parametrize("supported_max_seq_len_source, supported_max_seq_len_target, training_max_seq_len_source, "
"forced_max_input_len, length_ratio_mean, length_ratio_std, "
"expected_max_input_len, expected_max_output_len",
[
(100, 100, 100, None, 0.9, 0.2, 89, 100),
(100, 100, 100, None, 1.1, 0.2, 75, 100),
# No source length constraints.
(None, 100, 100, None, 0.9, 0.1, 98, 100),
# No target length constraints.
(80, None, 100, None, 1.1, 0.4, 80, 122),
# No source/target length constraints. Source is max observed during training and target
# based on length ratios.
(None, None, 100, None, 1.0, 0.1, 100, 113),
# Force a maximum input length.
(100, 100, 100, 50, 1.1, 0.2, 50, 67),
])
def test_get_max_input_output_length(
supported_max_seq_len_source,
supported_max_seq_len_target,
training_max_seq_len_source,
forced_max_input_len,
length_ratio_mean,
length_ratio_std,
expected_max_input_len,
expected_max_output_len):
max_input_len, get_max_output_len = sockeye.inference.get_max_input_output_length(
supported_max_seq_len_source=supported_max_seq_len_source,
supported_max_seq_len_target=supported_max_seq_len_target,
training_max_seq_len_source=training_max_seq_len_source,
forced_max_input_len=forced_max_input_len,
length_ratio_mean=length_ratio_mean,
length_ratio_std=length_ratio_std,
num_stds=1)
max_output_len = get_max_output_len(max_input_len)
if supported_max_seq_len_source is not None:
assert max_input_len <= supported_max_seq_len_source
if supported_max_seq_len_target is not None:
assert max_output_len <= supported_max_seq_len_target
if expected_max_input_len is not None:
assert max_input_len == expected_max_input_len
if expected_max_output_len is not None:
assert max_output_len == expected_max_output_len
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_init_embedding.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import numpy as np
import mxnet as mx
import sockeye.init_embedding as init_embedding
@pytest.mark.parametrize(
"embed, vocab_in, vocab_out, expected_embed_init", [
(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]),
{'w1': 0, 'w2': 1, 'w3': 2},
{'w2': 0, 'w3': 1, 'w4': 2, 'w5': 3},
mx.nd.array([[2, 2, 2], [3, 3, 3], [0, 0, 0], [0, 0, 0]]))
])
def test_init_embedding(embed, vocab_in, vocab_out, expected_embed_init):
embed_init = init_embedding.init_embedding(embed, vocab_in, vocab_out)
assert (embed_init == expected_embed_init).asnumpy().all()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_layers.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import sockeye.layers
import sockeye.rnn
def test_layer_normalization():
batch_size = 32
other_dim = 10
num_hidden = 64
x = mx.sym.Variable('x')
x_nd = mx.nd.uniform(0, 10, (batch_size, other_dim, num_hidden))
x_np = x_nd.asnumpy()
ln = sockeye.layers.LayerNormalization(num_hidden, prefix="")
# test moments
sym = mx.sym.Group(ln.moments(x))
mean, var = sym.eval(x=x_nd)
expected_mean = np.mean(x_np, axis=-1, keepdims=True)
expected_var = np.var(x_np, axis=-1, keepdims=True)
assert np.isclose(mean.asnumpy(), expected_mean).all()
assert np.isclose(var.asnumpy(), expected_var).all()
sym = ln.normalize(x)
norm = sym.eval(x=x_nd,
_gamma=mx.nd.ones((num_hidden,)),
_beta=mx.nd.zeros((num_hidden,)))[0]
expected_norm = (x_np - expected_mean) / np.sqrt(expected_var)
assert np.isclose(norm.asnumpy(), expected_norm, atol=1.e-6).all()
def test_weight_normalization():
# The norm after the operation should be equal to the scale factor.
expected_norm = np.asarray([1., 2.])
scale_factor = mx.nd.array([[1.], [2.]])
weight = mx.sym.Variable("weight")
weight_norm = sockeye.layers.WeightNormalization(weight,
num_hidden=2)
norm_weight = weight_norm()
nd_norm_weight = norm_weight.eval(weight=mx.nd.array([[1., 2.],
[3., 4.]]),
wn_scale=scale_factor)
assert np.isclose(np.linalg.norm(nd_norm_weight[0].asnumpy(), axis=1), expected_norm).all()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_lexicon.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from tempfile import TemporaryDirectory
import numpy as np
import sockeye.constants as C
import sockeye.lexicon
def test_topk_lexicon():
lexicon = ["a\ta\t-0.6931471805599453",
"a\tb\t-1.2039728043259361",
"a\tc\t-1.6094379124341003",
"b\tb\t0.0"]
vocab_list = ["a", "b", "c"]
vocab = dict((y, x) for (x, y) in enumerate(C.VOCAB_SYMBOLS + vocab_list))
k = 2
lex = sockeye.lexicon.TopKLexicon(vocab, vocab)
# Create from known lexicon
with TemporaryDirectory(prefix="test_topk_lexicon.") as work_dir:
# Write fast_align format lex table
input_lex_path = os.path.join(work_dir, "input.lex")
with open(input_lex_path, "w") as out:
for line in lexicon:
print(line, file=out)
# Use fast_align lex table to build top-k lexicon
lex.create(input_lex_path, k)
# Test against known lexicon
expected = np.zeros((len(C.VOCAB_SYMBOLS) + len(vocab_list), k), dtype=np.int)
# a -> special + a b
expected[len(C.VOCAB_SYMBOLS),:2] = [len(C.VOCAB_SYMBOLS), len(C.VOCAB_SYMBOLS) + 1]
# b -> special + b
expected[len(C.VOCAB_SYMBOLS) + 1,:1] = [len(C.VOCAB_SYMBOLS) + 1]
assert np.all(lex.lex == expected)
# Test save/load
json_lex_path = os.path.join(work_dir, "lex.json")
lex.save(json_lex_path)
lex.load(json_lex_path)
assert np.all(lex.lex == expected)
# Test lookup
trg_ids = lex.get_trg_ids(np.array([[vocab["a"], vocab["c"]]], dtype=np.int))
expected = np.array([vocab[symbol] for symbol in C.VOCAB_SYMBOLS + ["a", "b"]], dtype=np.int)
assert np.all(trg_ids == expected)
trg_ids = lex.get_trg_ids(np.array([[vocab["b"]]], dtype=np.int))
expected = np.array([vocab[symbol] for symbol in C.VOCAB_SYMBOLS + ["b"]], dtype=np.int)
assert np.all(trg_ids == expected)
trg_ids = lex.get_trg_ids(np.array([[vocab["c"]]], dtype=np.int))
expected = np.array([vocab[symbol] for symbol in C.VOCAB_SYMBOLS], dtype=np.int)
assert np.all(trg_ids == expected)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_loss.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
import sockeye.constants as C
import sockeye.loss
import sockeye.model
def test_cross_entropy_loss():
config = sockeye.loss.LossConfig(name=C.CROSS_ENTROPY, vocab_size=4, normalization_type=C.LOSS_NORM_BATCH)
loss = sockeye.loss.get_loss(config)
assert isinstance(loss, sockeye.loss.CrossEntropyLoss)
logits = mx.sym.Variable("logits")
labels = mx.sym.Variable("labels")
sym = mx.sym.Group(loss.get_loss(logits, labels))
assert sym.list_arguments() == ['logits', 'labels']
assert sym.list_outputs() == [C.SOFTMAX_NAME + "_output"]
logits_np = mx.nd.array([[1, 2, 3, 4],
[4, 2, 2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4]])
labels_np = mx.nd.array([1, 0, 2, 3]) # C.PAD_ID == 0
expected_softmax = np.asarray([[0.0320586, 0.08714432, 0.23688284, 0.64391428],
[0.71123451, 0.09625512, 0.09625512, 0.09625512],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25]])
expected_grads = np.asarray([[0.0320586, -0.91285568, 0.23688284, 0.64391428],
[0., 0., 0., 0.],
[0.25, 0.25, -0.75, 0.25],
[0.25, 0.25, 0.25, -0.75]])
_, out_shapes, _ = (sym.infer_shape(logits=logits_np.shape, labels=labels_np.shape))
assert out_shapes[0] == logits_np.shape
executor = sym.simple_bind(ctx=mx.cpu(),
logits=logits_np.shape,
labels=labels_np.shape)
executor.arg_dict["logits"][:] = logits_np
executor.arg_dict["labels"][:] = labels_np
softmax = executor.forward(is_train=True)[0].asnumpy()
assert np.isclose(softmax, expected_softmax).all()
executor.backward()
grads = executor.grad_dict["logits"].asnumpy()
assert np.isclose(grads, expected_grads).all()
label_grad_sum = executor.grad_dict["labels"].asnumpy().sum()
assert label_grad_sum == 0
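# The expected gradients above follow the usual softmax cross-entropy pattern
# softmax(logits) - one_hot(label) per row, with rows whose label equals C.PAD_ID (0)
# zeroed out, so padding positions contribute neither loss nor gradient.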
def test_smoothed_cross_entropy_loss():
config = sockeye.loss.LossConfig(name=C.CROSS_ENTROPY,
vocab_size=4,
normalization_type=C.LOSS_NORM_BATCH,
label_smoothing=0.5)
loss = sockeye.loss.get_loss(config)
assert isinstance(loss, sockeye.loss.CrossEntropyLoss)
logits = mx.sym.Variable("logits")
labels = mx.sym.Variable("labels")
sym = mx.sym.Group(loss.get_loss(logits, labels))
assert sym.list_arguments() == ['logits', 'labels']
assert sym.list_outputs() == [C.SOFTMAX_NAME + "_output"]
logits_np = mx.nd.array([[1, 2, 3, 4],
[4, 2, 2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4]])
labels_np = mx.nd.array([1, 0, 2, 3]) # C.PAD_ID == 0
expected_softmax = np.asarray([[0.0320586, 0.08714432, 0.23688284, 0.64391428],
[0.71123451, 0.09625512, 0.09625512, 0.09625512],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25]])
expected_grads = np.asarray([[-0.13460806, -0.41285568, 0.07021617, 0.4772476],
[0., 0., 0., 0.],
[0.08333333, 0.08333333, -0.25, 0.08333333],
[0.08333333, 0.08333333, 0.08333333, -0.25]])
_, out_shapes, _ = (sym.infer_shape(logits=logits_np.shape, labels=labels_np.shape))
assert out_shapes[0] == logits_np.shape
executor = sym.simple_bind(ctx=mx.cpu(),
logits=logits_np.shape,
labels=labels_np.shape)
executor.arg_dict["logits"][:] = logits_np
executor.arg_dict["labels"][:] = labels_np
outputs = executor.forward(is_train=True)
softmax = outputs[0].asnumpy()
assert np.isclose(softmax, expected_softmax).all()
executor.backward()
grads = executor.grad_dict["logits"].asnumpy()
assert np.isclose(grads, expected_grads).all()
label_grad_sum = executor.grad_dict["labels"].asnumpy().sum()
assert label_grad_sum == 0
@pytest.mark.parametrize("preds, labels, normalization_type, label_smoothing, expected_value",
[(mx.nd.array([[0.0, 0.2, 0.8],
[0.0, 1.0, 0.0]]),
mx.nd.array([[2],
[0]]),
'valid',
0.0,
-np.log(0.8 + 1e-8) / 1.0),
(mx.nd.array([[0.0, 0.2, 0.8],
[0.0, 1.0, 0.0]]),
mx.nd.array([[2],
[0]]),
'batch',
0.0,
-np.log(0.8 + 1e-8) / 2.0)]
)
def test_cross_entropy_metric(preds, labels, normalization_type, label_smoothing, expected_value):
config = sockeye.loss.LossConfig(name=C.CROSS_ENTROPY,
vocab_size=preds.shape[1],
normalization_type=normalization_type,
label_smoothing=label_smoothing)
metric = sockeye.loss.CrossEntropyMetric(config)
metric.update([labels], [preds])
name, value = metric.get()
assert name == 'cross-entropy'
assert np.isclose(value, expected_value)
def test_cross_entropy_internal():
config = sockeye.loss.LossConfig(name=C.CROSS_ENTROPY,
vocab_size=3,
normalization_type='valid',
label_smoothing=0.0)
metric = sockeye.loss.CrossEntropyMetric(config)
pred = mx.nd.array([0.0, 0.2, 0.8])
label = mx.nd.array([2])
expected_cross_entropy = -np.log(0.8 + 1e-8) / 1.0
cross_entropy = metric.cross_entropy(pred, label, ignore=(label == C.PAD_ID)).sum()
cross_entropy_smoothed = metric.cross_entropy_smoothed(pred, label, ignore=(label == C.PAD_ID)).sum()
assert np.isclose(cross_entropy.asnumpy(), expected_cross_entropy)
assert np.isclose(cross_entropy_smoothed.asnumpy(), expected_cross_entropy)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_lr_scheduler.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from sockeye import lr_scheduler
from sockeye.lr_scheduler import LearningRateSchedulerFixedStep, LearningRateSchedulerInvSqrtT, LearningRateSchedulerInvT
def test_lr_scheduler():
updates_per_checkpoint = 13
half_life_num_checkpoints = 3
schedulers = [LearningRateSchedulerInvT(updates_per_checkpoint, half_life_num_checkpoints),
LearningRateSchedulerInvSqrtT(updates_per_checkpoint, half_life_num_checkpoints)]
for scheduler in schedulers:
scheduler.base_lr = 1.0
# test correct half-life:
assert scheduler(updates_per_checkpoint * half_life_num_checkpoints) == pytest.approx(0.5)
def test_fixed_step_lr_scheduler():
# Parse schedule string
schedule_str = "0.5:16,0.25:8"
schedule = LearningRateSchedulerFixedStep.parse_schedule_str(schedule_str)
assert schedule == [(0.5, 16), (0.25, 8)]
# Check learning rate steps
updates_per_checkpoint = 2
scheduler = LearningRateSchedulerFixedStep(schedule, updates_per_checkpoint)
t = 0
for _ in range(16):
t += 1
assert scheduler(t) == 0.5
if t % 2 == 0:
scheduler.new_evaluation_result(False)
assert scheduler(t) == 0.25
for _ in range(8):
t += 1
assert scheduler(t) == 0.25
if t % 2 == 0:
scheduler.new_evaluation_result(False)
@pytest.mark.parametrize("scheduler_type, reduce_factor, expected_instance",
[("fixed-rate-inv-sqrt-t", 1.0, lr_scheduler.LearningRateSchedulerInvSqrtT),
("fixed-rate-inv-t", 1.0, lr_scheduler.LearningRateSchedulerInvT),
("plateau-reduce", 0.5, lr_scheduler.LearningRateSchedulerPlateauReduce)])
def test_get_lr_scheduler(scheduler_type, reduce_factor, expected_instance):
scheduler = lr_scheduler.get_lr_scheduler(scheduler_type,
updates_per_checkpoint=4,
learning_rate_half_life=2,
learning_rate_reduce_factor=reduce_factor,
learning_rate_reduce_num_not_improved=16)
assert isinstance(scheduler, expected_instance)
def test_get_lr_scheduler_no_reduce():
scheduler = lr_scheduler.get_lr_scheduler("plateau-reduce",
updates_per_checkpoint=4,
learning_rate_half_life=2,
learning_rate_reduce_factor=1.0,
learning_rate_reduce_num_not_improved=16)
assert scheduler is None
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_optimizers.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from random import random
import mxnet.ndarray as nd
import pytest
from mxnet import optimizer as opt
import sockeye.constants as C
from sockeye.optimizers import BatchState, CheckpointState, SockeyeOptimizer
@pytest.mark.parametrize("optimizer, optimizer_params",
((C.OPTIMIZER_ADAM, {}),
(C.OPTIMIZER_EVE, {}),
(C.OPTIMIZER_EVE, {"use_batch_objective": True, "use_checkpoint_objective": True}),
))
def test_optimizer(optimizer, optimizer_params):
# Weights
index = 0
weight = nd.zeros(shape=(8,))
# Optimizer from registry
optimizer = opt.create(optimizer, **optimizer_params)
state = optimizer.create_state(index, weight)
# Run a few updates
for i in range(1, 13):
grad = nd.random_normal(shape=(8,))
if isinstance(optimizer, SockeyeOptimizer):
batch_state = BatchState(metric_val=random())
optimizer.pre_update_batch(batch_state)
optimizer.update(index, weight, grad, state)
# Checkpoint
if i % 3 == 0:
if isinstance(optimizer, SockeyeOptimizer):
checkpoint_state = CheckpointState(checkpoint=(i % 3 + 1), metric_val=random())
optimizer.pre_update_checkpoint(checkpoint_state)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_output_handler.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import io
import pytest
import numpy as np
from sockeye.inference import TranslatorInput, TranslatorOutput
import sockeye.output_handler
stream_handler_tests = [(sockeye.output_handler.StringOutputHandler(io.StringIO()),
TranslatorInput(id=0, sentence="a test", tokens=None),
TranslatorOutput(id=0, translation="ein Test", tokens=None,
attention_matrix=None,
score=0.),
0.,
"ein Test\n"),
(sockeye.output_handler.StringOutputHandler(io.StringIO()),
TranslatorInput(id=0, sentence="", tokens=None),
TranslatorOutput(id=0, translation="", tokens=None,
attention_matrix=None,
score=0.),
0.,
"\n"),
(sockeye.output_handler.StringWithAlignmentsOutputHandler(io.StringIO(), threshold=0.5),
TranslatorInput(id=0, sentence="a test", tokens=None),
TranslatorOutput(id=0, translation="ein Test", tokens=None,
attention_matrix=np.asarray([[1, 0],
[0, 1]]),
score=0.),
0.,
"ein Test\t0-0 1-1\n"),
(sockeye.output_handler.StringWithAlignmentsOutputHandler(io.StringIO(), threshold=0.5),
TranslatorInput(id=0, sentence="a test", tokens=None),
TranslatorOutput(id=0, translation="ein Test !", tokens=None,
attention_matrix=np.asarray([[0.4, 0.6],
[0.8, 0.2],
[0.5, 0.5]]),
score=0.),
0.,
"ein Test !\t0-1 1-0\n"),
(sockeye.output_handler.BenchmarkOutputHandler(io.StringIO()),
TranslatorInput(id=0, sentence="a test", tokens=["a", "test"]),
TranslatorOutput(id=0, translation="ein Test", tokens=["ein", "Test"],
attention_matrix=None,
score=0.),
0.5,
"input=a test\toutput=ein Test\tinput_tokens=2\toutput_tokens=2\ttranslation_time=0.5000\n"),
]
@pytest.mark.parametrize("handler, translation_input, translation_output, translation_walltime, expected_string", stream_handler_tests)
def test_stream_output_handler(handler, translation_input, translation_output, translation_walltime, expected_string):
handler.handle(translation_input, translation_output, translation_walltime)
assert handler.stream.getvalue() == expected_string
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_params.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import glob
import os.path
import tempfile
import sockeye.training
import sockeye.constants as C
def test_cleanup_param_files():
with tempfile.TemporaryDirectory() as tmpDir:
for n in itertools.chain(range(1, 20, 2), range(21, 41)):
# Create empty files
open(os.path.join(tmpDir, C.PARAMS_NAME % n), "w").close()
sockeye.training.cleanup_params_files(tmpDir, 5, 40, 17)
expectedSurviving = set([os.path.join(tmpDir, C.PARAMS_NAME % n)
for n in [17, 36, 37, 38, 39, 40]])
# 17 must survive because it is the best one
assert set(glob.glob(os.path.join(tmpDir, C.PARAMS_PREFIX + "*"))) == expectedSurviving
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_rnn.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
from sockeye import constants as C
from sockeye import rnn
cell_test_cases = [
(rnn.LayerNormLSTMCell(100, prefix='rnn_', forget_bias=1.0),
sorted(['rnn_c_scale', 'rnn_c_shift',
'rnn_h2h_bias', 'rnn_h2h_scale', 'rnn_h2h_shift', 'rnn_h2h_weight',
'rnn_i2h_bias', 'rnn_i2h_scale', 'rnn_i2h_shift', 'rnn_i2h_weight'])),
(rnn.LayerNormPerGateLSTMCell(100, prefix='rnn_', forget_bias=1.0),
sorted(['rnn_c_scale', 'rnn_c_shift',
'rnn_f_scale', 'rnn_f_shift',
'rnn_h2h_bias', 'rnn_h2h_weight',
'rnn_i2h_bias', 'rnn_i2h_weight',
'rnn_i_scale', 'rnn_i_shift',
'rnn_o_scale', 'rnn_o_shift',
'rnn_s_scale', 'rnn_s_shift'])),
(rnn.LayerNormGRUCell(100, prefix='rnn_'),
sorted(['rnn_h2h_bias', 'rnn_h2h_scale', 'rnn_h2h_shift', 'rnn_h2h_weight',
'rnn_i2h_bias', 'rnn_i2h_scale', 'rnn_i2h_shift', 'rnn_i2h_weight'])),
(rnn.LayerNormPerGateGRUCell(100, prefix='rnn_'),
sorted(['rnn_h2h_bias', 'rnn_h2h_weight',
'rnn_i2h_bias', 'rnn_i2h_weight',
'rnn_o_scale', 'rnn_o_shift',
'rnn_r_scale', 'rnn_r_shift',
'rnn_z_scale', 'rnn_z_shift']))
]
@pytest.mark.parametrize("cell, expected_param_keys", cell_test_cases)
def test_ln_cell(cell, expected_param_keys):
inputs = [mx.sym.Variable('rnn_t%d_data' % i) for i in range(3)]
outputs, _ = cell.unroll(3, inputs)
outputs = mx.sym.Group(outputs)
print(sorted(cell.params._params.keys()))
assert sorted(cell.params._params.keys()) == expected_param_keys
assert outputs.list_outputs() == ['rnn_t0_out_output', 'rnn_t1_out_output', 'rnn_t2_out_output']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50), rnn_t2_data=(10, 50))
assert outs == [(10, 100), (10, 100), (10, 100)]
get_rnn_test_cases = [
(rnn.RNNConfig(cell_type=C.LSTM_TYPE, num_hidden=100, num_layers=2, dropout_inputs=0.5, dropout_states=0.5,
residual=False, forget_bias=0.0), mx.rnn.LSTMCell),
(rnn.RNNConfig(cell_type=C.LSTM_TYPE, num_hidden=100, num_layers=2, dropout_inputs=0.0, dropout_states=0.0,
dropout_recurrent=0.5, residual=False, forget_bias=0.0), rnn.RecurrentDropoutLSTMCell),
(rnn.RNNConfig(cell_type=C.LNLSTM_TYPE, num_hidden=12, num_layers=2, dropout_inputs=0.5, dropout_states=0.5,
residual=False, forget_bias=1.0), rnn.LayerNormLSTMCell),
(rnn.RNNConfig(cell_type=C.LNGLSTM_TYPE, num_hidden=55, num_layers=2, dropout_inputs=0.5, dropout_states=0.5,
residual=False, forget_bias=0.0), rnn.LayerNormPerGateLSTMCell),
(rnn.RNNConfig(cell_type=C.GRU_TYPE, num_hidden=200, num_layers=2, dropout_inputs=0.9, dropout_states=0.9,
residual=False, forget_bias=0.0), mx.rnn.GRUCell),
(rnn.RNNConfig(cell_type=C.LNGRU_TYPE, num_hidden=100, num_layers=2, dropout_inputs=0.0, dropout_states=0.5,
residual=False, forget_bias=0.0), rnn.LayerNormGRUCell),
(rnn.RNNConfig(cell_type=C.LNGGRU_TYPE, num_hidden=2, num_layers=2, dropout_inputs=0.0, dropout_states=0.0,
residual=True, forget_bias=0.0), rnn.LayerNormPerGateGRUCell),
(rnn.RNNConfig(cell_type=C.LSTM_TYPE, num_hidden=2, num_layers=3, dropout_inputs=0.0, dropout_states=0.0,
residual=True, forget_bias=0.0), mx.rnn.LSTMCell)]
@pytest.mark.parametrize("config, expected_cell", get_rnn_test_cases)
def test_get_stacked_rnn(config, expected_cell):
cell = rnn.get_stacked_rnn(config, prefix=config.cell_type)
assert isinstance(cell, mx.rnn.SequentialRNNCell)
cell = cell._cells[-1] # last cell
if config.residual:
assert isinstance(cell, mx.rnn.ResidualCell)
cell = cell.base_cell
if config.dropout_inputs > 0 or config.dropout_states > 0:
assert isinstance(cell, rnn.VariationalDropoutCell)
cell = cell.base_cell
assert isinstance(cell, expected_cell)
    assert cell._num_hidden == config.num_hidden
def test_cell_parallel_input():
num_hidden = 128
batch_size = 256
parallel_size = 64
input_shape = (batch_size, num_hidden)
states_shape = (batch_size, num_hidden)
parallel_shape = (batch_size, parallel_size)
inp = mx.sym.Variable("input")
parallel_input = mx.sym.Variable("parallel")
params = mx.rnn.RNNParams("params_")
states = mx.sym.Variable("states")
default_cell = mx.rnn.RNNCell(num_hidden, params=params)
default_cell_output, _ = default_cell(mx.sym.concat(inp, parallel_input), states)
inner_rnn_cell = mx.rnn.RNNCell(num_hidden, params=params)
parallel_cell = rnn.ParallelInputCell(inner_rnn_cell)
parallel_cell_output, _ = parallel_cell(inp, parallel_input, states)
input_nd = mx.nd.random_uniform(shape=input_shape)
states_nd = mx.nd.random_uniform(shape=states_shape)
parallel_nd = mx.nd.random_uniform(shape=parallel_shape)
arg_shapes, _, _ = default_cell_output.infer_shape(input=input_shape, states=states_shape, parallel=parallel_shape)
params_with_shapes = filter(lambda a: a[0].startswith("params_"),
[x for x in zip(default_cell_output.list_arguments(), arg_shapes)]
)
params_nd = {}
for name, shape in params_with_shapes:
params_nd[name] = mx.nd.random_uniform(shape=shape)
out_default_residual = default_cell_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
out_parallel = parallel_cell_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
assert np.isclose(out_default_residual.asnumpy(), out_parallel.asnumpy()).all()
def test_residual_cell_parallel_input():
num_hidden = 128
batch_size = 256
parallel_size = 64
input_shape = (batch_size, num_hidden)
states_shape = (batch_size, num_hidden)
parallel_shape = (batch_size, parallel_size)
inp = mx.sym.Variable("input")
parallel_input = mx.sym.Variable("parallel")
params = mx.rnn.RNNParams("params_")
states = mx.sym.Variable("states")
default_cell = mx.rnn.RNNCell(num_hidden, params=params)
default_cell_output, _ = default_cell(mx.sym.concat(inp, parallel_input), states)
default_residual_output = mx.sym.elemwise_add(default_cell_output, inp)
inner_rnn_cell = mx.rnn.RNNCell(num_hidden, params=params)
parallel_cell = rnn.ResidualCellParallelInput(inner_rnn_cell)
parallel_cell_output, _ = parallel_cell(inp, parallel_input, states)
input_nd = mx.nd.random_uniform(shape=input_shape)
states_nd = mx.nd.random_uniform(shape=states_shape)
parallel_nd = mx.nd.random_uniform(shape=parallel_shape)
arg_shapes, _, _ = default_residual_output.infer_shape(input=input_shape, states=states_shape, parallel=parallel_shape)
params_with_shapes = filter(lambda a: a[0].startswith("params_"),
[x for x in zip(default_residual_output.list_arguments(), arg_shapes)]
)
params_nd = {}
for name, shape in params_with_shapes:
params_nd[name] = mx.nd.random_uniform(shape=shape)
out_default_residual = default_residual_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
out_parallel = parallel_cell_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
assert np.isclose(out_default_residual.asnumpy(), out_parallel.asnumpy()).all()
def test_sequential_rnn_cell_parallel_input():
num_hidden = 128
batch_size = 256
parallel_size = 64
n_layers = 3
input_shape = (batch_size, num_hidden)
states_shape = (batch_size, num_hidden)
parallel_shape = (batch_size, parallel_size)
input = mx.sym.Variable("input")
parallel_input = mx.sym.Variable("parallel")
params = mx.rnn.RNNParams("params_") # To simplify, we will share the parameters across all layers
states = mx.sym.Variable("states") # ...and also the previous states
last_output = input
for _ in range(n_layers):
cell = mx.rnn.RNNCell(num_hidden, params=params)
last_output, _ = cell(mx.sym.concat(last_output, parallel_input), states)
manual_stacking_output = last_output
sequential_cell = rnn.SequentialRNNCellParallelInput()
for _ in range(n_layers):
cell = mx.rnn.RNNCell(num_hidden, params=params)
cell = rnn.ParallelInputCell(cell)
sequential_cell.add(cell)
sequential_output, _ = sequential_cell(input, parallel_input, [states]*n_layers)
input_nd = mx.nd.random_uniform(shape=input_shape)
states_nd = mx.nd.random_uniform(shape=states_shape)
parallel_nd = mx.nd.random_uniform(shape=parallel_shape)
arg_shapes, _, _ = manual_stacking_output.infer_shape(input=input_shape, states=states_shape, parallel=parallel_shape)
params_with_shapes = filter(lambda a: a[0].startswith("params_"),
[x for x in zip(manual_stacking_output.list_arguments(), arg_shapes)]
)
params_nd = {}
for name, shape in params_with_shapes:
params_nd[name] = mx.nd.random_uniform(shape=shape)
out_manual = manual_stacking_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
out_sequential = sequential_output.eval(input=input_nd,
states=states_nd,
parallel=parallel_nd,
**params_nd)[0]
assert np.isclose(out_manual.asnumpy(), out_sequential.asnumpy()).all()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_translate.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import io
import unittest
import unittest.mock
import pytest
import sockeye.inference
import sockeye.output_handler
import sockeye.translate
TEST_DATA = "Test file line 1\n" \
"Test file line 2\n"
@pytest.fixture
def mock_translator():
return unittest.mock.Mock(spec=sockeye.inference.Translator)
@pytest.fixture
def mock_output_handler():
return unittest.mock.Mock(spec=sockeye.output_handler.OutputHandler)
def mock_open(*args, **kargs):
f_open = unittest.mock.mock_open(*args, **kargs)
f_open.return_value.__iter__ = lambda self: iter(self.readline, '')
return f_open
@unittest.mock.patch("builtins.open", new_callable=mock_open, read_data=TEST_DATA)
def test_translate_by_file(mock_file, mock_translator, mock_output_handler):
mock_translator.translate.return_value = ['', '']
mock_translator.batch_size = 1
mock_file.return_value = TEST_DATA.splitlines()
sockeye.translate.read_and_translate(translator=mock_translator, output_handler=mock_output_handler,
chunk_size=2, source='/dev/null')
# Ensure that our translator has the correct input passed to it.
mock_translator.make_input.assert_any_call(1, "Test file line 1")
mock_translator.make_input.assert_any_call(2, "Test file line 2")
# Ensure translate gets called once. Input here will be a dummy mocked result, so we'll ignore it.
assert mock_translator.translate.call_count == 1
@unittest.mock.patch("sys.stdin", io.StringIO(TEST_DATA))
def test_translate_by_stdin_chunk2(mock_translator, mock_output_handler):
mock_translator.translate.return_value = ['', '']
mock_translator.batch_size = 1
sockeye.translate.read_and_translate(translator=mock_translator,
output_handler=mock_output_handler,
chunk_size=2)
# Ensure that our translator has the correct input passed to it.
mock_translator.make_input.assert_any_call(1, "Test file line 1\n")
mock_translator.make_input.assert_any_call(2, "Test file line 2\n")
# Ensure translate gets called once. Input here will be a dummy mocked result, so we'll ignore it.
assert mock_translator.translate.call_count == 1
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_utils.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import tempfile
import math
import mxnet as mx
import numpy as np
import pytest
from sockeye import __version__
from sockeye import utils
@pytest.mark.parametrize("some_list, expected", [
([1, 2, 3, 4, 5, 6, 7, 8], [[1, 2, 3], [4, 5, 6], [7, 8]]),
([1, 2], [[1, 2]]),
([1, 2, 3], [[1, 2, 3]]),
([1, 2, 3, 4], [[1, 2, 3], [4]]),
])
def test_chunks(some_list, expected):
chunk_size = 3
chunked_list = list(utils.chunks(some_list, chunk_size))
assert chunked_list == expected
def test_get_alignments():
attention_matrix = np.asarray([[0.1, 0.4, 0.5],
[0.2, 0.8, 0.0],
[0.4, 0.4, 0.2]])
test_cases = [(0.5, [(1, 1)]),
(0.8, []),
(0.1, [(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 2)])]
for threshold, expected_alignment in test_cases:
alignment = list(utils.get_alignments(attention_matrix, threshold=threshold))
assert alignment == expected_alignment
device_params = [([-4, 3, 5], 6, [0, 1, 2, 3, 4, 5]),
([-2, 3, -2, 5], 6, [0, 1, 2, 3, 4, 5]),
([-1], 1, [0]),
([1], 1, [1])]
@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params)
def test_expand_requested_device_ids(requested_device_ids, num_gpus_available, expected):
assert set(utils._expand_requested_device_ids(requested_device_ids, num_gpus_available)) == set(expected)
@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params)
def test_aquire_gpus(tmpdir, requested_device_ids, num_gpus_available, expected):
with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
num_gpus_available=num_gpus_available) as acquired_gpus:
assert set(acquired_gpus) == set(expected)
# We expect the following settings to raise a ValueError
device_params_expected_exception = [
# requesting the same gpu twice
([-4, 3, 3, 5], 5),
# too few GPUs available
([-4, 3, 5], 5),
([3, 5], 1),
([-2], 1),
([-1, -1], 1)]
@pytest.mark.parametrize("requested_device_ids, num_gpus_available", device_params_expected_exception)
def test_expand_requested_device_ids_exception(requested_device_ids, num_gpus_available):
with pytest.raises(ValueError):
utils._expand_requested_device_ids(requested_device_ids, num_gpus_available)
@pytest.mark.parametrize("requested_device_ids, num_gpus_available", device_params_expected_exception)
def test_aquire_gpus_exception(tmpdir, requested_device_ids, num_gpus_available):
with pytest.raises(ValueError):
with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
num_gpus_available=num_gpus_available) as _:
pass
# Let's assume GPU 1 is locked already
device_params_1_locked = [([-4, 3, 5], 7, [0, 2, 3, 4, 5, 6]),
([-2, 3, -2, 5], 7, [0, 2, 3, 4, 5, 6])]
@pytest.mark.parametrize("requested_device_ids, num_gpus_available, expected", device_params_1_locked)
def test_aquire_gpus_1_locked(tmpdir, requested_device_ids, num_gpus_available, expected):
gpu_1 = 1
with utils.GpuFileLock([gpu_1], str(tmpdir)) as lock:
with utils.acquire_gpus(requested_device_ids, lock_dir=str(tmpdir),
num_gpus_available=num_gpus_available) as acquired_gpus:
assert set(acquired_gpus) == set(expected)
def test_acquire_gpus_exception_propagation(tmpdir):
raised_exception = RuntimeError("This exception should be propagated properly.")
caught_exception = None
try:
with utils.acquire_gpus([-1, 4, -1], lock_dir=str(tmpdir), num_gpus_available=12) as _:
raise raised_exception
except Exception as e:
caught_exception = e
assert caught_exception is raised_exception
def test_gpu_file_lock_cleanup(tmpdir):
gpu_id = 0
candidates = [gpu_id]
# Test that the lock files get created and clean up
with utils.GpuFileLock(candidates, str(tmpdir)) as lock:
assert lock == gpu_id
assert tmpdir.join("sockeye.gpu0.lock").check(), "Lock file did not exist."
assert not tmpdir.join("sockeye.gpu1.lock").check(), "Unrelated lock file did exist"
assert not tmpdir.join("sockeye.gpu0.lock").check(), "Lock file was not cleaned up."
def test_gpu_file_lock_exception_propagation(tmpdir):
gpu_ids = [0]
# Test that exceptions are properly propagated
raised_exception = RuntimeError("This exception should be propagated properly.")
caught_exception = None
try:
with utils.GpuFileLock(gpu_ids, str(tmpdir)) as lock:
raise raised_exception
except Exception as e:
caught_exception = e
assert caught_exception is raised_exception
def test_gpu_file_lock_locking(tmpdir):
# the second time we try to acquire a lock for the same device we should not succeed
gpu_id = 0
candidates = [gpu_id]
with utils.GpuFileLock(candidates, str(tmpdir)) as lock_inner:
assert lock_inner == 0
with utils.GpuFileLock(candidates, str(tmpdir)) as lock_outer:
assert lock_outer is None
def test_gpu_file_lock_permission_exception(tmpdir):
with pytest.raises(PermissionError):
tmpdir = tmpdir.mkdir("sub")
# remove permissions
tmpdir.chmod(0)
with utils.GpuFileLock([0], str(tmpdir)) as lock:
assert False, "We expect to raise an exception when aquiring the lock and never reach this code."
def test_check_condition_true():
utils.check_condition(1 == 1, "Nice")
def test_check_condition_false():
with pytest.raises(utils.SockeyeError) as e:
utils.check_condition(1 == 2, "Wrong")
assert "Wrong" == str(e.value)
@pytest.mark.parametrize("version_string,expected_version", [("1.0.3", ("1", "0", "3")),
("1.0.2.3", ("1", "0", "2.3"))])
def test_parse_version(version_string, expected_version):
assert expected_version == utils.parse_version(version_string)
def test_check_version_disregards_minor():
release, major, minor = utils.parse_version(__version__)
other_minor_version = "%s.%s.%d" % (release, major, int(minor) + 1)
utils.check_version(other_minor_version)
def _get_later_major_version():
release, major, minor = utils.parse_version(__version__)
return "%s.%d.%s" % (release, int(major) + 1, minor)
def test_check_version_checks_major():
version = _get_later_major_version()
with pytest.raises(utils.SockeyeError) as e:
utils.check_version(version)
assert "Given major version (%s) does not match major code version (%s)" % (version, __version__) == str(e.value)
@pytest.mark.parametrize("samples,expected_mean, expected_variance",
[
([1, 2], 1.5, 0.25),
([4., 100., 12., -3, 1000, 1., -200], 130.57142857142858, 132975.38775510204),
])
def test_online_mean_and_variance(samples, expected_mean, expected_variance):
mean_and_variance = utils.OnlineMeanAndVariance()
for sample in samples:
mean_and_variance.update(sample)
assert np.isclose(mean_and_variance.mean, expected_mean)
assert np.isclose(mean_and_variance.variance, expected_variance)
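# Note that the expected variance here is the population variance (ddof=0): for [1, 2] the
# mean is 1.5 and the variance 0.25, not the Bessel-corrected sample variance of 0.5.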
@pytest.mark.parametrize("samples,expected_mean",
[
([], 0.),
([5.], 5.),
])
def test_online_mean_and_variance_nan(samples, expected_mean):
mean_and_variance = utils.OnlineMeanAndVariance()
for sample in samples:
mean_and_variance.update(sample)
assert np.isclose(mean_and_variance.mean, expected_mean)
assert math.isnan(mean_and_variance.variance)
get_tokens_tests = [("this is a line \n", ["this", "is", "a", "line"]),
(" a \tb \r \n", ["a", "b"])]
@pytest.mark.parametrize("line, expected_tokens", get_tokens_tests)
def test_get_tokens(line, expected_tokens):
tokens = list(utils.get_tokens(line))
assert tokens == expected_tokens
def test_average_arrays():
n = 4
shape = (12, 14)
arrays = [np.random.uniform(0, 1, (12, 14)) for _ in range(n)]
expected_average = np.zeros(shape)
for array in arrays:
expected_average += array
    expected_average /= n
mx_arrays = [mx.nd.array(a) for a in arrays]
assert np.isclose(utils.average_arrays(mx_arrays).asnumpy(), expected_average).all()
with pytest.raises(utils.SockeyeError) as e:
other_shape = (12, 13)
utils.average_arrays(mx_arrays + [mx.nd.zeros(other_shape)])
assert "nd array shapes do not match" == str(e.value)
def test_save_and_load_params():
array = mx.nd.uniform(0, 1, (10, 12))
arg_params = {"array": array}
aux_params = {"array": array}
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "params")
utils.save_params(arg_params, path, aux_params=aux_params)
params = mx.nd.load(path)
assert len(params.keys()) == 2
assert "arg:array" in params.keys()
assert "aux:array" in params.keys()
loaded_arg_params, loaded_aux_params = utils.load_params(path)
assert "array" in loaded_arg_params
assert "array" in loaded_aux_params
assert np.isclose(loaded_arg_params['array'].asnumpy(), array.asnumpy()).all()
assert np.isclose(loaded_aux_params['array'].asnumpy(), array.asnumpy()).all()
def test_print_value():
data = mx.sym.Variable("data")
weights = mx.sym.Variable("weights")
softmax_label = mx.sym.Variable("softmax_label")
fc = mx.sym.FullyConnected(data=data, num_hidden=128, weight=weights, no_bias=True)
out = mx.sym.SoftmaxOutput(data=fc, label=softmax_label, name="softmax")
fc_print = mx.sym.Custom(op_type="PrintValue", data=fc, print_name="FullyConnected")
out_print = mx.sym.SoftmaxOutput(data=fc_print, label=softmax_label, name="softmax")
data_np = np.random.rand(1, 256)
weights_np = np.random.rand(128, 256)
label_np = np.random.rand(1, 128)
executor_base = out.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
executor_base.arg_dict["data"][:] = data_np
executor_base.arg_dict["weights"][:] = weights_np
executor_base.arg_dict["softmax_label"][:] = label_np
executor_print = out_print.simple_bind(mx.cpu(), data=(1, 256), softmax_label=(1, 128), weights=(128, 256))
executor_print.arg_dict["data"][:] = data_np
executor_print.arg_dict["weights"][:] = weights_np
executor_print.arg_dict["softmax_label"][:] = label_np
output_base = executor_base.forward(is_train=True)[0]
output_print = executor_print.forward(is_train=True)[0]
assert np.isclose(output_base.asnumpy(), output_print.asnumpy()).all()
executor_base.backward()
executor_print.backward()
assert np.isclose(executor_base.grad_arrays[1].asnumpy(), executor_print.grad_arrays[1].asnumpy()).all()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_vocab.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
import sockeye.constants as C
from sockeye.vocab import build_vocab
test_vocab = [
# Example 1
(["one two three", "one two three"], 3, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
(["one two three", "one two three"], 3, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5, "one": 6}),
(["one two three", "one two three"], 2, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "two": 4, "three": 5}),
# Example 2
(["one one two three ", "one two three"], 3, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5, "three": 6}),
(["one one two three ", "one two three"], 3, 2,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5, "three": 6}),
(["one one two three ", "one two three"], 3, 3,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4}),
(["one one two three ", "one two three"], 2, 1,
{"<pad>": 0, "<unk>": 1, "<s>": 2, "</s>": 3, "one": 4, "two": 5}),
]
@pytest.mark.parametrize("data,size,min_count,expected", test_vocab)
def test_build_vocab(data, size, min_count, expected):
vocab = build_vocab(data, size, min_count)
assert vocab == expected
test_constants = [
# Example 1
(["one two three", "one two three"], 3, 1, C.VOCAB_SYMBOLS),
(["one two three", "one two three"], 3, 2, C.VOCAB_SYMBOLS),
(["one two three", "one two three"], 2, 2, C.VOCAB_SYMBOLS),
# Example 2
(["one one two three ", "one two three"], 3, 1, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 3, 2, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 3, 3, C.VOCAB_SYMBOLS),
(["one one two three ", "one two three"], 2, 1, C.VOCAB_SYMBOLS),
]
@pytest.mark.parametrize("data,size,min_count,constants", test_constants)
def test_constants_in_vocab(data, size, min_count, constants):
vocab = build_vocab(data, size, min_count)
for const in constants:
assert const in vocab
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/tutorials/seqcopy/genseqcopy.py | import random
random.seed(12)
num_samples = 100000
num_dev = 1000
min_seq_len = 10
max_seq_len = 30
vocab_size = 10
samples = set()
for i in range(0, num_samples):
seq_len = random.randint(min_seq_len, max_seq_len)
samples.add(" ".join(str(random.randint(0, vocab_size)) for j in range(0, seq_len)))
samples = list(samples)
train_samples = samples[:num_samples-num_dev]
dev_samples = samples[num_samples-num_dev:]
with open("train.source", "w") as source, open("train.target", "w") as target:
for sample in train_samples:
source.write(sample + "\n")
target.write(sample + "\n")
with open("dev.source", "w") as source, open("dev.target", "w") as target:
for sample in dev_samples:
source.write(sample + "\n")
target.write(sample + "\n")
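# The four files written above form a toy sequence-copy dataset (target == source) of random
# digit sequences, split into roughly 99,000 training and 1,000 dev samples (slightly fewer
# if the set() above collapsed duplicate sequences).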
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/data_utils.py | # Modification of https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/rnn/translate/data_utils.py
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for downloading data from WMT, tokenizing, vocabularies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import gzip
import time
import tarfile
from tqdm import *
from glob import glob
from collections import defaultdict
from gensim import corpora
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
from tensorflow.python.platform import gfile
# Regular expressions used to tokenize.
_WORD_SPLIT = re.compile("([.,!?\"':;)(])")
_DIGIT_RE = re.compile(r"(^| )\d+")
_ENTITY = "@entity"
_BAR = "_BAR"
_UNK = "_UNK"
BAR_ID = 0
UNK_ID = 1
_START_VOCAB = [_BAR, _UNK]
tokenizer = RegexpTokenizer(r'@?\w+')
cachedStopWords = stopwords.words("english")
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
words = tokenizer.tokenize(sentence)
return [w for w in words if w not in stopwords.words("english")]
def create_vocabulary(vocabulary_path, context, max_vocabulary_size,
tokenizer=None, normalize_digits=True):
"""Create vocabulary file (if it does not exist yet) from data file.
Data file is assumed to contain one sentence per line. Each sentence is
tokenized and digits are normalized (if normalize_digits is set).
Vocabulary contains the most-frequent tokens up to max_vocabulary_size.
We write it to vocabulary_path in a one-token-per-line format, so that later
token in the first line gets id=0, second line gets id=1, and so on.
Args:
vocabulary_path: path where the vocabulary will be created.
data_path: data file that will be used to create vocabulary.
max_vocabulary_size: limit on the size of the created vocabulary.
tokenizer: a function to use to tokenize each data sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
"""
if not gfile.Exists(vocabulary_path):
t0 = time.time()
print("Creating vocabulary %s" % (vocabulary_path))
texts = [word for word in context.lower().split() if word not in cachedStopWords]
dictionary = corpora.Dictionary([texts], prune_at=max_vocabulary_size)
print("Tokenize : %.4fs" % (t0 - time.time()))
dictionary.save(vocabulary_path)
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if gfile.Exists(vocabulary_path):
vocab = corpora.Dictionary.load(vocabulary_path)
return vocab.token2id, vocab.token2id.keys()
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def sentence_to_token_ids(sentence, vocabulary,
tokenizer=None, normalize_digits=True):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: a string, the sentence to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, digit sequences are blanked out before the
      vocabulary lookup (they are not replaced by 0s in this modified version).
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = basic_tokenizer(sentence)
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
# Normalize digits by 0 before looking words up in the vocabulary.
return [vocabulary.get(re.sub(_DIGIT_RE, " ", w), UNK_ID) for w in words]
def data_to_token_ids(data_path, target_path, vocab,
tokenizer=None, normalize_digits=True):
"""Tokenize data file and turn into token-ids using given vocabulary file.
This function loads data line-by-line from data_path, calls the above
sentence_to_token_ids, and saves the result to target_path. See comment
for sentence_to_token_ids on the details of token-ids format.
Args:
data_path: path to the data file in one-sentence-per-line format.
target_path: path where the file with token-ids will be created.
    vocab: the vocabulary, a dictionary mapping tokens to integers
      (as returned by initialize_vocabulary).
    tokenizer: a function to use to tokenize each sentence;
      if None, basic_tokenizer will be used.
    normalize_digits: Boolean; if true, digit sequences are blanked out before the
      vocabulary lookup (they are not replaced by 0s in this modified version).
"""
#if not gfile.Exists(target_path):
if True:
with gfile.GFile(data_path, mode="r") as data_file:
counter = 0
results = []
for line in data_file:
if counter == 0:
results.append(line)
elif counter == 4:
entity, ans = line.split(":", 1)
try:
results.append("%s:%s" % (vocab[entity[:]], ans))
except:
continue
else:
token_ids = sentence_to_token_ids(line, vocab, tokenizer,
normalize_digits)
results.append(" ".join([str(tok) for tok in token_ids]) + "\n")
if line == "\n":
counter += 1
try:
len_d, len_q = len(results[2].split()), len(results[4].split())
except:
return
with gfile.GFile("%s_%s" % (target_path, len_d + len_q), mode="w") as tokens_file:
tokens_file.writelines(results)
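# Note: the token-ids file is written to "<target_path>_<N>" where N is the combined number
# of document and question tokens; the "*.question.ids<vocab_size>_*" glob in load_dataset
# below depends on this suffix.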
def get_all_context(dir_name, context_fname):
context = ""
for fname in tqdm(glob(os.path.join(dir_name, "*.question"))):
with open(fname) as f:
try:
lines = f.read().split("\n\n")
context += lines[1] + " "
context += lines[4].replace(":"," ") + " "
except:
print(" [!] Error occured for %s" % fname)
print(" [*] Writing %s ..." % context_fname)
  with open(context_fname, 'w') as f:
f.write(context)
return context
def questions_to_token_ids(data_path, vocab_fname, vocab_size):
vocab, _ = initialize_vocabulary(vocab_fname)
for fname in tqdm(glob(os.path.join(data_path, "*.question"))):
data_to_token_ids(fname, fname + ".ids%s" % vocab_size, vocab)
def prepare_data(data_dir, dataset_name, vocab_size):
train_path = os.path.join(data_dir, dataset_name, 'questions', 'training')
context_fname = os.path.join(data_dir, dataset_name, '%s.context' % dataset_name)
vocab_fname = os.path.join(data_dir, dataset_name, '%s.vocab%s' % (dataset_name, vocab_size))
if not os.path.exists(context_fname):
print(" [*] Combining all contexts for %s in %s ..." % (dataset_name, train_path))
context = get_all_context(train_path, context_fname)
else:
context = gfile.GFile(context_fname, mode="r").read()
print(" [*] Skip combining all contexts")
if not os.path.exists(vocab_fname):
print(" [*] Create vocab from %s to %s ..." % (context_fname, vocab_fname))
create_vocabulary(vocab_fname, context, vocab_size)
else:
print(" [*] Skip creating vocab")
print(" [*] Convert data in %s into vocab indicies..." % (train_path))
questions_to_token_ids(train_path, vocab_fname, vocab_size)
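# Example layout this pipeline expects (paths are illustrative): given DeepMind-style question
# files under data/cnn/questions/training/*.question, prepare_data("data", "cnn", 100000)
# writes the combined context to data/cnn/cnn.context, the gensim vocabulary to
# data/cnn/cnn.vocab100000, and one "*.question.ids100000_<N>" file per training question.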
def load_vocab(data_dir, dataset_name, vocab_size):
vocab_fname = os.path.join(data_dir, dataset_name, "%s.vocab%s" % (dataset_name, vocab_size))
print(" [*] Loading vocab from %s ..." % vocab_fname)
return initialize_vocabulary(vocab_fname)
def load_dataset(data_dir, dataset_name, vocab_size):
train_files = glob(os.path.join(data_dir, dataset_name, "questions",
"training", "*.question.ids%s_*" % (vocab_size)))
max_idx = len(train_files)
for idx, fname in enumerate(train_files):
with open(fname) as f:
yield f.read().split("\n\n"), idx, max_idx
if __name__ == '__main__':
if len(sys.argv) < 3:
print(" [*] usage: python data_utils.py DATA_DIR DATASET_NAME VOCAB_SIZE")
else:
data_dir = sys.argv[1]
dataset_name = sys.argv[2]
if len(sys.argv) > 3:
vocab_size = sys.argv[3]
else:
vocab_size = 100000
prepare_data(data_dir, dataset_name, int(vocab_size))
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/main.py | import os
import numpy as np
import tensorflow as tf
from model import DeepLSTM, DeepBiLSTM, AttentiveReader
from utils import pp
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_integer("vocab_size", 10000, "The size of vocabulary [10000]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [32]")
flags.DEFINE_float("learning_rate", 5e-5, "Learning rate [0.00005]")
flags.DEFINE_float("momentum", 0.9, "Momentum of RMSProp [0.9]")
flags.DEFINE_float("decay", 0.95, "Decay of RMSProp [0.95]")
flags.DEFINE_string("model", "LSTM", "The type of model to train and test [LSTM, BiLSTM, Attentive, Impatient]")
flags.DEFINE_string("data_dir", "data", "The name of data directory [data]")
flags.DEFINE_string("dataset", "cnn", "The name of dataset [cnn, dailymail]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_boolean("forward_only", False, "True for forward only, False for training [False]")
FLAGS = flags.FLAGS
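# Example invocation (illustrative; assumes data_utils.py has already been run for the same
# dataset and vocabulary size):
#   python main.py --model LSTM --dataset cnn --data_dir data --vocab_size 100000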
model_dict = {
'LSTM': DeepLSTM,
'BiLSTM': DeepBiLSTM,
'Attentive': AttentiveReader,
'Impatient': None,
}
def main(_):
pp.pprint(flags.FLAGS.__flags)
if not os.path.exists(FLAGS.checkpoint_dir):
print(" [*] Creating checkpoint directory...")
os.makedirs(FLAGS.checkpoint_dir)
with tf.device('/cpu:0'), tf.Session() as sess:
model = model_dict[FLAGS.model](batch_size=FLAGS.batch_size,
checkpoint_dir=FLAGS.checkpoint_dir, forward_only=FLAGS.forward_only)
if not FLAGS.forward_only:
model.train(sess, FLAGS.vocab_size, FLAGS.epoch,
FLAGS.learning_rate, FLAGS.momentum, FLAGS.decay,
FLAGS.data_dir, FLAGS.dataset)
else:
model.load(sess, FLAGS.checkpoint_dir, FLAGS.dataset)
if __name__ == '__main__':
tf.app.run()
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/__init__.py | from base_model import Model
from deep_lstm import DeepLSTM
from deep_bi_lstm import DeepBiLSTM
from attentive import AttentiveReader
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/attentive.py | import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
from base_model import Model
class AttentiveReader(Model):
"""Attentive Reader."""
def __init__(self, vocab_size, size=256,
learning_rate=1e-4, batch_size=32,
dropout=0.1, max_time_unit=100):
"""Initialize the parameters for an Attentive Reader model.
Args:
vocab_size: int, The dimensionality of the input vocab
size: int, The dimensionality of the inputs into the Deep LSTM cell [32, 64, 256]
learning_rate: float, [1e-3, 5e-4, 1e-4, 5e-5]
batch_size: int, The size of a batch [16, 32]
dropout: unit Tensor or float between 0 and 1 [0.0, 0.1, 0.2]
max_time_unit: int, The max time unit [100]
"""
    super(AttentiveReader, self).__init__()
self.vocab_size = vocab_size
self.size = size
self.learning_rate = learning_rate
self.batch_size = batch_size
self.dropout = dropout
self.max_time_unit = max_time_unit
self.inputs = []
for idx in xrange(max_time_unit):
self.inputs.append(tf.placeholder(tf.float32, [batch_size, vocab_size]))
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/base_model.py | import os
from glob import glob
import tensorflow as tf
class Model(object):
"""Abstract object representing an Reader model."""
def __init__(self):
self.vocab = None
self.data = None
def save(self, sess, checkpoint_dir, dataset_name, global_step=None):
self.saver = tf.train.Saver()
print(" [*] Saving checkpoints...")
model_name = type(self).__name__ or "Reader"
if self.batch_size:
model_dir = "%s_%s_%s" % (model_name, dataset_name, self.batch_size)
else:
model_dir = dataset_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess,
os.path.join(checkpoint_dir, model_name), global_step=global_step)
def load(self, sess, checkpoint_dir, dataset_name):
model_name = type(self).__name__ or "Reader"
self.saver = tf.train.Saver()
print(" [*] Loading checkpoints...")
if self.batch_size:
model_dir = "%s_%s_%s" % (model_name, dataset_name, self.batch_size)
else:
model_dir = dataset_name
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/cells.py | import tensorflow as tf
from tensorflow.models.rnn.rnn_cell import RNNCell, linear
class LSTMCell(RNNCell):
"""Almost same with tf.models.rnn.rnn_cell.BasicLSTMCell
except adding c to inputs and h to calculating gates,
adding a skip connection from the input of current time t,
and returning only h not concat of c and h.
"""
def __init__(self, num_units, forget_bias=1.0):
self._num_units = num_units
self._forget_bias = forget_bias
self.c = None
@property
def input_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@property
def state_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
"""Long short-term memory cell (LSTM)."""
with tf.variable_scope("BasicLSTMCell"):
h = state
      if self.c is None: self.c = tf.reshape(tf.zeros_like(h), [-1, self._num_units])
concat = linear([inputs, h, self.c], 4 * self._num_units, True)
i, j, f, o = tf.split(1, 4, concat)
self.c = self.c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
new_h = tf.tanh(self.c) * tf.sigmoid(o)
softmax_w = tf.get_variable("softmax_w", [self._num_units, self._num_units])
softmax_b = tf.get_variable("softmax_b", [self._num_units])
new_y = tf.nn.xw_plus_b(new_h, softmax_w, softmax_b)
return new_y, new_y
class MultiRNNCellWithSkipConn(RNNCell):
"""Almost same with tf.models.rnn.rnn_cell.MultiRnnCell adding
a skip connection from the input of current time t and using
_num_units not state size because LSTMCell returns only [h] not [c, h].
"""
def __init__(self, cells):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
Raises:
ValueError: if cells is empty (not allowed) or if their sizes don't match.
"""
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
for i in xrange(len(cells) - 1):
if cells[i + 1].input_size != cells[i].output_size:
raise ValueError("In MultiRNNCell, the input size of each next"
" cell must match the output size of the previous one."
" Mismatched output size in cell %d." % i)
self._cells = cells
@property
def input_size(self):
return self._cells[0].input_size
@property
def output_size(self):
return self._cells[-1].output_size
@property
def state_size(self):
return sum([cell.state_size for cell in self._cells])
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
with tf.variable_scope("MultiRNNCellWithConn"):
cur_state_pos = 0
first_layer_input = cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.variable_scope("Cell%d" % i):
cur_state = tf.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
          # Add a skip connection from the input of the current time t:
          # the first layer sees [x_t, 0]; upper layers see [h_{l-1}, x_t].
          if i == 0:
            skip_input = tf.zeros_like(first_layer_input)
          else:
            skip_input = first_layer_input
          cur_inp, new_state = cell(tf.concat(1, [cur_inp, skip_input]), cur_state)
new_states.append(new_state)
return cur_inp, tf.concat(1, new_states)
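def _lstm_cell_equations_sketch(x, h, c, W, b, forget_bias=1.0):
  """Hedged numpy sketch, not part of the original repo: it spells out the
  gate computation used by LSTMCell above, where the previous cell state c
  joins the inputs and h in the linear map; the real cell additionally
  projects new_h with softmax_w/softmax_b before returning it. W is assumed
  to have shape (x_dim + h_dim + c_dim, 4 * num_units) and b shape
  (4 * num_units,); both are illustrative placeholders.
  """
  import numpy as np
  def sigmoid(z):
    return 1. / (1. + np.exp(-z))
  concat = np.concatenate([x, h, c], axis=1).dot(W) + b
  i, j, f, o = np.split(concat, 4, axis=1)
  new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
  new_h = np.tanh(new_c) * sigmoid(o)
  return new_h, new_c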
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/deep_bi_lstm.py | import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
from base_model import Model
from data_utils import load_vocab, load_dataset
class DeepBiLSTM(Model):
"""Deep Bidirectional LSTM model."""
def __init__(self, vocab_size, size=256, depth=2,
learning_rate=1e-4, batch_size=32,
keep_prob=0.1, num_steps=100,
checkpoint_dir="checkpoint", forward_only=False):
"""Initialize the parameters for an Deep Bidirectional LSTM model.
Args:
vocab_size: int, The dimensionality of the input vocab
size: int, The dimensionality of the inputs into the Deep LSTM cell [32, 64, 256]
learning_rate: float, [1e-3, 5e-4, 1e-4, 5e-5]
batch_size: int, The size of a batch [16, 32]
keep_prob: unit Tensor or float between 0 and 1 [0.0, 0.1, 0.2]
num_steps: int, The max time unit [100]
"""
super(DeepBiLSTM, self).__init__()
self.vocab_size = int(vocab_size)
self.size = int(size)
self.depth = int(depth)
self.learning_rate = float(learning_rate)
self.batch_size = int(batch_size)
self.keep_prob = float(keep_prob)
    self.num_steps = int(num_steps)
self.inputs = tf.placeholder(tf.int32, [self.batch_size, self.num_steps])
self.input_lengths = tf.placeholder(tf.int64, [self.batch_size])
with tf.device("/cpu:0"):
self.emb = tf.Variable(tf.truncated_normal(
[self.vocab_size, self.size], -0.1, 0.1), name='emb')
self.embed_inputs = tf.nn.embedding_lookup(self.emb, tf.transpose(self.inputs))
    self.cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
    if not forward_only and self.keep_prob < 1:
      self.cell = rnn_cell.DropoutWrapper(
          self.cell, output_keep_prob=keep_prob)
    self.stacked_cell = rnn_cell.MultiRNNCell([self.cell] * depth)
    self.initial_state = self.stacked_cell.zero_state(batch_size, tf.float32)
self.outputs, self.states = rnn.rnn(self.stacked_cell,
tf.unpack(self.embed_inputs),
dtype=tf.float32,
sequence_length=self.input_lengths,
initial_state=self.initial_state)
    output = tf.reduce_sum(tf.pack(self.outputs), 0)
def train(self, epoch=25, batch_size=1,
learning_rate=0.0002, momentum=0.9, decay=0.95,
data_dir="data", dataset_name="cnn", vocab_size=1000000):
if not self.vocab:
self.vocab, self.rev_vocab = load_vocab(data_dir, dataset_name, vocab_size)
self.opt = tf.train.RMSPropOptimizer(learning_rate,
decay=decay,
momentum=momentum)
for epoch_idx in xrange(epoch):
data_loader = load_dataset(data_dir, dataset_name, vocab_size)
contexts, questions, answers = [], [], []
for batch_idx in xrange(batch_size):
        (_, context, question, answer, _), _, _ = data_loader.next()
contexts.append(context)
questions.append(question)
        answers.append(answer)
#self.model.
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/model/deep_lstm.py | import time
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
from utils import array_pad
from base_model import Model
from cells import LSTMCell, MultiRNNCellWithSkipConn
from data_utils import load_vocab, load_dataset
class DeepLSTM(Model):
"""Deep LSTM model."""
def __init__(self, size=256, depth=3, batch_size=32,
keep_prob=0.1, max_nsteps=1000,
checkpoint_dir="checkpoint", forward_only=False):
"""Initialize the parameters for an Deep LSTM model.
Args:
size: int, The dimensionality of the inputs into the Deep LSTM cell [32, 64, 256]
learning_rate: float, [1e-3, 5e-4, 1e-4, 5e-5]
batch_size: int, The size of a batch [16, 32]
keep_prob: unit Tensor or float between 0 and 1 [0.0, 0.1, 0.2]
max_nsteps: int, The max time unit [1000]
"""
super(DeepLSTM, self).__init__()
self.size = int(size)
self.depth = int(depth)
self.batch_size = int(batch_size)
self.output_size = self.depth * self.size
self.keep_prob = float(keep_prob)
self.max_nsteps = int(max_nsteps)
self.checkpoint_dir = checkpoint_dir
start = time.clock()
print(" [*] Building Deep LSTM...")
self.cell = LSTMCell(size, forget_bias=0.0)
if not forward_only and self.keep_prob < 1:
self.cell = rnn_cell.DropoutWrapper(self.cell, output_keep_prob=keep_prob)
self.stacked_cell = MultiRNNCellWithSkipConn([self.cell] * depth)
self.initial_state = self.stacked_cell.zero_state(batch_size, tf.float32)
def prepare_model(self, data_dir, dataset_name, vocab_size):
if not self.vocab:
self.vocab, self.rev_vocab = load_vocab(data_dir, dataset_name, vocab_size)
print(" [*] Loading vocab finished.")
self.vocab_size = len(self.vocab)
self.emb = tf.get_variable("emb", [self.vocab_size, self.size])
# inputs
self.inputs = tf.placeholder(tf.int32, [self.batch_size, self.max_nsteps])
embed_inputs = tf.nn.embedding_lookup(self.emb, tf.transpose(self.inputs))
tf.histogram_summary("embed", self.emb)
# output states
_, states = rnn.rnn(self.stacked_cell,
tf.unpack(embed_inputs),
dtype=tf.float32,
initial_state=self.initial_state)
self.batch_states = tf.pack(states)
self.nstarts = tf.placeholder(tf.int32, [self.batch_size, 3])
outputs = tf.pack([tf.slice(self.batch_states, nstarts, [1, 1, self.output_size])
for idx, nstarts in enumerate(tf.unpack(self.nstarts))])
self.outputs = tf.reshape(outputs, [self.batch_size, self.output_size])
self.W = tf.get_variable("W", [self.vocab_size, self.output_size])
tf.histogram_summary("weights", self.W)
tf.histogram_summary("output", outputs)
self.y = tf.placeholder(tf.float32, [self.batch_size, self.vocab_size])
self.y_ = tf.matmul(self.outputs, self.W, transpose_b=True)
self.loss = tf.nn.softmax_cross_entropy_with_logits(self.y_, self.y)
tf.scalar_summary("loss", tf.reduce_mean(self.loss))
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
tf.scalar_summary("accuracy", self.accuracy)
print(" [*] Preparing model finished.")
def train(self, sess, vocab_size, epoch=25, learning_rate=0.0002,
momentum=0.9, decay=0.95, data_dir="data", dataset_name="cnn"):
self.prepare_model(data_dir, dataset_name, vocab_size)
start = time.clock()
print(" [*] Calculating gradient and loss...")
self.optim = tf.train.AdamOptimizer(learning_rate, 0.9).minimize(self.loss)
print(" [*] Calculating gradient and loss finished. Take %.2fs" % (time.clock() - start))
# Could not use RMSPropOptimizer because the sparse update of RMSPropOptimizer
# is not implemented yet (2016.01.24).
# self.optim = tf.train.RMSPropOptimizer(learning_rate,
# decay=decay,
# momentum=momentum).minimize(self.loss)
sess.run(tf.initialize_all_variables())
if self.load(sess, self.checkpoint_dir, dataset_name):
print(" [*] Deep LSTM checkpoint is loaded.")
else:
print(" [*] There is no checkpoint for this model.")
y = np.zeros([self.batch_size, self.vocab_size])
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/deep", sess.graph_def)
counter = 0
start_time = time.time()
for epoch_idx in xrange(epoch):
data_loader = load_dataset(data_dir, dataset_name, vocab_size)
batch_stop = False
while True:
y.fill(0)
inputs, nstarts, answers = [], [], []
batch_idx = 0
while True:
try:
(_, document, question, answer, _), data_idx, data_max_idx = data_loader.next()
except StopIteration:
batch_stop = True
break
# [0] means splitter between d and q
data = [int(d) for d in document.split()] + [0] + \
            [int(q) for q in question.split()]
if len(data) > self.max_nsteps:
continue
inputs.append(data)
nstarts.append(len(inputs[-1]) - 1)
y[batch_idx][int(answer)] = 1
batch_idx += 1
if batch_idx == self.batch_size: break
if batch_stop: break
FORCE=False
if FORCE:
inputs = array_pad(inputs, self.max_nsteps, pad=-1, force=FORCE)
nstarts = np.where(inputs==-1)[1]
inputs[inputs==-1]=0
else:
inputs = array_pad(inputs, self.max_nsteps, pad=0)
nstarts = [[nstart, idx, 0] for idx, nstart in enumerate(nstarts)]
_, summary_str, cost, accuracy = sess.run([self.optim, merged, self.loss, self.accuracy],
feed_dict={self.inputs: inputs,
self.nstarts: nstarts,
self.y: y})
if counter % 10 == 0:
writer.add_summary(summary_str, counter)
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.8f, accuracy: %.8f" \
% (epoch_idx, data_idx, data_max_idx, time.time() - start_time, np.mean(cost), accuracy))
counter += 1
self.save(sess, self.checkpoint_dir, dataset_name)
  def test(self, data_dir, dataset_name, vocab_size):
    self.prepare_model(data_dir, dataset_name, vocab_size)
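# Hedged sketch, not part of the original repo: it shows how train() above
# packs one (document, question) pair into a single id sequence with a 0
# separator, and how each nstarts entry points at the last real token of the
# padded row as (position, batch index, 0). The toy id strings are made up.
def _pack_example_sketch():
  document = "12 7 9"
  question = "7 3"
  data = [int(d) for d in document.split()] + [0] + \
      [int(q) for q in question.split()]
  # data == [12, 7, 9, 0, 7, 3]
  nstart = len(data) - 1  # index of the last token before padding
  inputs = array_pad([data], 10, pad=0)
  nstarts = [[nstart, 0, 0]]
  return inputs, nstarts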
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:机器阅读理解里程碑/code/attentive-reader-tensorflow-master/utils.py | import pprint
import numpy as np
pp = pprint.PrettyPrinter()
def array_pad(array, width, pad=-1, force=False):
max_length = max(map(len, array))
  if max_length > width and not force:
raise Exception(" [!] Max length of array %s is bigger than given %s" % (max_length, width))
result = np.full([len(array), width], pad, dtype=np.int64)
for i, row in enumerate(array):
for j, val in enumerate(row[:width-1]):
result[i][j] = val
return result
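# Hedged usage sketch, not part of the original file: rows shorter than
# `width` are right-padded with `pad`; note that only the first width-1
# entries of each row are copied, so the final column always keeps the pad
# value.
def _array_pad_demo():
  out = array_pad([[1, 2, 3], [4, 5]], 5, pad=0)
  # out == [[1, 2, 3, 0, 0],
  #         [4, 5, 0, 0, 0]]
  return out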
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:谷歌神经翻译模型/attention_is_all_you_need-master/net.py | # encoding: utf-8
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from train import source_pad_concat_convert
# linear_init = chainer.initializers.GlorotNormal()
linear_init = chainer.initializers.LeCunUniform()
def sentence_block_embed(embed, x):
""" Change implicitly embed_id function's target to ndim=2
Apply embed_id for array of ndim 2,
shape (batchsize, sentence_length),
instead for array of ndim 1.
"""
batch, length = x.shape
_, units = embed.W.shape
e = embed(x.reshape((batch * length, )))
assert(e.shape == (batch * length, units))
e = F.transpose(F.stack(F.split_axis(e, batch, axis=0), axis=0), (0, 2, 1))
assert(e.shape == (batch, units, length))
return e
def seq_func(func, x, reconstruct_shape=True):
""" Change implicitly function's target to ndim=3
Apply a given function for array of ndim 3,
shape (batchsize, dimension, sentence_length),
instead for array of ndim 2.
"""
batch, units, length = x.shape
e = F.transpose(x, (0, 2, 1)).reshape(batch * length, units)
e = func(e)
if not reconstruct_shape:
return e
out_units = e.shape[1]
e = F.transpose(e.reshape((batch, length, out_units)), (0, 2, 1))
assert(e.shape == (batch, out_units, length))
return e
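def _seq_func_shape_demo():
    """A minimal sketch, not part of the original file: it shows the shape
    contract of seq_func above with a plain chainer.links.Linear acting
    position-wise on a sentence block; the sizes are arbitrary placeholders.
    """
    linear = L.Linear(8, 16)
    block = np.zeros((4, 8, 10), dtype=np.float32)  # (batch, units, length)
    out = seq_func(linear, block)
    assert out.shape == (4, 16, 10)
    return out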
class LayerNormalizationSentence(L.LayerNormalization):
""" Position-wise Linear Layer for Sentence Block
Position-wise layer-normalization layer for array of shape
(batchsize, dimension, sentence_length).
"""
def __init__(self, *args, **kwargs):
super(LayerNormalizationSentence, self).__init__(*args, **kwargs)
def __call__(self, x):
y = seq_func(super(LayerNormalizationSentence, self).__call__, x)
return y
class ConvolutionSentence(L.Convolution2D):
""" Position-wise Linear Layer for Sentence Block
Position-wise linear layer for array of shape
(batchsize, dimension, sentence_length)
can be implemented a convolution layer.
"""
def __init__(self, in_channels, out_channels,
ksize=1, stride=1, pad=0, nobias=False,
initialW=None, initial_bias=None):
super(ConvolutionSentence, self).__init__(
in_channels, out_channels,
ksize, stride, pad, nobias,
initialW, initial_bias)
def __call__(self, x):
"""Applies the linear layer.
Args:
x (~chainer.Variable): Batch of input vector block. Its shape is
(batchsize, in_channels, sentence_length).
Returns:
~chainer.Variable: Output of the linear layer. Its shape is
(batchsize, out_channels, sentence_length).
"""
x = F.expand_dims(x, axis=3)
y = super(ConvolutionSentence, self).__call__(x)
y = F.squeeze(y, axis=3)
return y
class MultiHeadAttention(chainer.Chain):
""" Multi Head Attention Layer for Sentence Blocks
    For batch computation efficiency, the dot products that calculate the
    query-key scores are performed for all heads together.
"""
def __init__(self, n_units, h=8, dropout=0.1, self_attention=True):
super(MultiHeadAttention, self).__init__()
with self.init_scope():
if self_attention:
self.W_QKV = ConvolutionSentence(
n_units, n_units * 3, nobias=True,
initialW=linear_init)
else:
self.W_Q = ConvolutionSentence(
n_units, n_units, nobias=True,
initialW=linear_init)
self.W_KV = ConvolutionSentence(
n_units, n_units * 2, nobias=True,
initialW=linear_init)
self.finishing_linear_layer = ConvolutionSentence(
n_units, n_units, nobias=True,
initialW=linear_init)
self.h = h
self.scale_score = 1. / (n_units // h) ** 0.5
self.dropout = dropout
self.is_self_attention = self_attention
def __call__(self, x, z=None, mask=None):
xp = self.xp
h = self.h
if self.is_self_attention:
Q, K, V = F.split_axis(self.W_QKV(x), 3, axis=1)
else:
Q = self.W_Q(x)
K, V = F.split_axis(self.W_KV(z), 2, axis=1)
batch, n_units, n_querys = Q.shape
_, _, n_keys = K.shape
# Calculate Attention Scores with Mask for Zero-padded Areas
# Perform Multi-head Attention using pseudo batching
# all together at once for efficiency
batch_Q = F.concat(F.split_axis(Q, h, axis=1), axis=0)
batch_K = F.concat(F.split_axis(K, h, axis=1), axis=0)
batch_V = F.concat(F.split_axis(V, h, axis=1), axis=0)
assert(batch_Q.shape == (batch * h, n_units // h, n_querys))
assert(batch_K.shape == (batch * h, n_units // h, n_keys))
assert(batch_V.shape == (batch * h, n_units // h, n_keys))
mask = xp.concatenate([mask] * h, axis=0)
batch_A = F.batch_matmul(batch_Q, batch_K, transa=True) \
* self.scale_score
batch_A = F.where(mask, batch_A, xp.full(batch_A.shape, -np.inf, 'f'))
batch_A = F.softmax(batch_A, axis=2)
batch_A = F.where(
xp.isnan(batch_A.data), xp.zeros(batch_A.shape, 'f'), batch_A)
assert(batch_A.shape == (batch * h, n_querys, n_keys))
# Calculate Weighted Sum
batch_A, batch_V = F.broadcast(
batch_A[:, None], batch_V[:, :, None])
batch_C = F.sum(batch_A * batch_V, axis=3)
assert(batch_C.shape == (batch * h, n_units // h, n_querys))
C = F.concat(F.split_axis(batch_C, h, axis=0), axis=1)
assert(C.shape == (batch, n_units, n_querys))
C = self.finishing_linear_layer(C)
return C
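def _pseudo_batch_shape_demo():
    """A minimal sketch, not part of the original file: it illustrates the
    pseudo-batching used in MultiHeadAttention above, where the h heads are
    stacked along the batch axis so that a single batch_matmul can score
    every head at once. The sizes are arbitrary placeholders.
    """
    batch, n_units, length, h = 2, 8, 5, 4
    Q = np.zeros((batch, n_units, length), dtype=np.float32)
    batch_Q = F.concat(F.split_axis(Q, h, axis=1), axis=0)
    assert batch_Q.shape == (batch * h, n_units // h, length)
    return batch_Q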
class FeedForwardLayer(chainer.Chain):
def __init__(self, n_units):
super(FeedForwardLayer, self).__init__()
n_inner_units = n_units * 4
with self.init_scope():
self.W_1 = ConvolutionSentence(n_units, n_inner_units,
initialW=linear_init)
self.W_2 = ConvolutionSentence(n_inner_units, n_units,
initialW=linear_init)
# self.act = F.relu
self.act = F.leaky_relu
def __call__(self, e):
e = self.W_1(e)
e = self.act(e)
e = self.W_2(e)
return e
class EncoderLayer(chainer.Chain):
def __init__(self, n_units, h=8, dropout=0.1):
super(EncoderLayer, self).__init__()
with self.init_scope():
self.self_attention = MultiHeadAttention(n_units, h)
self.feed_forward = FeedForwardLayer(n_units)
self.ln_1 = LayerNormalizationSentence(n_units, eps=1e-6)
self.ln_2 = LayerNormalizationSentence(n_units, eps=1e-6)
self.dropout = dropout
def __call__(self, e, xx_mask):
sub = self.self_attention(e, e, xx_mask)
e = e + F.dropout(sub, self.dropout)
e = self.ln_1(e)
sub = self.feed_forward(e)
e = e + F.dropout(sub, self.dropout)
e = self.ln_2(e)
return e
class DecoderLayer(chainer.Chain):
def __init__(self, n_units, h=8, dropout=0.1):
super(DecoderLayer, self).__init__()
with self.init_scope():
self.self_attention = MultiHeadAttention(n_units, h)
self.source_attention = MultiHeadAttention(
n_units, h, self_attention=False)
self.feed_forward = FeedForwardLayer(n_units)
self.ln_1 = LayerNormalizationSentence(n_units, eps=1e-6)
self.ln_2 = LayerNormalizationSentence(n_units, eps=1e-6)
self.ln_3 = LayerNormalizationSentence(n_units, eps=1e-6)
self.dropout = dropout
def __call__(self, e, s, xy_mask, yy_mask):
sub = self.self_attention(e, e, yy_mask)
e = e + F.dropout(sub, self.dropout)
e = self.ln_1(e)
sub = self.source_attention(e, s, xy_mask)
e = e + F.dropout(sub, self.dropout)
e = self.ln_2(e)
sub = self.feed_forward(e)
e = e + F.dropout(sub, self.dropout)
e = self.ln_3(e)
return e
class Encoder(chainer.Chain):
def __init__(self, n_layers, n_units, h=8, dropout=0.1):
super(Encoder, self).__init__()
self.layer_names = []
for i in range(1, n_layers + 1):
name = 'l{}'.format(i)
layer = EncoderLayer(n_units, h, dropout)
self.add_link(name, layer)
self.layer_names.append(name)
def __call__(self, e, xx_mask):
for name in self.layer_names:
e = getattr(self, name)(e, xx_mask)
return e
class Decoder(chainer.Chain):
def __init__(self, n_layers, n_units, h=8, dropout=0.1):
super(Decoder, self).__init__()
self.layer_names = []
for i in range(1, n_layers + 1):
name = 'l{}'.format(i)
layer = DecoderLayer(n_units, h, dropout)
self.add_link(name, layer)
self.layer_names.append(name)
def __call__(self, e, source, xy_mask, yy_mask):
for name in self.layer_names:
e = getattr(self, name)(e, source, xy_mask, yy_mask)
return e
class Transformer(chainer.Chain):
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units,
h=8, dropout=0.1, max_length=500,
use_label_smoothing=False,
embed_position=False):
super(Transformer, self).__init__()
with self.init_scope():
self.embed_x = L.EmbedID(n_source_vocab, n_units, ignore_label=-1,
initialW=linear_init)
self.embed_y = L.EmbedID(n_target_vocab, n_units, ignore_label=-1,
initialW=linear_init)
self.encoder = Encoder(n_layers, n_units, h, dropout)
self.decoder = Decoder(n_layers, n_units, h, dropout)
if embed_position:
self.embed_pos = L.EmbedID(max_length, n_units,
ignore_label=-1)
self.n_layers = n_layers
self.n_units = n_units
self.n_target_vocab = n_target_vocab
self.dropout = dropout
self.use_label_smoothing = use_label_smoothing
self.initialize_position_encoding(max_length, n_units)
self.scale_emb = self.n_units ** 0.5
def initialize_position_encoding(self, length, n_units):
xp = self.xp
"""
# Implementation described in the paper
start = 1 # index starts from 1 or 0
posi_block = xp.arange(
start, length + start, dtype='f')[None, None, :]
unit_block = xp.arange(
start, n_units // 2 + start, dtype='f')[None, :, None]
rad_block = posi_block / 10000. ** (unit_block / (n_units // 2))
sin_block = xp.sin(rad_block)
cos_block = xp.cos(rad_block)
self.position_encoding_block = xp.empty((1, n_units, length), 'f')
self.position_encoding_block[:, ::2, :] = sin_block
self.position_encoding_block[:, 1::2, :] = cos_block
"""
# Implementation in the Google tensor2tensor repo
channels = n_units
position = xp.arange(length, dtype='f')
num_timescales = channels // 2
log_timescale_increment = (
xp.log(10000. / 1.) /
(float(num_timescales) - 1))
inv_timescales = 1. * xp.exp(
xp.arange(num_timescales).astype('f') * -log_timescale_increment)
scaled_time = \
xp.expand_dims(position, 1) * \
xp.expand_dims(inv_timescales, 0)
signal = xp.concatenate(
[xp.sin(scaled_time), xp.cos(scaled_time)], axis=1)
signal = xp.reshape(signal, [1, length, channels])
self.position_encoding_block = xp.transpose(signal, (0, 2, 1))
def make_input_embedding(self, embed, block):
batch, length = block.shape
emb_block = sentence_block_embed(embed, block) * self.scale_emb
emb_block += self.xp.array(self.position_encoding_block[:, :, :length])
if hasattr(self, 'embed_pos'):
emb_block += sentence_block_embed(
self.embed_pos,
self.xp.broadcast_to(
self.xp.arange(length).astype('i')[None, :], block.shape))
emb_block = F.dropout(emb_block, self.dropout)
return emb_block
def make_attention_mask(self, source_block, target_block):
mask = (target_block[:, None, :] >= 0) * \
(source_block[:, :, None] >= 0)
# (batch, source_length, target_length)
return mask
def make_history_mask(self, block):
batch, length = block.shape
arange = self.xp.arange(length)
history_mask = (arange[None, ] <= arange[:, None])[None, ]
history_mask = self.xp.broadcast_to(
history_mask, (batch, length, length))
return history_mask
def output(self, h):
return F.linear(h, self.embed_y.W)
def output_and_loss(self, h_block, t_block):
batch, units, length = h_block.shape
# Output (all together at once for efficiency)
concat_logit_block = seq_func(self.output, h_block,
reconstruct_shape=False)
rebatch, _ = concat_logit_block.shape
# Make target
concat_t_block = t_block.reshape((rebatch))
ignore_mask = (concat_t_block >= 0)
n_token = ignore_mask.sum()
normalizer = n_token # n_token or batch or 1
# normalizer = 1
if not self.use_label_smoothing:
loss = F.softmax_cross_entropy(concat_logit_block, concat_t_block)
loss = loss * n_token / normalizer
else:
log_prob = F.log_softmax(concat_logit_block)
broad_ignore_mask = self.xp.broadcast_to(
ignore_mask[:, None],
concat_logit_block.shape)
pre_loss = ignore_mask * \
log_prob[self.xp.arange(rebatch), concat_t_block]
loss = - F.sum(pre_loss) / normalizer
accuracy = F.accuracy(
concat_logit_block, concat_t_block, ignore_label=-1)
perp = self.xp.exp(loss.data * normalizer / n_token)
# Report the Values
reporter.report({'loss': loss.data * normalizer / n_token,
'acc': accuracy.data,
'perp': perp}, self)
if self.use_label_smoothing:
label_smoothing = broad_ignore_mask * \
- 1. / self.n_target_vocab * log_prob
label_smoothing = F.sum(label_smoothing) / normalizer
loss = 0.9 * loss + 0.1 * label_smoothing
return loss
def __call__(self, x_block, y_in_block, y_out_block, get_prediction=False):
batch, x_length = x_block.shape
batch, y_length = y_in_block.shape
# Make Embedding
ex_block = self.make_input_embedding(self.embed_x, x_block)
ey_block = self.make_input_embedding(self.embed_y, y_in_block)
# Make Masks
xx_mask = self.make_attention_mask(x_block, x_block)
xy_mask = self.make_attention_mask(y_in_block, x_block)
yy_mask = self.make_attention_mask(y_in_block, y_in_block)
yy_mask *= self.make_history_mask(y_in_block)
# Encode Sources
z_blocks = self.encoder(ex_block, xx_mask)
# [(batch, n_units, x_length), ...]
# Encode Targets with Sources (Decode without Output)
h_block = self.decoder(ey_block, z_blocks, xy_mask, yy_mask)
# (batch, n_units, y_length)
if get_prediction:
return self.output(h_block[:, :, -1])
else:
return self.output_and_loss(h_block, y_out_block)
def translate(self, x_block, max_length=50, beam=5):
if beam:
return self.translate_beam(x_block, max_length, beam)
# TODO: efficient inference by re-using result
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
x_block = source_pad_concat_convert(
x_block, device=None)
batch, x_length = x_block.shape
# y_block = self.xp.zeros((batch, 1), dtype=x_block.dtype)
y_block = self.xp.full(
(batch, 1), 2, dtype=x_block.dtype) # bos
eos_flags = self.xp.zeros((batch, ), dtype=x_block.dtype)
result = []
for i in range(max_length):
log_prob_tail = self(x_block, y_block, y_block,
get_prediction=True)
ys = self.xp.argmax(log_prob_tail.data, axis=1).astype('i')
result.append(ys)
y_block = F.concat([y_block, ys[:, None]], axis=1).data
eos_flags += (ys == 0)
if self.xp.all(eos_flags):
break
result = cuda.to_cpu(self.xp.stack(result).T)
                # Remove EOS tags
outs = []
for y in result:
inds = np.argwhere(y == 0)
if len(inds) > 0:
y = y[:inds[0, 0]]
if len(y) == 0:
y = np.array([1], 'i')
outs.append(y)
return outs
def translate_beam(self, x_block, max_length=50, beam=5):
# TODO: efficient inference by re-using result
# TODO: batch processing
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
x_block = source_pad_concat_convert(
x_block, device=None)
batch, x_length = x_block.shape
assert batch == 1, 'Batch processing is not supported now.'
y_block = self.xp.full(
(batch, 1), 2, dtype=x_block.dtype) # bos
eos_flags = self.xp.zeros(
(batch * beam, ), dtype=x_block.dtype)
sum_scores = self.xp.zeros(1, 'f')
result = [[2]] * batch * beam
for i in range(max_length):
log_prob_tail = self(x_block, y_block, y_block,
get_prediction=True)
ys_list, ws_list = get_topk(
log_prob_tail.data, beam, axis=1)
ys_concat = self.xp.concatenate(ys_list, axis=0)
sum_ws_list = [ws + sum_scores for ws in ws_list]
sum_ws_concat = self.xp.concatenate(sum_ws_list, axis=0)
# Get top-k from total candidates
idx_list, sum_w_list = get_topk(
sum_ws_concat, beam, axis=0)
idx_concat = self.xp.stack(idx_list, axis=0)
ys = ys_concat[idx_concat]
sum_scores = self.xp.stack(sum_w_list, axis=0)
if i != 0:
old_idx_list = (idx_concat % beam).tolist()
else:
old_idx_list = [0] * beam
result = [result[idx] + [y]
for idx, y in zip(old_idx_list, ys.tolist())]
y_block = self.xp.array(result).astype('i')
if x_block.shape[0] != y_block.shape[0]:
x_block = self.xp.broadcast_to(
x_block, (y_block.shape[0], x_block.shape[1]))
eos_flags += (ys == 0)
if self.xp.all(eos_flags):
break
outs = [[wi for wi in sent if wi not in [2, 0]] for sent in result]
outs = [sent if sent else [0] for sent in outs]
return outs
def get_topk(x, k=5, axis=1):
ids_list = []
scores_list = []
xp = cuda.get_array_module(x)
for i in range(k):
ids = xp.argmax(x, axis=axis).astype('i')
if axis == 0:
scores = x[ids]
x[ids] = - float('inf')
else:
scores = x[xp.arange(ids.shape[0]), ids]
x[xp.arange(ids.shape[0]), ids] = - float('inf')
ids_list.append(ids)
scores_list.append(scores)
return ids_list, scores_list
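def _get_topk_demo():
    """A minimal sketch, not part of the original file: get_topk above takes
    the argmax k times and masks each winner with -inf, so it returns the k
    best scores in descending order together with their indices. Note that
    it modifies its input in place, hence the copy below.
    """
    x = np.array([[0.1, 0.7, 0.2, 0.5]], dtype=np.float32)
    ids_list, scores_list = get_topk(x.copy(), k=2, axis=1)
    # ids_list == [array([1], dtype=int32), array([3], dtype=int32)]
    # scores_list == [array([0.7], ...), array([0.5], ...)]
    return ids_list, scores_list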
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:谷歌神经翻译模型/attention_is_all_you_need-master/preprocess.py | from __future__ import unicode_literals
import collections
import io
import re
import numpy
import progressbar
split_pattern = re.compile(r'([.,!?"\':;)(])')
digit_pattern = re.compile(r'\d')
def split_sentence(s):
s = s.lower()
s = s.replace('\u2019', "'")
s = digit_pattern.sub('0', s)
words = []
for word in s.strip().split():
words.extend(split_pattern.split(word))
words = [w for w in words if w]
return words
def open_file(path):
return io.open(path, encoding='utf-8', errors='ignore')
def count_lines(path):
with open_file(path) as f:
return sum([1 for _ in f])
def read_file(path):
n_lines = count_lines(path)
bar = progressbar.ProgressBar()
with open_file(path) as f:
for line in bar(f, max_value=n_lines):
words = split_sentence(line)
yield words
def count_words(path, max_vocab_size=40000):
counts = collections.Counter()
for words in read_file(path):
for word in words:
counts[word] += 1
vocab = [word for (word, _) in counts.most_common(max_vocab_size)]
return vocab
def make_dataset(path, vocab):
word_id = {word: index for index, word in enumerate(vocab)}
dataset = []
token_count = 0
unknown_count = 0
for words in read_file(path):
array = make_array(word_id, words)
dataset.append(array)
token_count += array.size
unknown_count += (array == 1).sum()
print('# of tokens: %d' % token_count)
print('# of unknown: %d (%.2f %%)'
% (unknown_count, 100. * unknown_count / token_count))
return dataset
def make_array(word_id, words):
ids = [word_id.get(word, 1) for word in words]
return numpy.array(ids, 'i')
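def _preprocess_demo():
    """A minimal usage sketch, not part of the original file: split_sentence
    lowercases, maps every digit to 0 and splits off punctuation, while
    make_array maps out-of-vocabulary words to id 1 (the <unk> slot in the
    vocabulary layout used by train.py). The tiny word_id dict is made up.
    """
    words = split_sentence('Who are we, in 2017?')
    # words == ['who', 'are', 'we', ',', 'in', '0000', '?']
    word_id = {'who': 3, 'are': 4, 'we': 5, ',': 6, '?': 7}
    return make_array(word_id, words)
    # -> array([3, 4, 5, 6, 1, 1, 7], dtype=int32)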
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:谷歌神经翻译模型/attention_is_all_you_need-master/subfuncs.py | from __future__ import division
from chainer.training import extension
class VaswaniRule(extension.Extension):
"""Trainer extension to shift an optimizer attribute magically by Vaswani.
Args:
attr (str): Name of the attribute to shift.
rate (float): Rate of the exponential shift. This value is multiplied
to the attribute at each call.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
def __init__(self, attr, d, warmup_steps=4000,
init=None, target=None, optimizer=None,
scale=1.):
self._attr = attr
self._d_inv05 = d ** (-0.5) * scale
self._warmup_steps_inv15 = warmup_steps ** (-1.5)
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
# self._init = getattr(optimizer, self._attr)
self._init = self._d_inv05 * (1. * self._warmup_steps_inv15)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._d_inv05 * \
min(self._t ** (-0.5), self._t * self._warmup_steps_inv15)
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
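def _vaswani_schedule_sketch(d=512, warmup_steps=4000, scale=1.):
    """A minimal sketch, not part of the original file: the value VaswaniRule
    above assigns at step t is scale * d**-0.5 * min(t**-0.5,
    t * warmup_steps**-1.5), i.e. a linear warmup followed by 1/sqrt(t)
    decay. The default arguments mirror the paper, not this repo's config.
    """
    def lr(t):
        return scale * d ** -0.5 * min(t ** -0.5, t * warmup_steps ** -1.5)
    return [lr(t) for t in (1, 1000, 4000, 10000)]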
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:谷歌神经翻译模型/attention_is_all_you_need-master/train.py | # encoding: utf-8
import argparse
import json
import os.path
from nltk.translate import bleu_score
import numpy
import six
import chainer
from chainer import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer import training
from chainer.training import extensions
import preprocess
import net
from subfuncs import VaswaniRule
def seq2seq_pad_concat_convert(xy_batch, device, eos_id=0, bos_id=2):
"""
Args:
xy_batch (list of tuple of two numpy.ndarray-s or cupy.ndarray-s):
xy_batch[i][0] is an array
of token ids of i-th input sentence in a minibatch.
xy_batch[i][1] is an array
of token ids of i-th target sentence in a minibatch.
The shape of each array is `(sentence length, )`.
device (int or None): Device ID to which an array is sent. If it is
negative value, an array is sent to CPU. If it is positive, an
array is sent to GPU with the given ID. If it is ``None``, an
array is left in the original device.
Returns:
Tuple of Converted array.
(input_sent_batch_array, target_sent_batch_input_array,
target_sent_batch_output_array).
The shape of each array is `(batchsize, max_sentence_length)`.
All sentences are padded with -1 to reach max_sentence_length.
"""
x_seqs, y_seqs = zip(*xy_batch)
x_block = convert.concat_examples(x_seqs, device, padding=-1)
y_block = convert.concat_examples(y_seqs, device, padding=-1)
xp = cuda.get_array_module(x_block)
# The paper did not mention eos
# add eos
x_block = xp.pad(x_block, ((0, 0), (0, 1)),
'constant', constant_values=-1)
for i_batch, seq in enumerate(x_seqs):
x_block[i_batch, len(seq)] = eos_id
x_block = xp.pad(x_block, ((0, 0), (1, 0)),
'constant', constant_values=bos_id)
y_out_block = xp.pad(y_block, ((0, 0), (0, 1)),
'constant', constant_values=-1)
for i_batch, seq in enumerate(y_seqs):
y_out_block[i_batch, len(seq)] = eos_id
y_in_block = xp.pad(y_block, ((0, 0), (1, 0)),
'constant', constant_values=bos_id)
return (x_block, y_in_block, y_out_block)
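def _pad_concat_demo():
    """A minimal sketch, not part of the original file: two toy id sequences
    per side are padded with -1 and wrapped with bos_id=2 in front and
    eos_id=0 after the last real token, exactly as the converter above does
    for a minibatch. The ids are arbitrary placeholders.
    """
    xs = [numpy.array([5, 6, 7], 'i'), numpy.array([8], 'i')]
    ys = [numpy.array([9], 'i'), numpy.array([10, 11], 'i')]
    x_block, y_in, y_out = seq2seq_pad_concat_convert(list(zip(xs, ys)), device=None)
    # x_block == [[2, 5, 6, 7, 0],
    #             [2, 8, 0, -1, -1]]
    # y_in    == [[2, 9, -1],
    #             [2, 10, 11]]
    # y_out   == [[9, 0, -1],
    #             [10, 11, 0]]
    return x_block, y_in, y_out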
def source_pad_concat_convert(x_seqs, device, eos_id=0, bos_id=2):
x_block = convert.concat_examples(x_seqs, device, padding=-1)
xp = cuda.get_array_module(x_block)
# add eos
x_block = xp.pad(x_block, ((0, 0), (0, 1)),
'constant', constant_values=-1)
for i_batch, seq in enumerate(x_seqs):
x_block[i_batch, len(seq)] = eos_id
x_block = xp.pad(x_block, ((0, 0), (1, 0)),
'constant', constant_values=bos_id)
return x_block
class CalculateBleu(chainer.training.Extension):
trigger = 1, 'epoch'
priority = chainer.training.PRIORITY_WRITER
def __init__(
self, model, test_data, key, batch=50, device=-1, max_length=50):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = device
self.max_length = max_length
def __call__(self, trainer):
print('## Calculate BLEU')
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend([[t.tolist()] for t in targets])
sources = [
chainer.dataset.to_device(self.device, x) for x in sources]
ys = [y.tolist()
for y in self.model.translate(
sources, self.max_length, beam=False)]
# greedy generation for efficiency
hypotheses.extend(ys)
bleu = bleu_score.corpus_bleu(
references, hypotheses,
smoothing_function=bleu_score.SmoothingFunction().method1) * 100
print('BLEU:', bleu)
reporter.report({self.key: bleu})
def main():
parser = argparse.ArgumentParser(
description='Chainer example: convolutional seq2seq')
parser.add_argument('--batchsize', '-b', type=int, default=48,
                        help='Number of sentence pairs in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--unit', '-u', type=int, default=512,
help='Number of units')
parser.add_argument('--layer', '-l', type=int, default=6,
help='Number of layers')
parser.add_argument('--head', type=int, default=8,
help='Number of heads in attention mechanism')
parser.add_argument('--dropout', '-d', type=float, default=0.1,
help='Dropout rate')
parser.add_argument('--input', '-i', type=str, default='./',
help='Input directory')
parser.add_argument('--source', '-s', type=str,
default='europarl-v7.fr-en.en',
help='Filename of train data for source language')
parser.add_argument('--target', '-t', type=str,
default='europarl-v7.fr-en.fr',
help='Filename of train data for target language')
parser.add_argument('--source-valid', '-svalid', type=str,
default='dev/newstest2013.en',
help='Filename of validation data for source language')
parser.add_argument('--target-valid', '-tvalid', type=str,
default='dev/newstest2013.fr',
help='Filename of validation data for target language')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--source-vocab', type=int, default=40000,
help='Vocabulary size of source language')
parser.add_argument('--target-vocab', type=int, default=40000,
help='Vocabulary size of target language')
parser.add_argument('--no-bleu', '-no-bleu', action='store_true',
help='Skip BLEU calculation')
parser.add_argument('--use-label-smoothing', action='store_true',
help='Use label smoothing for cross entropy')
parser.add_argument('--embed-position', action='store_true',
help='Use position embedding rather than sinusoid')
parser.add_argument('--use-fixed-lr', action='store_true',
help='Use fixed learning rate rather than the ' +
'annealing proposed in the paper')
args = parser.parse_args()
print(json.dumps(args.__dict__, indent=4))
# Check file
en_path = os.path.join(args.input, args.source)
source_vocab = ['<eos>', '<unk>', '<bos>'] + \
preprocess.count_words(en_path, args.source_vocab)
source_data = preprocess.make_dataset(en_path, source_vocab)
fr_path = os.path.join(args.input, args.target)
target_vocab = ['<eos>', '<unk>', '<bos>'] + \
preprocess.count_words(fr_path, args.target_vocab)
target_data = preprocess.make_dataset(fr_path, target_vocab)
assert len(source_data) == len(target_data)
print('Original training data size: %d' % len(source_data))
train_data = [(s, t)
for s, t in six.moves.zip(source_data, target_data)
if 0 < len(s) < 50 and 0 < len(t) < 50]
print('Filtered training data size: %d' % len(train_data))
en_path = os.path.join(args.input, args.source_valid)
source_data = preprocess.make_dataset(en_path, source_vocab)
fr_path = os.path.join(args.input, args.target_valid)
target_data = preprocess.make_dataset(fr_path, target_vocab)
assert len(source_data) == len(target_data)
test_data = [(s, t) for s, t in six.moves.zip(source_data, target_data)
if 0 < len(s) and 0 < len(t)]
source_ids = {word: index for index, word in enumerate(source_vocab)}
target_ids = {word: index for index, word in enumerate(target_vocab)}
target_words = {i: w for w, i in target_ids.items()}
source_words = {i: w for w, i in source_ids.items()}
# Define Model
model = net.Transformer(
args.layer,
min(len(source_ids), len(source_words)),
min(len(target_ids), len(target_words)),
args.unit,
h=args.head,
dropout=args.dropout,
max_length=500,
use_label_smoothing=args.use_label_smoothing,
embed_position=args.embed_position)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu(args.gpu)
# Setup Optimizer
optimizer = chainer.optimizers.Adam(
alpha=5e-5,
beta1=0.9,
beta2=0.98,
eps=1e-9
)
optimizer.setup(model)
# Setup Trainer
train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
repeat=False, shuffle=False)
iter_per_epoch = len(train_data) // args.batchsize
print('Number of iter/epoch =', iter_per_epoch)
updater = training.StandardUpdater(
train_iter, optimizer,
converter=seq2seq_pad_concat_convert, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# If you want to change a logging interval, change this number
log_trigger = (min(200, iter_per_epoch // 2), 'iteration')
def floor_step(trigger):
floored = trigger[0] - trigger[0] % log_trigger[0]
if floored <= 0:
floored = trigger[0]
return (floored, trigger[1])
# Validation every half epoch
eval_trigger = floor_step((iter_per_epoch // 2, 'iteration'))
record_trigger = training.triggers.MinValueTrigger(
'val/main/perp', eval_trigger)
evaluator = extensions.Evaluator(
test_iter, model,
converter=seq2seq_pad_concat_convert, device=args.gpu)
evaluator.default_name = 'val'
trainer.extend(evaluator, trigger=eval_trigger)
    # Use Vaswani's magical rule of learning rate (Eq. 3 in the paper).
    # But the hyperparameter in the paper seems to work well
    # only with a large batchsize.
    # If you run on a popular setup (e.g. size=48 on 1 GPU),
    # you may have to change the hyperparameter.
    # I scaled the learning rate by 0.5 consistently.
    # ("scale" is always multiplied to the learning rate.)
    # If you use a shallow network (<= 2 layers),
    # you may not have to change it from the paper setting.
if not args.use_fixed_lr:
trainer.extend(
# VaswaniRule('alpha', d=args.unit, warmup_steps=4000, scale=1.),
# VaswaniRule('alpha', d=args.unit, warmup_steps=32000, scale=1.),
# VaswaniRule('alpha', d=args.unit, warmup_steps=4000, scale=0.5),
# VaswaniRule('alpha', d=args.unit, warmup_steps=16000, scale=1.),
VaswaniRule('alpha', d=args.unit, warmup_steps=64000, scale=1.),
trigger=(1, 'iteration'))
observe_alpha = extensions.observe_value(
'alpha',
lambda trainer: trainer.updater.get_optimizer('main').alpha)
trainer.extend(
observe_alpha,
trigger=(1, 'iteration'))
# Only if a model gets best validation score,
# save (overwrite) the model
trainer.extend(extensions.snapshot_object(
model, 'best_model.npz'),
trigger=record_trigger)
def translate_one(source, target):
words = preprocess.split_sentence(source)
print('# source : ' + ' '.join(words))
x = model.xp.array(
[source_ids.get(w, 1) for w in words], 'i')
ys = model.translate([x], beam=5)[0]
words = [target_words[y] for y in ys]
print('# result : ' + ' '.join(words))
print('# expect : ' + target)
@chainer.training.make_extension(trigger=(200, 'iteration'))
def translate(trainer):
translate_one(
'Who are we ?',
'Qui sommes-nous?')
translate_one(
'And it often costs over a hundred dollars ' +
'to obtain the required identity card .',
'Or, il en coûte souvent plus de cent dollars ' +
'pour obtenir la carte d\'identité requise.')
source, target = test_data[numpy.random.choice(len(test_data))]
source = ' '.join([source_words[i] for i in source])
target = ' '.join([target_words[i] for i in target])
translate_one(source, target)
    # Generation Test
trainer.extend(
translate,
trigger=(min(200, iter_per_epoch), 'iteration'))
# Calculate BLEU every half epoch
if not args.no_bleu:
trainer.extend(
CalculateBleu(
model, test_data, 'val/main/bleu',
device=args.gpu, batch=args.batchsize // 4),
trigger=floor_step((iter_per_epoch // 2, 'iteration')))
# Log
trainer.extend(extensions.LogReport(trigger=log_trigger),
trigger=log_trigger)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration',
'main/loss', 'val/main/loss',
'main/perp', 'val/main/perp',
'main/acc', 'val/main/acc',
'val/main/bleu',
'alpha',
'elapsed_time']),
trigger=log_trigger)
print('start training')
trainer.run()
if __name__ == '__main__':
main()
| [] | [] | [] |