##########################################################################
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement

import os
import sys
import shutil
import unittest

import IECore


class TestBasicPreset(unittest.TestCase):

    def testCopy(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.FloatParameter("b", "", 1.0),
        ])

        testObj2 = IECore.Parameterised("testParameterised2")
        testObj2.parameters().addParameters([
            IECore.BoolParameter("a", "", False),
            IECore.FloatParameter("c", "", 0.0),
        ])

        p = IECore.BasicPreset(testObj, testObj.parameters())

        self.assertTrue(p.applicableTo(testObj, testObj.parameters()))
        self.assertFalse(p.applicableTo(testObj2, testObj2.parameters()))

        testObj.parameters()["a"].setTypedValue(False)
        testObj.parameters()["b"].setTypedValue(0.0)

        p(testObj, testObj.parameters())

        self.assertEqual(testObj.parameters()["a"].getTypedValue(), True)
        self.assertEqual(testObj.parameters()["b"].getTypedValue(), 1.0)

        p2 = IECore.BasicPreset(testObj, testObj.parameters(), parameters=(testObj.parameters()["a"],))

        self.assertTrue(p2.applicableTo(testObj, testObj.parameters()))
        self.assertTrue(p2.applicableTo(testObj2, testObj.parameters()))

        p2(testObj2, testObj2.parameters())

        self.assertEqual(testObj2.parameters()["a"].getTypedValue(), True)
        self.assertEqual(testObj2.parameters()["c"].getTypedValue(), 0.0)

    def testLoad(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.FloatParameter("b", "", 1.0),
        ])

        testObj2 = IECore.Parameterised("testParameterised1")
        testObj2.parameters().addParameters([
            IECore.BoolParameter("a", "", False),
            IECore.FloatParameter("c", "", 0.0),
        ])

        savePath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "basicPreset"))

        messageHandler = IECore.CapturingMessageHandler()
        with messageHandler:
            p = IECore.BasicPreset(os.path.join(savePath, "basicPresetLoadTest", "basicPresetLoadTest-1.cob"))
        self.assertEqual(len(messageHandler.messages), 0)

        self.assertTrue(p.applicableTo(testObj, testObj.parameters()))
        self.assertFalse(p.applicableTo(testObj2, testObj2.parameters()))

        testObj.parameters()["a"].setTypedValue(False)
        testObj.parameters()["b"].setTypedValue(0.0)

        p(testObj, testObj.parameters())

        self.assertEqual(testObj.parameters()["a"].getTypedValue(), True)
        self.assertEqual(testObj.parameters()["b"].getTypedValue(), 1.0)

    def testSave(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.FloatParameter("b", "", 1.0),
        ])

        testObj2 = IECore.Parameterised("testParameterised1")
        testObj2.parameters().addParameters([
            IECore.BoolParameter("a", "", False),
            IECore.FloatParameter("c", "", 0.0),
        ])

        savePath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "basicPreset"))

        preset = IECore.BasicPreset(testObj, testObj.parameters())

        # Save for the classLoader and check its there, we test the 'loadability' later...
        preset.save(savePath, "basicPresetTest")
        self.assertTrue(os.path.isfile(os.path.join(savePath, "basicPresetTest", "basicPresetTest-1.cob")))
        self.assertTrue(os.path.isfile(os.path.join(savePath, "basicPresetTest", "basicPresetTest-1.py")))

        # save without the classLoader and check its there
        preset.save(savePath, "basicPresetTest", classLoadable=False)
        self.assertTrue(os.path.isfile(os.path.join(savePath, "basicPresetTest.cob")))

        # reload
        p = IECore.BasicPreset(os.path.join(savePath, "basicPresetTest.cob"))

        self.assertTrue(p.applicableTo(testObj, testObj.parameters()))
        self.assertFalse(p.applicableTo(testObj2, testObj2.parameters()))

        testObj.parameters()["a"].setTypedValue(False)
        testObj.parameters()["b"].setTypedValue(0.0)

        p(testObj, testObj.parameters())

        self.assertEqual(testObj.parameters()["a"].getTypedValue(), True)
        self.assertEqual(testObj.parameters()["b"].getTypedValue(), 1.0)

        preset2 = IECore.BasicPreset(testObj, testObj.parameters(), parameters=(testObj.parameters()["a"],))
        preset2.save(savePath, "basicPresetTest2", classLoadable=False)

        # reload
        p2 = IECore.BasicPreset(os.path.join(savePath, "basicPresetTest2.cob"))

        self.assertTrue(p2.applicableTo(testObj, testObj.parameters()))
        self.assertTrue(p2.applicableTo(testObj2, testObj.parameters()))

        p2(testObj2, testObj2.parameters())

        self.assertEqual(testObj2.parameters()["a"].getTypedValue(), True)
        self.assertEqual(testObj2.parameters()["c"].getTypedValue(), 0.0)

    def testClassLoader(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.FloatParameter("b", "", 1.0),
        ])

        savePath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "basicPreset"))

        preset = IECore.BasicPreset(testObj, testObj.parameters())
        preset.save(savePath, "basicPresetTestClassLoader")

        # make sure that no messages are emitted during loading
        messageHandler = IECore.CapturingMessageHandler()
        with messageHandler:
            loader = IECore.ClassLoader(IECore.SearchPath(savePath))
            p = loader.load("basicPresetTestClassLoader")()
        self.assertEqual(len(messageHandler.messages), 0)

        self.assertTrue(isinstance(p, IECore.BasicPreset))

        p.metadata()

    def testClasses(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.ClassParameter("b", "", "IECORE_OP_PATHS", os.path.join("maths", "multiply"), 2),
        ])

        testObj2 = IECore.Parameterised("testParameterised2")
        testObj2.parameters().addParameters([
            IECore.ClassParameter("c", "", "IECORE_OP_PATHS"),
        ])

        classes1 = testObj.parameters()["b"].getClass(True)
        classes2 = testObj2.parameters()["c"].getClass(True)

        self.assertNotEqual(classes1[1:], classes2[1:])

        p = IECore.BasicPreset(testObj, testObj.parameters()["b"])

        self.assertTrue(p.applicableTo(testObj, testObj.parameters()["b"]))
        self.assertFalse(p.applicableTo(testObj, testObj.parameters()))
        self.assertTrue(p.applicableTo(testObj2, testObj2.parameters()["c"]))

        p(testObj2, testObj2.parameters()["c"])

        classes1 = testObj.parameters()["b"].getClass(True)
        classes2 = testObj2.parameters()["c"].getClass(True)

        self.assertEqual(classes1[1:], classes2[1:])

    def testClassVectors(self):

        testObj = IECore.Parameterised("testParameterised1")
        testObj.parameters().addParameters([
            IECore.BoolParameter("a", "", True),
            IECore.ClassVectorParameter("b", "", "IECORE_OP_PATHS"),
        ])
        testObj.parameters()["b"].setClasses([
            ("mult", os.path.join("maths", "multiply"), 2),
            ("coIO", "compoundObjectInOut", 1),
        ])

        testObj2 = IECore.Parameterised("testParameterised2")
        testObj2.parameters().addParameters([
            IECore.ClassVectorParameter("c", "", "IECORE_OP_PATHS"),
        ])

        classes1 = [c[1:] for c in testObj.parameters()["b"].getClasses(True)]
        classes2 = [c[1:] for c in testObj2.parameters()["c"].getClasses(True)]

        self.assertNotEqual(classes1, classes2)

        p = IECore.BasicPreset(testObj, testObj.parameters()["b"])

        self.assertTrue(p.applicableTo(testObj, testObj.parameters()["b"]))
        self.assertFalse(p.applicableTo(testObj, testObj.parameters()))
        self.assertTrue(p.applicableTo(testObj2, testObj2.parameters()["c"]))

        p(testObj2, testObj2.parameters()["c"])

        classes1 = [c[1:] for c in testObj.parameters()["b"].getClasses(True)]
        classes2 = [c[1:] for c in testObj2.parameters()["c"].getClasses(True)]

        self.assertEqual(classes1, classes2)

    def testCompoundVectorParameter(self):

        p = IECore.Parameterised("test")
        p.parameters().addParameters([
            IECore.BoolParameter("a", "", False),
            IECore.CompoundVectorParameter("c", "", members=[
                IECore.StringVectorParameter("s", "", IECore.StringVectorData()),
                IECore.BoolVectorParameter("b", "", IECore.BoolVectorData()),
            ])
        ])

        p["c"]["s"].setValue(IECore.StringVectorData(["1", "2", "3"]))
        p["c"]["b"].setValue(IECore.BoolVectorData([True, False, True]))
        v = p.parameters().getValue().copy()

        preset = IECore.BasicPreset(p, p.parameters())
        self.assertTrue(preset.applicableTo(p, p.parameters()))

        p.parameters().setValue(p.parameters().defaultValue)
        self.assertNotEqual(p.parameters().getValue(), v)

        preset(p, p.parameters())
        self.assertEqual(p.parameters().getValue(), v)

    def tearDown(self):

        savePath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data", "basicPreset"))
        paths = (
            os.path.join(savePath, "basicPresetTest"),
            os.path.join(savePath, "basicPresetTest.cob"),
            os.path.join(savePath, "basicPresetTest2.cob"),
            os.path.join(savePath, "basicPresetTestClassLoader"),
        )

        for p in paths:
            if os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.isfile(p):
                os.remove(p)


if __name__ == "__main__":
    unittest.main()
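
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the save /
# reload / apply round-trip exercised by testSave above, written as a plain
# script.  The "./presets" directory and parameter names are hypothetical.
#
#   obj = IECore.Parameterised("example")
#   obj.parameters().addParameters([IECore.FloatParameter("b", "", 1.0)])
#
#   preset = IECore.BasicPreset(obj, obj.parameters())
#   preset.save("./presets", "examplePreset", classLoadable=False)  # writes examplePreset.cob
#
#   reloaded = IECore.BasicPreset("./presets/examplePreset.cob")
#   if reloaded.applicableTo(obj, obj.parameters()):
#       reloaded(obj, obj.parameters())   # restores the saved parameter values
# ---------------------------------------------------------------------------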
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__="biotite"<line_sep>__author__="<NAME>"<line_sep>__all__=["Copyable"]<import_stmt>abc<class_stmt>Copyable(metaclass=abc.ABCMeta)<block_start>"""
Base class for all objects, that should be copyable.
The public method `copy()` first creates a fresh instance of the
class of the instance, that is copied via the `__copy_create__()`
method. All variables, that could not be set via the constructor,
are then copied via `__copy_fill__()`, starting with the method in
the uppermost base class and ending with the class of the instance
to be copied.
This approach solves the problem of encapsulated variables in
superclasses.
"""<def_stmt>copy self<block_start>"""
Create a deep copy of this object.
Returns
-------
copy
A copy of this object.
"""<line_sep>clone=self.__copy_create__()<line_sep>self.__copy_fill__(clone)<line_sep><return>clone<block_end><def_stmt>__copy_create__ self<block_start>"""
Instantiate a new object of this class.
Only the constructor should be called in this method.
All further attributes, that need to be copied are handled
in `__copy_fill__()`
Do not call the `super()` method here.
This method must be overridden, if the constructor takes
parameters.
Returns
-------
copy
A freshly instantiated copy of *self*.
"""<line_sep><return>type(self)()<block_end><def_stmt>__copy_fill__ self clone<block_start>"""
Copy all necessary attributes to the new object.
Always call the `super()` method as first statement.
Parameters
----------
clone
The freshly instantiated copy of *self*.
"""<line_sep><pass><block_end><block_end> |
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>copy<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>keras.utils CustomObjectScope<import_from_stmt>keras.layers wrappers Input Layer<import_from_stmt>keras.layers RNN<import_from_stmt>keras layers<import_from_stmt>keras.models Sequential Model model_from_json<import_from_stmt>keras backend<as>K<import_from_stmt>keras.utils.generic_utils object_list_uid to_list<line_sep>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed # first, test with Dense layer
<block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2) input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.random((10 3 4)) np.random.random((10 3 2)) epochs=1 batch_size=10)<line_sep># test config
model.get_config()<line_sep># test when specifying a batch_input_shape
test_input=np.random.random((1 3 4))<line_sep>test_output=model.predict(test_input)<line_sep>weights=model.layers[0].get_weights()<line_sep>reference=Sequential()<line_sep>reference.add(wrappers.TimeDistributed(layers.Dense(2) batch_input_shape=(1 3 4)))<line_sep>reference.add(layers.Activation('relu'))<line_sep>reference.compile(optimizer='rmsprop' loss='mse')<line_sep>reference.layers[0].set_weights(weights)<line_sep>reference_output=reference.predict(test_input)<line_sep>assert_allclose(test_output reference_output atol=1e-05)<line_sep># test with Embedding
model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Embedding(5 6) batch_input_shape=(10 3 4) dtype='int32'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.randint(5 size=(10 3 4) dtype='int32') np.random.random((10 3 4 6)) epochs=1 batch_size=10)<line_sep># compare to not using batch_input_shape
test_input=np.random.randint(5 size=(10 3 4) dtype='int32')<line_sep>test_output=model.predict(test_input)<line_sep>weights=model.layers[0].get_weights()<line_sep>reference=Sequential()<line_sep>reference.add(wrappers.TimeDistributed(layers.Embedding(5 6) input_shape=(3 4) dtype='int32'))<line_sep>reference.compile(optimizer='rmsprop' loss='mse')<line_sep>reference.layers[0].set_weights(weights)<line_sep>reference_output=reference.predict(test_input)<line_sep>assert_allclose(test_output reference_output atol=1e-05)<line_sep># test with Conv2D
model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Conv2D(5 (2 2) padding='same') input_shape=(2 4 4 3)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch(np.random.random((1 2 4 4 3)) np.random.random((1 2 4 4 5)))<line_sep>model=model_from_json(model.to_json())<line_sep>model.summary()<line_sep># test stacked layers
model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2) input_shape=(3 4)))<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(3)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.random((10 3 4)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test wrapping Sequential model
model=Sequential()<line_sep>model.add(layers.Dense(3 input_dim=2))<line_sep>outer_model=Sequential()<line_sep>outer_model.add(wrappers.TimeDistributed(model input_shape=(3 2)))<line_sep>outer_model.compile(optimizer='rmsprop' loss='mse')<line_sep>outer_model.fit(np.random.random((10 3 2)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test with functional API
x=Input(shape=(3 2))<line_sep>y=wrappers.TimeDistributed(model)(x)<line_sep>outer_model=Model(x y)<line_sep>outer_model.compile(optimizer='rmsprop' loss='mse')<line_sep>outer_model.fit(np.random.random((10 3 2)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test with BatchNormalization
model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.BatchNormalization(center=<true> scale=<true>) name='bn' input_shape=(10 2)))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep># Assert that mean and variance are 0 and 1.
td=model.layers[0]<assert_stmt>np.array_equal(td.get_weights()[2] np.array([0 0]))<assert_stmt>np.array_equal(td.get_weights()[3] np.array([1 1]))<line_sep># Train
model.train_on_batch(np.random.normal(loc=2 scale=2 size=(1 10 2)) np.broadcast_to(np.array([0 1]) (1 10 2)))<line_sep># Assert that mean and variance changed.
<assert_stmt><not>np.array_equal(td.get_weights()[2] np.array([0 0]))<assert_stmt><not>np.array_equal(td.get_weights()[3] np.array([1 1]))<line_sep># Verify input_map has one mapping from inputs to reshaped inputs.
uid=object_list_uid(model.inputs)<assert_stmt>len(td._input_map.keys())<eq>1<assert_stmt>uid<in>td._input_map<assert_stmt>K.int_shape(td._input_map[uid])<eq>(<none> 2)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')@pytest.mark.skipif((K.backend()<eq>'cntk') reason='Flaky with CNTK backend')<def_stmt>test_TimeDistributed_learning_phase # test layers that need learning_phase to be set
<block_start>np.random.seed(1234)<line_sep>x=Input(shape=(3 2))<line_sep>y=wrappers.TimeDistributed(layers.Dropout(.999))(x training=<true>)<line_sep>model=Model(x y)<line_sep>y=model.predict(np.random.random((10 3 2)))<line_sep>assert_allclose(np.mean(y) 0. atol=1e-1 rtol=1e-1)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed_trainable # test layers that need learning_phase to be set
<block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.TimeDistributed(layers.BatchNormalization())<line_sep>_=layer(x)<assert_stmt>len(layer.updates)<eq>2<assert_stmt>len(layer.trainable_weights)<eq>2<line_sep>layer.trainable=<false><assert_stmt>len(layer.updates)<eq>0<assert_stmt>len(layer.trainable_weights)<eq>0<line_sep>layer.trainable=<true><assert_stmt>len(layer.updates)<eq>2<assert_stmt>len(layer.trainable_weights)<eq>2<block_end>@pytest.mark.skipif((K.backend()<eq>'cntk'<or>K.backend()<eq>'mxnet') reason='Unknown timestamps for RNN not supported in CNTK and MXNet.')<def_stmt>test_TimeDistributed_with_masked_embedding_and_unspecified_shape # test with unspecified shape and Embeddings with mask_zero
<block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Embedding(5 6 mask_zero=<true>) input_shape=(<none> <none>)))<line_sep># the shape so far: (N, t_1, t_2, 6)
model.add(wrappers.TimeDistributed(layers.SimpleRNN(7 return_sequences=<true>)))<line_sep>model.add(wrappers.TimeDistributed(layers.SimpleRNN(8 return_sequences=<false>)))<line_sep>model.add(layers.SimpleRNN(1 return_sequences=<false>))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model_input=np.random.randint(low=1 high=5 size=(10 3 4) dtype='int32')<for_stmt>i range(4)<block_start>model_input[i i: i:]=0<block_end>model.fit(model_input np.random.random((10 1)) epochs=1 batch_size=10)<line_sep>mask_outputs=[model.layers[0].compute_mask(model.input)]<for_stmt>layer model.layers[1:]<block_start>mask_outputs.append(layer.compute_mask(layer.input mask_outputs[-1]))<block_end>func=K.function([model.input] mask_outputs[:-1])<line_sep>mask_outputs_val=func([model_input])<line_sep>ref_mask_val_0=model_input<g>0# embedding layer
ref_mask_val_1=ref_mask_val_0# first RNN layer
ref_mask_val_2=np.any(ref_mask_val_1 axis=-1)# second RNN layer
ref_mask_val=[ref_mask_val_0 ref_mask_val_1 ref_mask_val_2]<for_stmt>i range(3)<block_start><assert_stmt>np.array_equal(mask_outputs_val[i] ref_mask_val[i])<block_end><assert_stmt>mask_outputs[-1]<is><none><block_end># final layer
@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed_with_masking_layer # test with Masking layer
<block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Masking(mask_value=0. ) input_shape=(<none> 4)))<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(5)))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model_input=np.random.randint(low=1 high=5 size=(10 3 4))<for_stmt>i range(4)<block_start>model_input[i i: :]=0.<block_end>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(model_input np.random.random((10 3 5)) epochs=1 batch_size=6)<line_sep>mask_outputs=[model.layers[0].compute_mask(model.input)]<line_sep>mask_outputs<augadd>[model.layers[1].compute_mask(model.layers[1].input mask_outputs[-1])]<line_sep>func=K.function([model.input] mask_outputs)<line_sep>mask_outputs_val=func([model_input])<assert_stmt>np.array_equal(mask_outputs_val[0] np.any(model_input axis=-1))<assert_stmt>np.array_equal(mask_outputs_val[1] np.any(model_input axis=-1))<block_end><def_stmt>test_regularizers <block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2 kernel_regularizer='l1') input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<assert_stmt>len(model.layers[0].layer.losses)<eq>1<assert_stmt>len(model.layers[0].losses)<eq>1<assert_stmt>len(model.layers[0].get_losses_for(<none>))<eq>1<assert_stmt>len(model.losses)<eq>1<line_sep>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2 activity_regularizer='l1') input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<assert_stmt>len(model.losses)<eq>1<block_end><def_stmt>test_Bidirectional <block_start>rnn=layers.SimpleRNN<line_sep>samples=2<line_sep>dim=2<line_sep>timesteps=2<line_sep>output_dim=2<line_sep>dropout_rate=0.2<for_stmt>mode ['sum' 'concat']<block_start>x=np.random.random((samples timesteps dim))<line_sep>target_dim=2<times>output_dim<if>mode<eq>'concat'<else>output_dim<line_sep>y=np.random.random((samples target_dim))<line_sep># test with Sequential model
model=Sequential()<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode input_shape=(timesteps dim)))<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># test config
model.get_config()<line_sep>model=model_from_json(model.to_json())<line_sep>model.summary()<line_sep># test stacked bidirectional layers
model=Sequential()<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim return_sequences=<true>) merge_mode=mode input_shape=(timesteps dim)))<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim) merge_mode=mode))<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># test with functional API
inputs=Input((timesteps dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># Bidirectional and stateful
inputs=Input(batch_shape=(1 timesteps dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim stateful=<true>) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<block_end><block_end>@pytest.mark.skipif((K.backend()<eq>'cntk') reason='Unknown timestamps not supported in CNTK.')<def_stmt>test_Bidirectional_dynamic_timesteps # test with functional API with dynamic length
<block_start>rnn=layers.SimpleRNN<line_sep>samples=2<line_sep>dim=2<line_sep>timesteps=2<line_sep>output_dim=2<line_sep>dropout_rate=0.2<for_stmt>mode ['sum' 'concat']<block_start>x=np.random.random((samples timesteps dim))<line_sep>target_dim=2<times>output_dim<if>mode<eq>'concat'<else>output_dim<line_sep>y=np.random.random((samples target_dim))<line_sep>inputs=Input((<none> dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<block_end><block_end>@pytest.mark.parametrize('merge_mode' ['sum' 'mul' 'ave' 'concat' <none>])<def_stmt>test_Bidirectional_merged_value merge_mode<block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>X=[np.random.rand(samples timesteps dim)]<if_stmt>merge_mode<eq>'sum'<block_start>merge_func=<lambda>y y_rev:y+y_rev<block_end><elif_stmt>merge_mode<eq>'mul'<block_start>merge_func=<lambda>y y_rev:y<times>y_rev<block_end><elif_stmt>merge_mode<eq>'ave'<block_start>merge_func=<lambda>y y_rev:(y+y_rev)/2<block_end><elif_stmt>merge_mode<eq>'concat'<block_start>merge_func=<lambda>y y_rev:np.concatenate((y y_rev) axis=-1)<block_end><else_stmt><block_start>merge_func=<lambda>y y_rev:[y y_rev]<block_end># basic case
inputs=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_sequences=<true>) merge_mode=merge_mode)<line_sep>f_merged=K.function([inputs] to_list(layer(inputs)))<line_sep>f_forward=K.function([inputs] [layer.forward_layer.call(inputs)])<line_sep>f_backward=K.function([inputs] [K.reverse(layer.backward_layer.call(inputs) 1)])<line_sep>y_merged=f_merged(X)<line_sep>y_expected=to_list(merge_func(f_forward(X)[0] f_backward(X)[0]))<assert_stmt>len(y_merged)<eq>len(y_expected)<for_stmt>x1,x2 zip(y_merged y_expected)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end># test return_state
inputs=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_state=<true>) merge_mode=merge_mode)<line_sep>f_merged=K.function([inputs] layer(inputs))<line_sep>f_forward=K.function([inputs] layer.forward_layer.call(inputs))<line_sep>f_backward=K.function([inputs] layer.backward_layer.call(inputs))<line_sep>n_states=len(layer.layer.states)<line_sep>y_merged=f_merged(X)<line_sep>y_forward=f_forward(X)<line_sep>y_backward=f_backward(X)<line_sep>y_expected=to_list(merge_func(y_forward[0] y_backward[0]))<assert_stmt>len(y_merged)<eq>len(y_expected)+n_states<times>2<for_stmt>x1,x2 zip(y_merged y_expected)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end># test if the state of a BiRNN is the concatenation of the underlying RNNs
y_merged=y_merged[-n_states<times>2:]<line_sep>y_forward=y_forward[-n_states:]<line_sep>y_backward=y_backward[-n_states:]<for_stmt>state_birnn,state_inner zip(y_merged y_forward+y_backward)<block_start>assert_allclose(state_birnn state_inner atol=1e-5)<block_end><block_end>@pytest.mark.skipif(K.backend()<eq>'theano'<or>K.backend()<eq>'mxnet' reason='Not supported.')@pytest.mark.parametrize('merge_mode' ['sum' 'concat' <none>])<def_stmt>test_Bidirectional_dropout merge_mode<block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>X=[np.random.rand(samples timesteps dim)]<line_sep>inputs=Input((timesteps dim))<line_sep>wrapped=wrappers.Bidirectional(rnn(units dropout=0.2 recurrent_dropout=0.2) merge_mode=merge_mode)<line_sep>outputs=to_list(wrapped(inputs training=<true>))<assert_stmt>all(<not>getattr(x '_uses_learning_phase')<for>x outputs)<line_sep>inputs=Input((timesteps dim))<line_sep>wrapped=wrappers.Bidirectional(rnn(units dropout=0.2 return_state=<true>) merge_mode=merge_mode)<line_sep>outputs=to_list(wrapped(inputs))<assert_stmt>all(x._uses_learning_phase<for>x outputs)<line_sep>model=Model(inputs outputs)<assert_stmt>model.uses_learning_phase<line_sep>y1=to_list(model.predict(X))<line_sep>y2=to_list(model.predict(X))<for_stmt>x1,x2 zip(y1 y2)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end><block_end><def_stmt>test_Bidirectional_state_reuse <block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>input1=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_state=<true> return_sequences=<true>))<line_sep>state=layer(input1)[1:]<line_sep># test passing invalid initial_state: passing a tensor
input2=Input((timesteps dim))<with_stmt>pytest.raises(ValueError)<block_start>output=wrappers.Bidirectional(rnn(units))(input2 initial_state=state[0])<block_end># test valid usage: passing a list
output=wrappers.Bidirectional(rnn(units))(input2 initial_state=state)<line_sep>model=Model([input1 input2] output)<assert_stmt>len(model.layers)<eq>4<assert_stmt>isinstance(model.layers[-1].input list)<line_sep>inputs=[np.random.rand(samples timesteps dim) np.random.rand(samples timesteps dim)]<line_sep>outputs=model.predict(inputs)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support custom RNN cell yet')<def_stmt>test_Bidirectional_with_constants <block_start><class_stmt>RNNCellWithConstants(Layer)<block_start><def_stmt>__init__ self units **kwargs<block_start>self.units=units<line_sep>self.state_size=units<line_sep>super(RNNCellWithConstants self).__init__(**kwargs)<block_end><def_stmt>build self input_shape<block_start><if_stmt><not>isinstance(input_shape list)<block_start><raise>TypeError('expects constants shape')<block_end>[input_shape constant_shape]=input_shape<line_sep># will (and should) raise if more than one constant passed
self.input_kernel=self.add_weight(shape=(input_shape[-1] self.units) initializer='uniform' name='kernel')<line_sep>self.recurrent_kernel=self.add_weight(shape=(self.units self.units) initializer='uniform' name='recurrent_kernel')<line_sep>self.constant_kernel=self.add_weight(shape=(constant_shape[-1] self.units) initializer='uniform' name='constant_kernel')<line_sep>self.built=<true><block_end><def_stmt>call self inputs states constants<block_start>[prev_output]=states<line_sep>[constant]=constants<line_sep>h_input=K.dot(inputs self.input_kernel)<line_sep>h_state=K.dot(prev_output self.recurrent_kernel)<line_sep>h_const=K.dot(constant self.constant_kernel)<line_sep>output=h_input+h_state+h_const<line_sep><return>output [output]<block_end><def_stmt>get_config self<block_start>config={'units':self.units}<line_sep>base_config=super(RNNCellWithConstants self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end># Test basic case.
x=Input((5 5))<line_sep>c=Input((3 ))<line_sep>cell=RNNCellWithConstants(32)<line_sep>custom_objects={'RNNCellWithConstants':RNNCellWithConstants}<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional(RNN(cell))<block_end>y=layer(x constants=c)<line_sep>model=Model([x c] y)<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch([np.zeros((6 5 5)) np.zeros((6 3))] np.zeros((6 64)))<line_sep># Test basic case serialization.
x_np=np.random.random((6 5 5))<line_sep>c_np=np.random.random((6 3))<line_sep>y_np=model.predict([x_np c_np])<line_sep>weights=model.get_weights()<line_sep>config=layer.get_config()<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer(x constants=c)<line_sep>model=Model([x c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_2=model.predict([x_np c_np])<line_sep>assert_allclose(y_np y_np_2 atol=1e-4)<line_sep># test flat list inputs
<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer([x c])<line_sep>model=Model([x c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_3=model.predict([x_np c_np])<line_sep>assert_allclose(y_np y_np_3 atol=1e-4)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support custom RNN cell yet')<def_stmt>test_Bidirectional_with_constants_layer_passing_initial_state <block_start><class_stmt>RNNCellWithConstants(Layer)<block_start><def_stmt>__init__ self units **kwargs<block_start>self.units=units<line_sep>self.state_size=units<line_sep>super(RNNCellWithConstants self).__init__(**kwargs)<block_end><def_stmt>build self input_shape<block_start><if_stmt><not>isinstance(input_shape list)<block_start><raise>TypeError('expects constants shape')<block_end>[input_shape constant_shape]=input_shape<line_sep># will (and should) raise if more than one constant passed
self.input_kernel=self.add_weight(shape=(input_shape[-1] self.units) initializer='uniform' name='kernel')<line_sep>self.recurrent_kernel=self.add_weight(shape=(self.units self.units) initializer='uniform' name='recurrent_kernel')<line_sep>self.constant_kernel=self.add_weight(shape=(constant_shape[-1] self.units) initializer='uniform' name='constant_kernel')<line_sep>self.built=<true><block_end><def_stmt>call self inputs states constants<block_start>[prev_output]=states<line_sep>[constant]=constants<line_sep>h_input=K.dot(inputs self.input_kernel)<line_sep>h_state=K.dot(prev_output self.recurrent_kernel)<line_sep>h_const=K.dot(constant self.constant_kernel)<line_sep>output=h_input+h_state+h_const<line_sep><return>output [output]<block_end><def_stmt>get_config self<block_start>config={'units':self.units}<line_sep>base_config=super(RNNCellWithConstants self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end># Test basic case.
x=Input((5 5))<line_sep>c=Input((3 ))<line_sep>s_for=Input((32 ))<line_sep>s_bac=Input((32 ))<line_sep>cell=RNNCellWithConstants(32)<line_sep>custom_objects={'RNNCellWithConstants':RNNCellWithConstants}<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional(RNN(cell))<block_end>y=layer(x initial_state=[s_for s_bac] constants=c)<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch([np.zeros((6 5 5)) np.zeros((6 32)) np.zeros((6 32)) np.zeros((6 3))] np.zeros((6 64)))<line_sep># Test basic case serialization.
x_np=np.random.random((6 5 5))<line_sep>s_fw_np=np.random.random((6 32))<line_sep>s_bk_np=np.random.random((6 32))<line_sep>c_np=np.random.random((6 3))<line_sep>y_np=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>weights=model.get_weights()<line_sep>config=layer.get_config()<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer(x initial_state=[s_for s_bac] constants=c)<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_2=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>assert_allclose(y_np y_np_2 atol=1e-4)<line_sep># verify that state is used
y_np_2_different_s=model.predict([x_np s_fw_np+10. s_bk_np+10. c_np])<with_stmt>pytest.raises(AssertionError)<block_start>assert_allclose(y_np y_np_2_different_s atol=1e-4)<block_end># test flat list inputs
<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer([x s_for s_bac c])<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_3=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>assert_allclose(y_np y_np_3 atol=1e-4)<block_end><def_stmt>test_Bidirectional_trainable # test layers that need learning_phase to be set
<block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3))<line_sep>_=layer(x)<assert_stmt>len(layer.trainable_weights)<eq>6<line_sep>layer.trainable=<false><assert_stmt>len(layer.trainable_weights)<eq>0<line_sep>layer.trainable=<true><assert_stmt>len(layer.trainable_weights)<eq>6<block_end><def_stmt>test_Bidirectional_updates <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3))<assert_stmt>len(layer.updates)<eq>0<assert_stmt>len(layer.get_updates_for(<none>))<eq>0<assert_stmt>len(layer.get_updates_for(x))<eq>0<line_sep>layer.forward_layer.add_update(0 inputs=x)<line_sep>layer.forward_layer.add_update(1 inputs=<none>)<line_sep>layer.backward_layer.add_update(0 inputs=x)<line_sep>layer.backward_layer.add_update(1 inputs=<none>)<assert_stmt>len(layer.updates)<eq>4<assert_stmt>len(layer.get_updates_for(<none>))<eq>2<assert_stmt>len(layer.get_updates_for(x))<eq>2<block_end><def_stmt>test_Bidirectional_losses <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3 kernel_regularizer='l1' bias_regularizer='l1'))<line_sep>_=layer(x)<assert_stmt>len(layer.losses)<eq>4<assert_stmt>len(layer.get_losses_for(<none>))<eq>4<assert_stmt>len(layer.get_losses_for(x))<eq>0<line_sep>layer.forward_layer.add_loss(0 inputs=x)<line_sep>layer.forward_layer.add_loss(1 inputs=<none>)<line_sep>layer.backward_layer.add_loss(0 inputs=x)<line_sep>layer.backward_layer.add_loss(1 inputs=<none>)<assert_stmt>len(layer.losses)<eq>8<assert_stmt>len(layer.get_losses_for(<none>))<eq>6<assert_stmt>len(layer.get_losses_for(x))<eq>2<block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__])<block_end> |
"""Lowest-common-denominator implementations of platform functionality."""<import_from_future_stmt> absolute_import division print_function with_statement<import_stmt>errno<import_stmt>socket<import_from_stmt>tornado.platform interface<class_stmt>Waker(interface.Waker)<block_start>"""Create an OS independent asynchronous pipe.
For use on platforms that don't have os.pipe() (or where pipes cannot
be passed to select()), but do have sockets. This includes Windows
and Jython.
"""<def_stmt>__init__ self# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
<block_start>self.writer=socket.socket()<line_sep># Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP socket.TCP_NODELAY 1)<line_sep>count=0<while_stmt>1<block_start>count<augadd>1<line_sep># Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a=socket.socket()<line_sep>a.bind(("127.0.0.1" 0))<line_sep>a.listen(1)<line_sep>connect_address=a.getsockname()# assigned (host, port) pair
<try_stmt><block_start>self.writer.connect(connect_address)<line_sep><break># success
<block_end><except_stmt>socket.error<as>detail<block_start><if_stmt>(<not>hasattr(errno 'WSAEADDRINUSE')<or>detail[0]<ne>errno.WSAEADDRINUSE)# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
<block_start><raise><block_end># (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
<if_stmt>count<ge>10# I've never seen it go above 2
<block_start>a.close()<line_sep>self.writer.close()<line_sep><raise>socket.error("Cannot bind trigger!")<block_end># Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()<block_end><block_end>self.reader,addr=a.accept()<line_sep>self.reader.setblocking(0)<line_sep>self.writer.setblocking(0)<line_sep>a.close()<line_sep>self.reader_fd=self.reader.fileno()<block_end><def_stmt>fileno self<block_start><return>self.reader.fileno()<block_end><def_stmt>write_fileno self<block_start><return>self.writer.fileno()<block_end><def_stmt>wake self<block_start><try_stmt><block_start>self.writer.send(b"x")<block_end><except_stmt>(IOError socket.error)<block_start><pass><block_end><block_end><def_stmt>consume self<block_start><try_stmt><block_start><while_stmt><true><block_start>result=self.reader.recv(1024)<if_stmt><not>result<block_start><break><block_end><block_end><block_end><except_stmt>(IOError socket.error)<block_start><pass><block_end><block_end><def_stmt>close self<block_start>self.reader.close()<line_sep>self.writer.close()<block_end><block_end> |
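
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; Tornado's IOLoop normally drives
# Waker internally).  wake() makes a pending select() return immediately,
# and consume() drains the byte so the next select() blocks again.

if __name__ == "__main__":
    import select

    waker = Waker()
    waker.wake()                      # typically called from another thread
    readable, _, _ = select.select([waker.fileno()], [], [], 1.0)
    if readable:
        waker.consume()
    waker.close()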
"""
Basic usage
===========
This example presents the basic usage of brokenaxes
"""<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>brokenaxes brokenaxes<import_stmt>numpy<as>np<line_sep>fig=plt.figure(figsize=(5 2))<line_sep>bax=brokenaxes(xlims=((0 .1) (.4 .7)) ylims=((-1 .7) (.79 1)) hspace=.05)<line_sep>x=np.linspace(0 1 100)<line_sep>bax.plot(x np.sin(10<times>x) label='sin')<line_sep>bax.plot(x np.cos(10<times>x) label='cos')<line_sep>bax.legend(loc=3)<line_sep>bax.set_xlabel('time')<line_sep>bax.set_ylabel('value')<line_sep> |
import clpy
import clpy.sparse.base


_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
    unsigned long long* address_as_ull =
        (unsigned long long*)address;
    unsigned long long old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val +
                                             __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
'''


def isintlike(x):
    try:
        return bool(int(x) == x)
    except (TypeError, ValueError):
        return False


def isscalarlike(x):
    return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)


def isshape(x):
    if not isinstance(x, tuple) or len(x) != 2:
        return False
    m, n = x
    return isintlike(m) and isintlike(n)
from pytest import raises

from discopy.cartesian import *


def test_Box_repr():
    f = Box('f', 1, 2, lambda x: (x, x))
    assert "Box('f', 1, 2" in repr(f)


def test_Function_str():
    f = Function(2, 1, lambda x, y: x + y)
    assert 'Function(dom=2, cod=1,' in str(f)


def test_Function_call():
    f = Swap(2, 1)
    values = (2, 3)
    with raises(TypeError) as err:
        f(*values)
    assert str(err.value) == messages.expected_input_length(f, values)


def test_Function_then():
    f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
    assert Function.id(2).then(*(f, g))(20, 21) == 42


def test_Function_then_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f >> g
    assert str(err.value) == messages.type_err(Function, g)
    g = Function.id(2)
    with raises(AxiomError) as err:
        f >> g
    assert str(err.value) == messages.does_not_compose(f, g)


def test_Function_tensor():
    assert Function.id(3)(1, 2, 3)\
        == Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)


def test_Function_tensor_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f @ g
    assert str(err.value) == messages.type_err(Function, g)
import os
import sys
import shutil

cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))


# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
    from mkdist import bsp_copy_files
    import rtconfig

    library_dir = os.path.join(dist_dir, 'libraries')
    print("=> copy nrf52 bsp libraries")
    library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
    bsp_copy_files(library_path, library_dir)
# -*- coding: utf-8 -*-
import os
import json

from splash import defaults
from splash.utils import to_bytes, path_join_secure
from splash.errors import BadOption


class RenderOptions(object):
    """
    Options that control how to render a response.
    """

    _REQUIRED = object()

    def __init__(self, data, max_timeout):
        self.data = data
        self.max_timeout = max_timeout

    @classmethod
    def raise_error(cls, argument, description, type='bad_argument', **kwargs):
        params = {
            'type': type,
            'argument': argument,
            'description': description,
        }
        params.update(kwargs)
        raise BadOption(params)

    @classmethod
    def fromrequest(cls, request, max_timeout):
        """
        Initialize options from a Twisted Request.
        """

        # 1. GET / POST data
        data = {key.decode('utf-8'): values[0].decode('utf-8')
                for key, values in request.args.items()}
        if request.method == b'POST':
            content_type = request.getHeader(b'content-type')
            if content_type:
                request.content.seek(0)

                # 2. application/json POST data
                if b'application/json' in content_type:
                    try:
                        content = request.content.read().decode('utf-8')
                        data.update(json.loads(content))
                    except ValueError as e:
                        raise BadOption({
                            'type': 'invalid_json',
                            'description': "Can't decode JSON",
                            'message': str(e),
                        })

                # 3. js_source from application/javascript POST requests
                if b'application/javascript' in content_type:
                    data['js_source'] = request.content.read().decode('utf-8')
                request.content.seek(0)

        data['uid'] = id(request)
        return cls(data, max_timeout)

    def get_expired_args(self, cache):
        """
        Return a list of argument names from load_args which can't be loaded
        """
        return cache.get_missing(self.get_load_args().items())

    def save_args_to_cache(self, cache):
        """
        Process save_args and put all values to cache.
        Return a list of (name, key) pairs.
        """
        save_args = self.get_save_args()
        save_values = [self.data.get(name) for name in save_args]
        keys = cache.add_many(save_values)
        return list(zip(save_args, keys))

    def load_cached_args(self, cache):
        load_args = self.get_load_args()
        for name, key in (load_args or {}).items():
            self.data[name] = cache[key]

    def get(self, name, default=_REQUIRED, type=str, range=None):
        value = self.data.get(name)
        if value is not None:
            if type is not None:
                try:
                    value = type(value)
                except ValueError:
                    msg = "Argument %r has a wrong type" % (name,)
                    self.raise_error(name, msg, required_type=type.__name__)
            if range is not None and not (range[0] <= value <= range[1]):
                self.raise_error(name, 'Argument is out of the allowed range',
                                 min=range[0], max=range[1], value=value)
            return value
        elif default is self._REQUIRED:
            self.raise_error(name, 'Required argument is missing: %s' % name,
                             type='argument_required')
        else:
            return default

    def _get_bool(self, name, default=_REQUIRED):
        return self.get(name, default, type=int, range=(0, 1))

    def _get_url(self, name, default=_REQUIRED):
        url = self.get(name, default, type=None)
        if isinstance(url, bytes):
            url = url.decode('utf8')
        return url

    def get_uid(self):
        return self.get('uid')

    def get_url(self):
        return self._get_url("url")

    def get_baseurl(self):
        return self._get_url("baseurl", default=None)

    def get_wait(self):
        return self.get("wait", defaults.WAIT_TIME, type=float,
                        range=(0, self.get_timeout()))

    def get_timeout(self):
        default = min(self.max_timeout, defaults.TIMEOUT)
        return self.get("timeout", default, type=float,
                        range=(0, self.max_timeout))

    def get_resource_timeout(self):
        return self.get("resource_timeout", defaults.RESOURCE_TIMEOUT,
                        type=float, range=(0, 1e6))

    def get_response_body(self):
        return self._get_bool("response_body", defaults.RESPONSE_BODY_ENABLED)

    def get_request_body(self):
        return self._get_bool("request_body", defaults.REQUEST_BODY_ENABLED)

    def get_images(self):
        return self._get_bool("images", defaults.AUTOLOAD_IMAGES)

    def get_proxy(self):
        return self.get("proxy", default=None)

    def get_js_source(self):
        return self.get("js_source", default=None)

    def get_width(self):
        return self.get("width", None, type=int, range=(1, defaults.MAX_WIDTH))

    def get_height(self):
        return self.get("height", None, type=int, range=(1, defaults.MAX_HEIGTH))

    def get_scale_method(self):
        scale_method = self.get("scale_method", defaults.IMAGE_SCALE_METHOD)
        allowed_scale_methods = ['raster', 'vector']
        if scale_method not in allowed_scale_methods:
            self.raise_error(
                argument='scale_method',
                description="Invalid 'scale_method': %s" % scale_method,
                allowed=allowed_scale_methods,
                received=scale_method,
            )
        return scale_method

    def get_quality(self):
        return self.get("quality", defaults.JPEG_QUALITY, type=int, range=(0, 100))

    def get_http_method(self):
        method = self.get("http_method", "GET")
        if method.upper() not in ["POST", "GET"]:
            self.raise_error("http_method",
                             "Unsupported HTTP method {}".format(method))
        return method

    def get_body(self):
        body = self.get("body", None, to_bytes)
        method = self.get("http_method", "GET").upper()
        if method == 'GET' and body:
            self.raise_error("body", "GET request should not have a body")
        return body

    def get_render_all(self, wait=None):
        result = self._get_bool("render_all", False)
        if result == 1 and wait == 0:
            self.raise_error("render_all",
                             "Pass non-zero 'wait' to render full webpage")
        return result

    def get_lua_source(self):
        return self.get("lua_source")

    def get_js_profile(self, js_profiles_path):
        js_profile = self.get("js", default=None)
        if not js_profile:
            return js_profile

        if js_profiles_path is None:
            self.raise_error('js', 'Javascript profiles are not enabled on server')

        try:
            profile_dir = path_join_secure(js_profiles_path, js_profile)
        except ValueError as e:
            # security check fails
            print(e)
            self.raise_error('js', 'Javascript profile does not exist')

        if not os.path.isdir(profile_dir):
            self.raise_error('js', 'Javascript profile does not exist')

        return profile_dir

    def get_headers(self):
        headers = self.get("headers", default=None, type=None)

        if headers is None:
            return headers

        if not isinstance(headers, (list, tuple, dict)):
            self.raise_error(
                argument='headers',
                description="'headers' must be either a JSON array of "
                            "(name, value) pairs or a JSON object")

        if isinstance(headers, (list, tuple)):
            for el in headers:
                string_only = all(isinstance(e, str) for e in el)
                if not (isinstance(el, (list, tuple)) and
                        len(el) == 2 and string_only):
                    self.raise_error(
                        argument='headers',
                        description="'headers' must be either a JSON array of "
                                    "(name, value) pairs or a JSON object")

        return headers

    def get_save_args(self):
        save_args = self.get("save_args", default=None, type=None)
        if save_args is None:
            return []

        if isinstance(save_args, str):
            # comma-separated string
            save_args = save_args.split(',')

        if not isinstance(save_args, list):
            self.raise_error(
                argument="save_args",
                description="'save_args' should be either a comma-separated "
                            "string or a JSON array with argument names",
            )

        # JSON array
        if not all(isinstance(a, str) for a in save_args):
            self.raise_error(
                argument="save_args",
                description="'save_args' should be a list of strings",
            )
        return save_args

    def get_load_args(self):
        load_args = self.get("load_args", default=None, type=None)
        if load_args is None:
            return {}

        if isinstance(load_args, str):
            try:
                load_args = dict(kv.split("=", 1) for kv in load_args.split(';'))
            except ValueError:
                self.raise_error(
                    argument="load_args",
                    description="'load_args' string value is not a "
                                "semicolon-separated list of name=hash pairs")

        if not isinstance(load_args, dict):
            self.raise_error(
                argument="load_args",
                description="'load_args' should be either a JSON object with "
                            "argument hashes or a semicolon-separated list "
                            "of name=hash pairs")

        return load_args

    def get_viewport(self, wait=None):
        viewport = self.get("viewport", defaults.VIEWPORT_SIZE)

        if viewport == 'full':
            if wait == 0:
                self.raise_error("viewport",
                                 "Pass non-zero 'wait' to render full webpage")
        else:
            try:
                validate_size_str(viewport)
            except ValueError as e:
                self.raise_error("viewport", str(e))
        return viewport

    def get_filters(self, pool=None, adblock_rules=None):
        filter_names = self.get('filters', '')
        filter_names = [f for f in filter_names.split(',') if f]

        if pool is None and adblock_rules is None:
            # skip validation
            return filter_names

        if not filter_names:
            return filter_names

        if pool is not None:
            adblock_rules = pool.network_manager_factory.adblock_rules
            if adblock_rules is None:
                self.raise_error("filters",
                                 "Invalid filter names: %s" % (filter_names,))

        if adblock_rules is not None:
            unknown_filters = adblock_rules.get_unknown_filters(filter_names)
            if unknown_filters:
                self.raise_error("filters",
                                 "Invalid filter names: %s" % (unknown_filters,))

        return filter_names

    def get_allowed_domains(self):
        allowed_domains = self.get("allowed_domains", default=None)
        if allowed_domains is not None:
            return allowed_domains.split(',')

    def get_allowed_content_types(self):
        content_types = self.get("allowed_content_types", default=['*'])
        if isinstance(content_types, str):
            content_types = list(filter(None, content_types.split(',')))
        return content_types

    def get_forbidden_content_types(self):
        content_types = self.get("forbidden_content_types", default=[])
        if isinstance(content_types, str):
            content_types = list(filter(None, content_types.split(',')))
        return content_types

    def get_html5_media(self):
        return self._get_bool("html5_media", defaults.HTML5_MEDIA_ENABLED)

    def get_engine(self, browser_engines_enabled=None):
        engine = self.get("engine", default="webkit", type=str)
        if engine not in {"webkit", "chromium"}:
            self.raise_error("engine", "Unknown render engine {}".format(engine))

        if browser_engines_enabled is not None:
            if engine not in browser_engines_enabled:
                self.raise_error("engine", "Disabled render engine {}".format(engine))

        return engine

    def get_http2(self):
        engine = self.get_engine()
        if self.get_engine() == "webkit":
            default = defaults.WEBKIT_HTTP2_ENABLED
        else:
            assert engine == 'chromium'
            default = defaults.CHROMIUM_HTTP2_ENABLED
        return self._get_bool("http2", default)

    def get_common_params(self, js_profiles_path):
        wait = self.get_wait()
        return {
            'url': self.get_url(),
            'baseurl': self.get_baseurl(),
            'wait': wait,
            'resource_timeout': self.get_resource_timeout(),
            'viewport': self.get_viewport(wait),
            'render_all': self.get_render_all(wait),
            'images': self.get_images(),
            'headers': self.get_headers(),
            'proxy': self.get_proxy(),
            'js_profile': self.get_js_profile(js_profiles_path),
            'js_source': self.get_js_source(),
            'http_method': self.get_http_method(),
            'body': self.get_body(),
            'html5_media': self.get_html5_media(),
            'http2': self.get_http2(),
            # 'lua': self.get_lua(),
        }

    def get_image_params(self):
        return {
            'width': self.get_width(),
            'height': self.get_height(),
            'scale_method': self.get_scale_method(),
        }

    def get_png_params(self):
        return self.get_image_params()

    def get_jpeg_params(self):
        params = {'quality': self.get_quality()}
        params.update(self.get_image_params())
        return params

    def get_include_params(self):
        return dict(
            html=self._get_bool("html", defaults.DO_HTML),
            iframes=self._get_bool("iframes", defaults.DO_IFRAMES),
            png=self._get_bool("png", defaults.DO_PNG),
            jpeg=self._get_bool("jpeg", defaults.DO_JPEG),
            script=self._get_bool("script", defaults.SHOW_SCRIPT),
            console=self._get_bool("console", defaults.SHOW_CONSOLE),
            history=self._get_bool("history", defaults.SHOW_HISTORY),
            har=self._get_bool("har", defaults.SHOW_HAR),
        )


def validate_size_str(size_str):
    """
    Validate size string in WxH format.

    Can be used to validate both viewport and window size strings. Does not
    special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes
    wrong.

    :param size_str: string to validate
    """
    max_width = defaults.VIEWPORT_MAX_WIDTH
    max_heigth = defaults.VIEWPORT_MAX_HEIGTH
    max_area = defaults.VIEWPORT_MAX_AREA
    try:
        w, h = map(int, size_str.split('x'))
    except ValueError:
        raise ValueError("Invalid viewport format: %s" % size_str)
    else:
        if not ((0 < w <= max_width) and (0 < h <= max_heigth) and
                (w * h < max_area)):
            raise ValueError(
                "Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)" %
                (w, h, w * h, max_width, max_heigth, max_area))
# Set up configuration variables
__all__=['custom_viewer' 'qglue' 'test']<import_stmt>os<import_stmt>sys<import_from_stmt>pkg_resources get_distribution DistributionNotFound<try_stmt><block_start>__version__=get_distribution('glue-core').version<block_end><except_stmt>DistributionNotFound<block_start>__version__='undefined'<block_end><import_from_stmt>._mpl_backend MatplotlibBackendSetter<line_sep>sys.meta_path.append(MatplotlibBackendSetter())<import_from_stmt>glue.viewers.custom.helper custom_viewer<line_sep># Load user's configuration file
<import_from_stmt>.config load_configuration<line_sep>env=load_configuration()<import_from_stmt>.qglue qglue<import_from_stmt>.main load_plugins# noqa
<def_stmt>test no_optional_skip=<false><block_start><import_from_stmt>pytest main<line_sep>root=os.path.abspath(os.path.dirname(__file__))<line_sep>args=[root '-x']<if_stmt>no_optional_skip<block_start>args.append('--no-optional-skip')<block_end><return>main(args=args)<block_end><import_from_stmt>glue._settings_helpers load_settings<line_sep>load_settings()<line_sep># In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
<def_stmt>handle_exception exc_type exc_value exc_traceback<block_start>sys.__excepthook__(exc_type exc_value exc_traceback)<block_end>sys.excepthook=handle_exception<line_sep> |
<import_stmt>requests<import_from_stmt>allauth.socialaccount.providers.oauth2.views OAuth2Adapter OAuth2CallbackView OAuth2LoginView <import_from_stmt>.provider DropboxOAuth2Provider<class_stmt>DropboxOAuth2Adapter(OAuth2Adapter)<block_start>provider_id=DropboxOAuth2Provider.id<line_sep>access_token_url="https://api.dropbox.com/oauth2/token"<line_sep>authorize_url="https://www.dropbox.com/oauth2/authorize"<line_sep>profile_url="https://api.dropbox.com/2/users/get_current_account"<line_sep>redirect_uri_protocol="https"<def_stmt>complete_login self request app token **kwargs<block_start>response=requests.post(self.profile_url headers={"Authorization":"Bearer %s"%(token.token )} )<line_sep>response.raise_for_status()<line_sep><return>self.get_provider().sociallogin_from_response(request response.json())<block_end><block_end>oauth_login=OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)<line_sep>oauth_callback=OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)<line_sep> |
"""
3D visualization primitives based on Plotly.
We might want to use a more powerful library like Open3D instead.
Plotly, however, supports animations, buttons, and sliders.
1) Initialize a figure with `fig = init_figure()`
2) Plot points, cameras, lines, or create a slider animation.
3) Call `fig.show()` to render the figure.
"""<import_stmt>plotly.graph_objects<as>go<import_stmt>numpy<as>np<import_from_stmt>..pixlib.geometry.utils to_homogeneous<def_stmt>init_figure height=800<block_start>"""Initialize a 3D figure."""<line_sep>fig=go.Figure()<line_sep>fig.update_layout(height=height scene_camera=dict(eye=dict(x=0. y=-.1 z=-2) up=dict(x=0 y=-1. z=0)) scene=dict(xaxis=dict(showbackground=<false>) yaxis=dict(showbackground=<false>) aspectmode='data' dragmode='orbit') margin=dict(l=0 r=0 b=0 t=0 pad=0))<line_sep># noqa E741
<return>fig<block_end><def_stmt>plot_points fig pts color='rgba(255, 0, 0, 1)' ps=2<block_start>"""Plot a set of 3D points."""<line_sep>x,y,z=pts.T<line_sep>tr=go.Scatter3d(x=x y=y z=z mode='markers' marker_size=ps marker_color=color marker_line_width=.2)<line_sep>fig.add_trace(tr)<block_end><def_stmt>plot_camera fig R t K color='rgb(0, 0, 255)'<block_start>"""Plot a camera as a cone with camera frustum."""<line_sep>x,y,z=t<line_sep>u,v,[email protected]([0 0 1])<line_sep>tr=go.Cone(x=[x] y=[y] z=[z] u=[u] v=[v] w=[w] anchor='tip' showscale=<false> colorscale=[[0 color] [1 color]] sizemode='absolute')<line_sep>fig.add_trace(tr)<line_sep>W,H=K[0 2]<times>2 K[1 2]<times>2<line_sep>corners=np.array([[0 0] [W 0] [W H] [0 H] [0 0]])<line_sep>corners=to_homogeneous(corners)@np.linalg.inv(K).T<line_sep>corners=(corners/2)@R.T+t<line_sep>x,y,z=corners.T<line_sep>tr=go.Scatter3d(x=x y=y z=z line=dict(color='rgba(0, 0, 0, .5)') marker=dict(size=0.0001) showlegend=<false>)<line_sep>fig.add_trace(tr)<block_end><def_stmt>create_slider_animation fig traces<block_start>"""Create a slider that animates a list of traces (e.g. 3D points)."""<line_sep>slider={'steps':[]}<line_sep>frames=[]<line_sep>fig.add_trace(traces[0])<line_sep>idx=len(fig.data)-1<for_stmt>i,tr enumerate(traces)<block_start>frames.append(go.Frame(name=str(i) traces=[idx] data=[tr]))<line_sep>step={"args":[[str(i)] {"frame":{"redraw":<true>} "mode":"immediate"}] "label":i "method":"animate"}<line_sep>slider['steps'].append(step)<block_end>fig.frames=tuple(frames)<line_sep>fig.layout.sliders=(slider )<block_end> |
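# Usage sketch following the three steps in the module docstring (illustrative only;
# the camera intrinsics and point cloud below are made-up values).
if __name__ == "__main__":
    fig = init_figure(height=600)
    pts = np.random.randn(100, 3)                        # random 3D point cloud
    plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=3)
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    plot_camera(fig, np.eye(3), np.zeros(3), K)          # identity pose at the origin
    fig.show()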
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>unittest<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>parameterized parameterized<import_from_stmt>monai.networks eval_mode<import_from_stmt>monai.networks.blocks SubpixelUpsample<import_from_stmt>monai.networks.layers.factories Conv<line_sep>TEST_CASE_SUBPIXEL=[]<for_stmt>inch range(1 5)<block_start><for_stmt>dim range(1 4)<block_start><for_stmt>factor range(1 3)<block_start>test_case=[{"dimensions":dim "in_channels":inch "scale_factor":factor} (2 inch *([8]<times>dim)) (2 inch *([8<times>factor]<times>dim)) ]<line_sep>TEST_CASE_SUBPIXEL.append(test_case)<block_end><block_end><block_end>TEST_CASE_SUBPIXEL_2D_EXTRA=[{"dimensions":2 "in_channels":2 "scale_factor":3} (2 2 8 4) # different size for H and W
(2 2 24 12) ]<line_sep>TEST_CASE_SUBPIXEL_3D_EXTRA=[{"dimensions":3 "in_channels":1 "scale_factor":2} (2 1 16 8 4) # different size for H, W and D
(2 1 32 16 8) ]<line_sep>conv_block=nn.Sequential(Conv[Conv.CONV 3](1 4 kernel_size=1) Conv[Conv.CONV 3](4 8 kernel_size=3 stride=1 padding=1))<line_sep>TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA=[{"dimensions":3 "in_channels":1 "scale_factor":2 "conv_block":conv_block} (2 1 16 8 4) # different size for H, W and D
(2 1 32 16 8) ]<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)<line_sep># add every test back with the pad/pool sequential component omitted
<for_stmt>tests list(TEST_CASE_SUBPIXEL)<block_start>args:dict=tests[0]# type: ignore
args=dict(args)<line_sep>args["apply_pad_pool"]=<false><line_sep>TEST_CASE_SUBPIXEL.append([args tests[1] tests[2]])<block_end><class_stmt>TestSUBPIXEL(unittest.TestCase)<block_start>@parameterized.expand(TEST_CASE_SUBPIXEL)<def_stmt>test_subpixel_shape self input_param input_shape expected_shape<block_start>net=SubpixelUpsample(**input_param)<with_stmt>eval_mode(net)<block_start>result=net.forward(torch.randn(input_shape))<line_sep>self.assertEqual(result.shape expected_shape)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
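# Standalone sketch of the shape relation exercised by the 2D extra case above
# (illustrative, not part of the test suite): scale_factor=3 triples H and W.
def _subpixel_shape_demo():
    net = SubpixelUpsample(dimensions=2, in_channels=2, scale_factor=3)
    with eval_mode(net):
        out = net(torch.randn(2, 2, 8, 4))
    assert out.shape == (2, 2, 24, 12)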
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.autograd Function Variable<import_from_stmt>torch.nn Module<def_stmt>check_type var t name<block_start><if_stmt>var.dtype<is><not>t<block_start><raise>TypeError("{} must be {}".format(name t))<block_end><block_end><def_stmt>check_contiguous var name<block_start><if_stmt><not>var.is_contiguous()<block_start><raise>ValueError("{} must be contiguous".format(name))<block_end><block_end><def_stmt>check_dim var dim name<block_start><if_stmt>len(var.shape)<ne>dim<block_start><raise>ValueError("{} must be {}D".format(name dim))<block_end><block_end><def_stmt>certify_inputs log_probs labels lengths label_lengths# check_type(log_probs, torch.float32, "log_probs")
<block_start>check_type(labels torch.int32 "labels")<line_sep>check_type(label_lengths torch.int32 "label_lengths")<line_sep>check_type(lengths torch.int32 "lengths")<line_sep>check_contiguous(log_probs "log_probs")<line_sep>check_contiguous(labels "labels")<line_sep>check_contiguous(label_lengths "label_lengths")<line_sep>check_contiguous(lengths "lengths")<if_stmt>lengths.shape[0]<ne>log_probs.shape[0]<block_start><raise>ValueError(f"Must have a length per example. "<concat>f"Given lengths dim: {lengths.shape[0]}, "<concat>f"Log probs dim : {log_probs.shape[0]}")<block_end><if_stmt>label_lengths.shape[0]<ne>log_probs.shape[0]<block_start><raise>ValueError("Must have a label length per example. "<concat>f"Given label lengths dim : {label_lengths.shape[0]}, "<concat>f"Log probs dim : {log_probs.shape[0]}")<block_end>check_dim(log_probs 4 "log_probs")<line_sep>check_dim(labels 2 "labels")<line_sep>check_dim(lengths 1 "lenghts")<line_sep>check_dim(label_lengths 1 "label_lenghts")<line_sep>max_T=torch.max(lengths)<line_sep>max_U=torch.max(label_lengths)<line_sep>T,U=log_probs.shape[1:3]<if_stmt>T<ne>max_T<block_start><raise>ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")<block_end><if_stmt>U<ne>max_U+1<block_start><raise>ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")<block_end><block_end><def_stmt>_assert_no_grad tensor<block_start><assert_stmt><not>tensor.requires_grad ("gradients only computed for log_probs - please "<concat>"mark other tensors as not requiring gradients")<block_end><def_stmt>forward_pass log_probs labels blank<block_start>"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""<line_sep>T,U,_=log_probs.shape<line_sep>alphas=np.zeros((T U) dtype='f')<for_stmt>t range(1 T)<block_start>alphas[t 0]=alphas[t-1 0]+log_probs[t-1 0 blank]<block_end><for_stmt>u range(1 U)<block_start>alphas[0 u]=alphas[0 u-1]+log_probs[0 u-1 labels[u-1]]<block_end><for_stmt>t range(1 T)<block_start><for_stmt>u range(1 U)<block_start>no_emit=alphas[t-1 u]+log_probs[t-1 u blank]<line_sep>emit=alphas[t u-1]+log_probs[t u-1 labels[u-1]]<line_sep>alphas[t u]=np.logaddexp(emit no_emit)<block_end><block_end>loglike=alphas[T-1 U-1]+log_probs[T-1 U-1 blank]<line_sep><return>alphas loglike<block_end><def_stmt>backward_pass log_probs labels blank<block_start>"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""<line_sep>T,U,_=log_probs.shape<line_sep>betas=np.zeros((T U) dtype='f')<line_sep>betas[T-1 U-1]=log_probs[T-1 U-1 blank]<for_stmt>t reversed(range(T-1))<block_start>betas[t U-1]=betas[t+1 U-1]+log_probs[t U-1 blank]<block_end><for_stmt>u reversed(range(U-1))<block_start>betas[T-1 u]=betas[T-1 u+1]+log_probs[T-1 u labels[u]]<block_end><for_stmt>t reversed(range(T-1))<block_start><for_stmt>u reversed(range(U-1))<block_start>no_emit=betas[t+1 u]+log_probs[t u blank]<line_sep>emit=betas[t u+1]+log_probs[t u labels[u]]<line_sep>betas[t u]=np.logaddexp(emit no_emit)<block_end><block_end><return>betas betas[0 0]<block_end><def_stmt>compute_gradient log_probs alphas betas labels blank fastemit_lambda<block_start>"""
Computes the gradients of the log_probs with respect to the log probability of this step occurring.
Args:
log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""<line_sep>T,U,_=log_probs.shape<line_sep>grads=np.full(log_probs.shape -float("inf"))<line_sep>log_like=betas[0 0]# == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T-1 U-1 blank]=alphas[T-1 U-1]<line_sep>grads[:T-1 : blank]=alphas[:T-1 :]+betas[1: :]<line_sep># // grad to label transition
<for_stmt>u,l enumerate(labels)<block_start>grads[: u l]=alphas[: u]+betas[: u+1]<block_end>grads=-np.exp(grads+log_probs-log_like)<if_stmt>fastemit_lambda<g>0.0<block_start><for_stmt>u,l enumerate(labels)<block_start>grads[: u l]=(1.0+fastemit_lambda)<times>grads[: u l]<block_end><block_end><return>grads<block_end><def_stmt>fastemit_regularization log_probs labels alphas betas blank fastemit_lambda<block_start>"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""<line_sep># General calculation of the fastemit regularization alignments
T,U,_=log_probs.shape<line_sep># alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg=fastemit_lambda<times>(alphas[T-1 U-1]+log_probs[T-1 U-1 blank])<line_sep><return>-reg<block_end><def_stmt>transduce log_probs labels blank=0 fastemit_lambda=0.0<block_start>"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
unnormalized input activations
2d array: Alphas matrix (TxU)
2d array: Betas matrix (TxU)
"""<line_sep>alphas,ll_forward=forward_pass(log_probs labels blank)<line_sep>betas,ll_backward=backward_pass(log_probs labels blank)<line_sep>grads=compute_gradient(log_probs alphas betas labels blank fastemit_lambda)<line_sep><return>-ll_forward grads alphas betas<block_end><def_stmt>transduce_batch log_probs labels flen glen blank=0 fastemit_lambda=0.0<block_start>"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""<line_sep>grads=np.zeros_like(log_probs)<line_sep>costs=[]<for_stmt>b range(log_probs.shape[0])<block_start>t=int(flen[b])<line_sep>u=int(glen[b])+1<line_sep>ll,g,alphas,betas=transduce(log_probs[b :t :u :] labels[b :u-1] blank fastemit_lambda)<line_sep>grads[b :t :u :]=g<line_sep>reg=fastemit_regularization(log_probs[b :t :u :] labels[b :u-1] alphas betas blank fastemit_lambda)<line_sep>ll<augadd>reg<line_sep>costs.append(ll)<block_end><return>costs grads<block_end><class_stmt>_RNNT(Function)<block_start>@staticmethod<def_stmt>forward ctx acts labels act_lens label_lens blank fastemit_lambda<block_start>costs,grads=transduce_batch(acts.detach().cpu().numpy() labels.cpu().numpy() act_lens.cpu().numpy() label_lens.cpu().numpy() blank fastemit_lambda )<line_sep>costs=torch.FloatTensor([sum(costs)])<line_sep>grads=torch.Tensor(grads).to(acts)<line_sep>ctx.grads=grads<line_sep><return>costs<block_end>@staticmethod<def_stmt>backward ctx grad_output<block_start><return>ctx.grads <none> <none> <none> <none> <none><block_end><block_end><class_stmt>RNNTLoss(Module)<block_start>"""
Parameters:
`blank` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""<def_stmt>__init__ self blank:int=0 fastemit_lambda:float=0.0<block_start>super(RNNTLoss self).__init__()<line_sep>self.blank=blank<line_sep>self.fastemit_lambda=fastemit_lambda<line_sep>self.rnnt=_RNNT.apply<block_end><def_stmt>forward self acts labels act_lens label_lens<block_start><assert_stmt>len(labels.size())<eq>2<line_sep>_assert_no_grad(labels)<line_sep>_assert_no_grad(act_lens)<line_sep>_assert_no_grad(label_lens)<line_sep>certify_inputs(acts labels act_lens label_lens)<line_sep>acts=torch.nn.functional.log_softmax(acts -1)<line_sep><return>self.rnnt(acts labels act_lens label_lens self.blank self.fastemit_lambda)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>loss=RNNTLoss(fastemit_lambda=0.01)<line_sep>torch.manual_seed(0)<line_sep>acts=torch.randn(1 2 5 3)<line_sep>labels=torch.tensor([[0 2 1 2]] dtype=torch.int32)<line_sep>act_lens=torch.tensor([2] dtype=torch.int32)<line_sep>label_lens=torch.tensor([len(labels[0])] dtype=torch.int32)<line_sep>loss_val=loss(acts labels act_lens label_lens)<block_end> |
#coding:utf8
'''
Created on 2013-8-21
@author: lan (www.9miao.com)
'''<import_stmt>itertools<import_stmt>datetime<def_stmt>safeunicode obj encoding='utf-8'<block_start>r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""<line_sep>t=type(obj)<if_stmt>t<is>unicode<block_start><return>obj<block_end><elif_stmt>t<is>str<block_start><return>obj.decode(encoding)<block_end><elif_stmt>t<in>[int float bool]<block_start><return>unicode(obj)<block_end><elif_stmt>hasattr(obj '__unicode__')<or>isinstance(obj unicode)<block_start><return>unicode(obj)<block_end><else_stmt><block_start><return>str(obj).decode(encoding)<block_end><block_end><def_stmt>safestr obj encoding='utf-8'<block_start>r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""<if_stmt>isinstance(obj unicode)<block_start><return>obj.encode(encoding)<block_end><elif_stmt>isinstance(obj str)<block_start><return>obj<block_end><elif_stmt>hasattr(obj 'next')# iterator
<block_start><return>itertools.imap(safestr obj)<block_end><else_stmt><block_start><return>str(obj)<block_end><block_end><def_stmt>sqlify obj<block_start>"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""<line_sep># because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
<if_stmt>obj<is><none><block_start><return>'NULL'<block_end><elif_stmt>obj<is><true><block_start><return>"'t'"<block_end><elif_stmt>obj<is><false><block_start><return>"'f'"<block_end><elif_stmt>datetime<and>isinstance(obj datetime.datetime)<block_start><return>repr(obj.isoformat())<block_end><else_stmt><block_start><if_stmt>isinstance(obj unicode)<block_start>obj=obj.encode('utf8')<block_end><return>repr(obj)<block_end><block_end><def_stmt>sqllist lst<block_start>"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""<if_stmt>isinstance(lst basestring)<block_start><return>lst<block_end><else_stmt><block_start><return>', '.join(lst)<block_end><block_end><def_stmt>_sqllist values<block_start>"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""<line_sep>items=[]<line_sep>items.append('(')<for_stmt>i,v enumerate(values)<block_start><if_stmt>i<ne>0<block_start>items.append(', ')<block_end>items.append(sqlparam(v))<block_end>items.append(')')<line_sep><return>SQLQuery(items)<block_end><def_stmt>sqlquote a<block_start>"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""<if_stmt>isinstance(a list)<block_start><return>_sqllist(a)<block_end><else_stmt><block_start><return>sqlparam(a).sqlquery()<block_end><block_end><def_stmt>_interpolate sformat<block_start>"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""<import_from_stmt>tokenize tokenprog<line_sep>tokenprog=tokenprog<def_stmt>matchorfail text pos<block_start>match=tokenprog.match(text pos)<if_stmt>match<is><none><block_start><raise>_ItplError(text pos)<block_end><return>match match.end()<block_end>namechars="abcdefghijklmnopqrstuvwxyz"<concat>"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"<line_sep>chunks=[]<line_sep>pos=0<while_stmt>1<block_start>dollar=sformat.find("$" pos)<if_stmt>dollar<l>0<block_start><break><block_end>nextchar=sformat[dollar+1]<if_stmt>nextchar<eq>"{"<block_start>chunks.append((0 sformat[pos:dollar]))<line_sep>pos,level=dollar+2 1<while_stmt>level<block_start>match,pos=matchorfail(sformat pos)<line_sep>tstart,tend=match.regs[3]<line_sep>token=sformat[tstart:tend]<if_stmt>token<eq>"{"<block_start>level=level+1<block_end><elif_stmt>token<eq>"}"<block_start>level=level-1<block_end><block_end>chunks.append((1 sformat[dollar+2:pos-1]))<block_end><elif_stmt>nextchar<in>namechars<block_start>chunks.append((0 sformat[pos:dollar]))<line_sep>match,pos=matchorfail(sformat dollar+1)<while_stmt>pos<l>len(sformat)<block_start><if_stmt>sformat[pos]<eq>"."<and>pos+1<l>len(sformat)<and>sformat[pos+1]<in>namechars<block_start>match,pos=matchorfail(sformat pos+1)<block_end><elif_stmt>sformat[pos]<in>"(["<block_start>pos,level=pos+1 1<while_stmt>level<block_start>match,pos=matchorfail(sformat pos)<line_sep>tstart,tend=match.regs[3]<line_sep>token=sformat[tstart:tend]<if_stmt>token[0]<in>"(["<block_start>level=level+1<block_end><elif_stmt>token[0]<in>")]"<block_start>level=level-1<block_end><block_end><block_end><else_stmt><block_start><break><block_end><block_end>chunks.append((1 sformat[dollar+1:pos]))<block_end><else_stmt><block_start>chunks.append((0 sformat[pos:dollar+1]))<line_sep>pos=dollar+1+(nextchar<eq>"$")<block_end><block_end><if_stmt>pos<l>len(sformat)<block_start>chunks.append((0 sformat[pos:]))<block_end><return>chunks<block_end><def_stmt>sqlwhere dictionary grouping=' AND '<block_start>"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""<line_sep><return>SQLQuery.join([k+' = '+sqlparam(v)<for>k,v dictionary.items()] grouping)<block_end><def_stmt>reparam string_ dictionary<block_start>"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""<line_sep>dictionary=dictionary.copy()# eval mucks with it
result=[]<for_stmt>live,chunk _interpolate(string_)<block_start><if_stmt>live<block_start>v=eval(chunk dictionary)<line_sep>result.append(sqlquote(v))<block_end><else_stmt><block_start>result.append(chunk)<block_end><block_end><return>SQLQuery.join(result '')<block_end><class_stmt>UnknownParamstyle(Exception)<block_start>"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""<line_sep><pass><block_end><class_stmt>_ItplError(ValueError)<block_start><def_stmt>__init__ self text pos<block_start>ValueError.__init__(self)<line_sep>self.text=text<line_sep>self.pos=pos<block_end><def_stmt>__str__ self<block_start><return>"unfinished expression in %s at char %d"%(repr(self.text) self.pos)<block_end><block_end><class_stmt>SQLParam(object)<block_start>"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""<line_sep>__slots__=["value"]<def_stmt>__init__ self value<block_start>self.value=value<block_end><def_stmt>get_marker self paramstyle='pyformat'<block_start><if_stmt>paramstyle<eq>'qmark'<block_start><return>'?'<block_end><elif_stmt>paramstyle<eq>'numeric'<block_start><return>':1'<block_end><elif_stmt>paramstyle<is><none><or>paramstyle<in>['format' 'pyformat']<block_start><return>'%s'<block_end><raise>UnknownParamstyle paramstyle<block_end><def_stmt>sqlquery self<block_start><return>SQLQuery([self])<block_end><def_stmt>__add__ self other<block_start><return>self.sqlquery()+other<block_end><def_stmt>__radd__ self other<block_start><return>other+self.sqlquery()<block_end><def_stmt>__str__ self<block_start><return>str(self.value)<block_end><def_stmt>__repr__ self<block_start><return>'<param: %s>'%repr(self.value)<block_end><block_end>sqlparam=SQLParam<class_stmt>SQLQuery(object)<block_start>"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""<line_sep>__slots__=["items"]<line_sep># tested in sqlquote's docstring
<def_stmt>__init__ self items=<none><block_start>r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""<if_stmt>items<is><none><block_start>self.items=[]<block_end><elif_stmt>isinstance(items list)<block_start>self.items=items<block_end><elif_stmt>isinstance(items SQLParam)<block_start>self.items=[items]<block_end><elif_stmt>isinstance(items SQLQuery)<block_start>self.items=list(items.items)<block_end><else_stmt><block_start>self.items=[items]<block_end># Take care of SQLLiterals
<for_stmt>i,item enumerate(self.items)<block_start><if_stmt>isinstance(item SQLParam)<and>isinstance(item.value SQLLiteral)<block_start>self.items[i]=item.value.v<block_end><block_end><block_end><def_stmt>append self value<block_start>self.items.append(value)<block_end><def_stmt>__add__ self other<block_start><if_stmt>isinstance(other basestring)<block_start>items=[other]<block_end><elif_stmt>isinstance(other SQLQuery)<block_start>items=other.items<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>SQLQuery(self.items+items)<block_end><def_stmt>__radd__ self other<block_start><if_stmt>isinstance(other basestring)<block_start>items=[other]<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>SQLQuery(items+self.items)<block_end><def_stmt>__iadd__ self other<block_start><if_stmt>isinstance(other (basestring SQLParam))<block_start>self.items.append(other)<block_end><elif_stmt>isinstance(other SQLQuery)<block_start>self.items.extend(other.items)<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>self<block_end><def_stmt>__len__ self<block_start><return>len(self.query())<block_end><def_stmt>query self paramstyle=<none><block_start>"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""<line_sep>s=[]<for_stmt>x self.items<block_start><if_stmt>isinstance(x SQLParam)<block_start>x=x.get_marker(paramstyle)<line_sep>s.append(safestr(x))<block_end><else_stmt><block_start>x=safestr(x)<line_sep># automatically escape % characters in the query
# For backward compatibility, ignore escaping when the query looks already escaped
<if_stmt>paramstyle<in>['format' 'pyformat']<block_start><if_stmt>'%'<in>x<and>'%%'<not><in>x<block_start>x=x.replace('%' '%%')<block_end><block_end>s.append(x)<block_end><block_end><return>"".join(s)<block_end><def_stmt>values self<block_start>"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""<line_sep><return>[i.value<for>i self.items<if>isinstance(i SQLParam)]<block_end><def_stmt>join items sep=' ' prefix=<none> suffix=<none> target=<none><block_start>"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""<if_stmt>target<is><none><block_start>target=SQLQuery()<block_end>target_items=target.items<if_stmt>prefix<block_start>target_items.append(prefix)<block_end><for_stmt>i,item enumerate(items)<block_start><if_stmt>i<ne>0<block_start>target_items.append(sep)<block_end><if_stmt>isinstance(item SQLQuery)<block_start>target_items.extend(item.items)<block_end><else_stmt><block_start>target_items.append(item)<block_end><block_end><if_stmt>suffix<block_start>target_items.append(suffix)<block_end><return>target<block_end>join=staticmethod(join)<def_stmt>_str self<block_start><try_stmt><block_start><return>self.query()%tuple([sqlify(x)<for>x self.values()])<block_end><except_stmt>(ValueError TypeError)<block_start><return>self.query()<block_end><block_end><def_stmt>__str__ self<block_start><return>safestr(self._str())<block_end><def_stmt>__unicode__ self<block_start><return>safeunicode(self._str())<block_end><def_stmt>__repr__ self<block_start><return>'<sql: %s>'%repr(str(self))<block_end><block_end><class_stmt>SQLLiteral<block_start>"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""<def_stmt>__init__ self v<block_start>self.v=v<block_end><def_stmt>__repr__ self<block_start><return>self.v<block_end><block_end><class_stmt>SQLProducer<block_start>"""Database"""<def_stmt>__init__ self<block_start>"""Creates a database.
"""<line_sep><pass><block_end><def_stmt>query self sql_query processed=<false> svars=<none><block_start>"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""<if_stmt>svars<is><none><block_start>svars={}<block_end><if_stmt><not>processed<and><not>isinstance(sql_query SQLQuery)<block_start>sql_query=reparam(sql_query svars)<block_end><return>sql_query<block_end><def_stmt>sql_clauses self what tables where group order limit offset<block_start><return>(('SELECT' what) ('FROM' sqllist(tables)) ('WHERE' where) ('GROUP BY' group) ('ORDER BY' order) ('LIMIT' limit) ('OFFSET' offset))<block_end><def_stmt>gen_clause self sql val svars<block_start><if_stmt>isinstance(val (int long))<block_start><if_stmt>sql<eq>'WHERE'<block_start>nout='id = '+sqlquote(val)<block_end><else_stmt><block_start>nout=SQLQuery(val)<block_end><block_end><elif_stmt>isinstance(val (list tuple))<and>len(val)<eq>2<block_start>nout=SQLQuery(val[0] val[1])# backwards-compatibility
<block_end><elif_stmt>isinstance(val SQLQuery)<block_start>nout=val<block_end><else_stmt><block_start>nout=reparam(val svars)<block_end><def_stmt>xjoin a b<block_start><if_stmt>a<and>b<block_start><return>a+' '+b<block_end><else_stmt><block_start><return>a<or>b<block_end><block_end><return>xjoin(sql nout)<block_end><def_stmt>_where self where svars<block_start><if_stmt>isinstance(where (int long))<block_start>where="id = "+sqlparam(where)<block_end><elif_stmt>isinstance(where (list tuple))<and>len(where)<eq>2<block_start>where=SQLQuery(where[0] where[1])<block_end><elif_stmt>isinstance(where SQLQuery)<block_start><pass><block_end><else_stmt><block_start>where=reparam(where svars)<block_end><return>where<block_end><def_stmt>select self tables svars=<none> what='*' where=<none> order=<none> group=<none> limit=<none> offset=<none> _test=<false><block_start>"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""<if_stmt>svars<is><none><block_start>svars={}<block_end>sql_clauses=self.sql_clauses(what tables where group order limit offset)<line_sep>clauses=[self.gen_clause(sql val svars)<for>sql,val sql_clauses<if>val<is><not><none>]<line_sep>qout=SQLQuery.join(clauses)<if_stmt>_test<block_start><return>qout<block_end><return>self.query(qout processed=<true>)<block_end><def_stmt>insert self tablename seqname=<none> _test=<false> **values<block_start>"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""<def_stmt>q x<block_start><return>"("+x+")"<block_end><if_stmt>values<block_start>_keys=SQLQuery.join(values.keys() ', ')<line_sep>_values=SQLQuery.join([sqlparam(v)<for>v values.values()] ', ')<line_sep>sql_query="INSERT INTO %s "%tablename+q(_keys)+' VALUES '+q(_values)<block_end><else_stmt><block_start>sql_query=SQLQuery(self._get_insert_default_values_query(tablename))<block_end><return>sql_query<block_end><def_stmt>_get_insert_default_values_query self table<block_start><return>"INSERT INTO %s DEFAULT VALUES"%table<block_end><def_stmt>multiple_insert self tablename values seqname=<none> _test=<false><block_start>"""
Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')">
"""<if_stmt><not>values<block_start><return>[]<block_end><if_stmt><not>self.supports_multiple_insert<block_start>out=[self.insert(tablename seqname=seqname _test=_test **v)<for>v values]<if_stmt>seqname<is><false><block_start><return><none><block_end><else_stmt><block_start><return>out<block_end><block_end>keys=values[0].keys()<line_sep>#@@ make sure all keys are valid
# make sure all rows have same keys.
<for_stmt>v values<block_start><if_stmt>v.keys()<ne>keys<block_start><raise>ValueError 'Bad data'<block_end><block_end>sql_query=SQLQuery('INSERT INTO %s (%s) VALUES '%(tablename ', '.join(keys)))<for_stmt>i,row enumerate(values)<block_start><if_stmt>i<ne>0<block_start>sql_query.append(", ")<block_end>SQLQuery.join([SQLParam(row[k])<for>k keys] sep=", " target=sql_query prefix="(" suffix=")")<block_end><if_stmt>_test<block_start><return>sql_query<block_end>db_cursor=self._db_cursor()<if_stmt>seqname<is><not><false><block_start>sql_query=self._process_insert_query(sql_query tablename seqname)<block_end><if_stmt>isinstance(sql_query tuple)# for some databases, a separate query has to be made to find
# the id of the inserted row.
<block_start>q1,q2=sql_query<line_sep>self._db_execute(db_cursor q1)<line_sep>self._db_execute(db_cursor q2)<block_end><else_stmt><block_start>self._db_execute(db_cursor sql_query)<block_end><try_stmt><block_start>out=db_cursor.fetchone()[0]<line_sep>out=range(out-len(values)+1 out+1)<block_end><except_stmt>Exception<block_start>out=<none><block_end><if_stmt><not>self.ctx.transactions<block_start>self.ctx.commit()<block_end><return>out<block_end><def_stmt>update self tables where svars=<none> _test=<false> **values<block_start>"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""<if_stmt>svars<is><none><block_start>svars={}<block_end>where=self._where(where svars)<line_sep>query=("UPDATE "+sqllist(tables)+" SET "+sqlwhere(values ', ')+" WHERE "+where)<if_stmt>_test<block_start><return>query<block_end>db_cursor=self._db_cursor()<line_sep>self._db_execute(db_cursor query)<if_stmt><not>self.ctx.transactions<block_start>self.ctx.commit()<block_end><return>db_cursor.rowcount<block_end><def_stmt>delete self table where using=<none> svars=<none> _test=<false><block_start>"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""<if_stmt>svars<is><none><block_start>svars={}<block_end>where=self._where(where svars)<line_sep>q='DELETE FROM '+table<if_stmt>using<block_start>q<augadd>' USING '+sqllist(using)<block_end><if_stmt>where<block_start>q<augadd>' WHERE '+where<block_end><return>q<block_end><block_end>sqlproducer=SQLProducer()<line_sep> |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
<import_stmt>numpy<import_stmt>scipy<import_stmt>unittest<import_stmt>time<import_from_stmt>nearpy Engine<import_from_stmt>nearpy.distances CosineDistance<import_from_stmt>nearpy.hashes RandomBinaryProjections HashPermutations HashPermutationMapper<def_stmt>example2 # Dimension of feature space
<block_start>DIM=100<line_sep># Number of data points (don't do too much because of exact search)
POINTS=20000<line_sep>##########################################################
print('Performing indexing with HashPermutations...')<line_sep>t0=time.time()<line_sep># Create permutations meta-hash
permutations=HashPermutations('permut')<line_sep># Create binary hash as child hash
rbp_perm=RandomBinaryProjections('rbp_perm' 14)<line_sep>rbp_conf={'num_permutation':50 'beam_size':10 'num_neighbour':100}<line_sep># Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm rbp_conf)<line_sep># Create engine
engine_perm=Engine(DIM lshashes=[permutations] distance=CosineDistance())<line_sep># First index some random vectors
matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_perm.store_vector(v)<block_end># Then update permuted index
permutations.build_permuted_index()<line_sep>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector
query=numpy.random.randn(DIM)<line_sep># Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')<line_sep>print(' -> Candidate count is %d'%engine_perm.candidate_count(query))<line_sep>results=engine_perm.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours
print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>##########################################################
print('\nPerforming indexing with HashPermutationMapper...')<line_sep>t0=time.time()<line_sep># Create permutations meta-hash
permutations2=HashPermutationMapper('permut2')<line_sep># Create binary hash as child hash
rbp_perm2=RandomBinaryProjections('rbp_perm2' 14)<line_sep># Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)<line_sep># Create engine
engine_perm2=Engine(DIM lshashes=[permutations2] distance=CosineDistance())<line_sep># First index some random vectors
matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_perm2.store_vector(v)<block_end>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector
query=numpy.random.randn(DIM)<line_sep># Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')<line_sep>print(' -> Candidate count is %d'%engine_perm2.candidate_count(query))<line_sep>results=engine_perm2.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours
print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>##########################################################
print('\nPerforming indexing with multiple binary hashes...')<line_sep>t0=time.time()<line_sep>hashes=[]<for_stmt>k range(20)<block_start>hashes.append(RandomBinaryProjections('rbp_%d'%k 10))<block_end># Create engine
engine_rbps=Engine(DIM lshashes=hashes distance=CosineDistance())<line_sep># First index some random vectors
matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_rbps.store_vector(v)<block_end>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector
query=numpy.random.randn(DIM)<line_sep># Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')<line_sep>print(' -> Candidate count is %d'%engine_rbps.candidate_count(query))<line_sep>results=engine_rbps.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours
print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>##########################################################
<block_end> |
# Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_from_stmt>slicc.ast.DeclAST DeclAST<import_from_stmt>slicc.symbols.Type Type<class_stmt>TypeDeclAST(DeclAST)<block_start><def_stmt>__init__ self slicc type_ast pairs field_asts<block_start>super(TypeDeclAST self).__init__(slicc pairs)<line_sep>self.type_ast=type_ast<line_sep>self.field_asts=field_asts<block_end><def_stmt>__repr__ self<block_start><return>"[TypeDecl: %r]"%(self.type_ast)<block_end><def_stmt>files self parent=<none><block_start><if_stmt>"external"<in>self<block_start><return>set()<block_end><if_stmt>parent<block_start>ident="%s_%s"%(parent self.type_ast.ident)<block_end><else_stmt><block_start>ident=self.type_ast.ident<block_end><return>set(("%s.hh"%ident "%s.cc"%ident))<block_end><def_stmt>generate self<block_start>ident=str(self.type_ast)<line_sep>machine=self.symtab.state_machine<line_sep># Make the new type
new_type=Type(self.symtab ident self.location self.pairs self.state_machine)<if_stmt>machine<block_start>machine.addType(new_type)<block_end>self.symtab.newSymbol(new_type)<line_sep>self.symtab.pushFrame()<line_sep># Add all of the fields of the type to it
<for_stmt>field self.field_asts<block_start>field.generate(new_type)<block_end>self.symtab.popFrame()<block_end><block_end> |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__="biotite"<line_sep>__author__="<NAME>"<line_sep>__all__=["File" "TextFile" "InvalidFileError"]<import_stmt>abc<import_stmt>io<import_stmt>warnings<import_from_stmt>.copyable Copyable<import_stmt>copy<class_stmt>File(Copyable metaclass=abc.ABCMeta)<block_start>"""
Base class for all file classes.
The constructor creates an empty file, that can be filled with data
using the class specific setter methods.
Conversely, the class method :func:`read()` reads a file from disk
(or a file-like object from other sources).
In order to write the instance content into a file the
:func:`write()` method is used.
"""<def_stmt>__init__ self# Support for deprecated instance method 'read()':
# When creating an instance, the 'read()' class method is
# replaced by the instance method, so that subsequent
# 'read()' calls are delegated to the instance method
<block_start>self.read=self._deprecated_read<block_end>@[email protected]<def_stmt>read cls file<block_start>"""
Parse a file (or file-like object).
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : File
An instance from the respective :class:`File` subclass
representing the parsed file.
"""<line_sep><pass><block_end><def_stmt>_deprecated_read self file *args **kwargs<block_start>"""
Support for deprecated instance method :func:`read()`.
Internally this calls the :func:`read()` class method and
replaces the data in `self` with the data from the newly created
:class:`File` object
"""<line_sep>warnings.warn("Instance method 'read()' is deprecated, "<concat>"use class method instead" DeprecationWarning)<line_sep>cls=type(self)<line_sep>new_file=cls.read(file *args **kwargs)<line_sep>self.__dict__.update(new_file.__dict__)<block_end>@abc.abstractmethod<def_stmt>write self file<block_start>"""
Write the contents of this :class:`File` object into a file.
Parameters
----------
file : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""<line_sep><pass><block_end><block_end><class_stmt>TextFile(File metaclass=abc.ABCMeta)<block_start>"""
Base class for all line based text files.
When reading a file, the text content is saved as list of strings,
one for each line.
When writing a file, this list is written into the file.
Attributes
----------
lines : list
List of string representing the lines in the text file.
PROTECTED: Do not modify from outside.
"""<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.lines=[]<block_end>@classmethod<def_stmt>read cls file *args **kwargs# File name
<block_start><if_stmt>isinstance(file str)<block_start><with_stmt>open(file "r")<as>f<block_start>lines=f.read().splitlines()<block_end><block_end># File object
<else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end>lines=file.read().splitlines()<block_end>file_object=cls(*args **kwargs)<line_sep>file_object.lines=lines<line_sep><return>file_object<block_end>@staticmethod<def_stmt>read_iter file<block_start>"""
Create an iterator over each line of the given text file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Yields
------
line : str
The current line in the file.
"""<line_sep># File name
<if_stmt>isinstance(file str)<block_start><with_stmt>open(file "r")<as>f<block_start><while_stmt><true><block_start>line=f.readline()<if_stmt><not>line<block_start><break><block_end><yield>line<block_end><block_end><block_end># File object
<else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end><while_stmt><true><block_start>line=file.readline()<if_stmt><not>line<block_start><break><block_end><yield>line<block_end><block_end><block_end><def_stmt>write self file<block_start>"""
Write the contents of this object into a file
(or file-like object).
Parameters
----------
file : file-like object or str
The file to be written to.
Alternatively a file path can be supplied.
"""<if_stmt>isinstance(file str)<block_start><with_stmt>open(file "w")<as>f<block_start>f.write("\n".join(self.lines)+"\n")<block_end><block_end><else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end>file.write("\n".join(self.lines)+"\n")<block_end><block_end><def_stmt>__copy_fill__ self clone<block_start>super().__copy_fill__(clone)<line_sep>clone.lines=copy.copy(self.lines)<block_end><def_stmt>__str__ self<block_start><return>("\n".join(self.lines))<block_end><block_end><class_stmt>InvalidFileError(Exception)<block_start>"""
Indicates that the file is not suitable for the requested action,
either because the file does not contain the required data or
because the file is malformed.
"""<line_sep><pass><block_end><def_stmt>wrap_string text width<block_start>"""
A much simpler and hence much more efficient version of
`textwrap.wrap()`.
This function simply wraps the given `text` after `width`
characters, ignoring sentences, whitespaces, etc.
"""<line_sep>lines=[]<for_stmt>i range(0 len(text) width)<block_start>lines.append(text[i:i+width])<block_end><return>lines<block_end><def_stmt>is_binary file<block_start><if_stmt>isinstance(file io.BufferedIOBase)<block_start><return><true><block_end># for file wrappers, e.g. 'TemporaryFile'
<elif_stmt>hasattr(file "file")<and>isinstance(file.file io.BufferedIOBase)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>is_text file<block_start><if_stmt>isinstance(file io.TextIOBase)<block_start><return><true><block_end># for file wrappers, e.g. 'TemporaryFile'
<elif_stmt>hasattr(file "file")<and>isinstance(file.file io.TextIOBase)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass=cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass" ## jet input
jets=cms.InputTag("selectedPatJets") ## lepton input
leps=cms.InputTag("selectedPatMuons") ## maximum number of jets to be considered
maxNJets=cms.int32(4) ## nominal WMass parameter (in GeV)
wMass=cms.double(80.4) ## use b-tagging to distinguish between light and b jets
useBTagging=cms.bool(<false>) ## choose algorithm for b-tagging
bTagAlgorithm=cms.string("trackCountingHighEffBJetTags") ## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets=cms.double(1.0) maxBDiscLightJets=cms.double(3.0))<line_sep> |
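# A hedged sketch (not part of the original configuration fragment): CMSSW
# producers can typically be duplicated with selected parameters overridden
# via clone(), e.g. to enable the b-tagging option documented above. The new
# module label below is hypothetical.
findTtSemiLepJetCombMaxSumPtWMassBTag = findTtSemiLepJetCombMaxSumPtWMass.clone(
    useBTagging = cms.bool(True)
)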
<import_from_stmt>libsaas http parsers<import_from_stmt>libsaas.services base<import_from_stmt>libsaas.services.twilio resource<class_stmt>ApplicationsBase(resource.TwilioResource)<block_start>path='Applications'<block_end><class_stmt>Application(ApplicationsBase)<block_start><def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>Applications(ApplicationsBase)<block_start>@base.apimethod<def_stmt>get self FriendlyName=<none> Page=<none> PageSize=<none> AfterSid=<none><block_start>"""
Fetch the Applications belonging to an account.
:var FriendlyName: Only return the Application resources with friendly
names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>ConnectAppsBase(resource.TwilioResource)<block_start>path='ConnectApps'<def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>ConnectApp(ConnectAppsBase)<block_start><pass><block_end><class_stmt>ConnectApps(ConnectAppsBase)<block_start>@base.apimethod<def_stmt>get self Page=<none> PageSize=<none> AfterSid=<none><block_start>"""
Fetch the Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>AuthorizedConnectAppsBase(resource.TwilioResource)<block_start>path='AuthorizedConnectApps'<def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>AuthorizedConnectApp(AuthorizedConnectAppsBase)<block_start><pass><block_end><class_stmt>AuthorizedConnectApps(AuthorizedConnectAppsBase)<block_start>@base.apimethod<def_stmt>get self Page=<none> PageSize=<none> AfterSid=<none><block_start>"""
Fetch the Authorized Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><block_end> |
<import_from_stmt>vyper ast<as>vy_ast<def_stmt>test_output_class <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>isinstance(new_node vy_ast.Int)<block_end><def_stmt>test_source <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>old_node.src<eq>new_node.src<assert_stmt>old_node.node_source_code<eq>new_node.node_source_code<block_end><def_stmt>test_kwargs <block_start>old_node=vy_ast.parse_to_ast("42").body[0].value<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>old_node.value<eq>42<assert_stmt>new_node.value<eq>666<block_end><def_stmt>test_compare_nodes <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt><not>vy_ast.compare_nodes(old_node new_node)<block_end><def_stmt>test_new_node_has_no_parent <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>new_node._parent<is><none><assert_stmt>new_node._depth<eq>0<block_end> |
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
<import_from_stmt>robolink *# RoboDK API
# JSON tools
<import_stmt>json<line_sep># Start the RoboDK API
RDK=Robolink()<line_sep># Ask the user to select a robot arm (6-axis robot which can have external axes)
robot=RDK.ItemUserPick("Select a robot arm" ITEM_TYPE_ROBOT_ARM)<line_sep># Default optimization settings test template
AxesOptimSettings={# Optimization parameters:
"Active":1 # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm":2 # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter":650 # Max. number of iterations
"Tol":0.0016 # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1":104.17 "AbsJnt_2":11.22 "AbsJnt_3":15.97 "AbsJnt_4":-87.48 "AbsJnt_5":-75.36 "AbsJnt_6":63.03 "AbsJnt_7":174.13 "AbsJnt_8":173.60 "AbsJnt_9":0 # Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1":1 "AbsOn_2":1 "AbsOn_3":1 "AbsOn_4":1 "AbsOn_5":1 "AbsOn_6":1 "AbsOn_7":1 "AbsOn_8":1 "AbsOn_9":1 # Weight for absolute reference joints (double):
"AbsW_1":100 "AbsW_2":100 "AbsW_3":100 "AbsW_4":89 "AbsW_5":90 "AbsW_6":92 "AbsW_7":92 "AbsW_8":96 "AbsW_9":50 # Using for relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1":1 "RelOn_2":1 "RelOn_3":1 "RelOn_4":1 "RelOn_5":1 "RelOn_6":1 "RelOn_7":1 "RelOn_8":1 "RelOn_9":1 # Weight for relative joint motion (double):
"RelW_1":5 "RelW_2":47 "RelW_3":44 "RelW_4":43 "RelW_5":36 "RelW_6":47 "RelW_7":53 "RelW_8":59 "RelW_9":0 }<line_sep># Update one value, for example, make it active:
ToUpdate={}<line_sep>ToUpdate["Active"]=1<line_sep>json_str=json.dumps(json.dumps(ToUpdate))<line_sep>status=robot.setParam("OptimAxes" json_str)<line_sep>print(status)<line_sep># Example to make a partial or full update
count=1<while_stmt><true><block_start><for_stmt>i range(7)# Partial update
<block_start>ToUpdate={}<line_sep>ToUpdate["AbsJnt_"+str(i+1)]=(count+i)<times>4<line_sep>ToUpdate["AbsOn_"+str(i+1)]=count%2<line_sep>ToUpdate["AbsW_"+str(i+1)]=(count+i)<line_sep>json_str=json.dumps(json.dumps(ToUpdate))<line_sep>status=robot.setParam("OptimAxes" json_str)<line_sep>print(status)<line_sep># Full update
#OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
#OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
#OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2
<block_end># Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count=count+1<line_sep># Read settings
json_data=robot.setParam("OptimAxes")<line_sep>json_object=json.loads(json_data)<line_sep>print(json.dumps(json_object indent=4))<line_sep>pause(0.2)<block_end># Example to read the current axes optimization settings:
<while_stmt><true><block_start>json_data=robot.setParam("OptimAxes")<line_sep>json_object=json.loads(json_data)<line_sep>print(json.dumps(json_object indent=4))<line_sep>pause(0.2)<block_end> |
'''
This file contains test cases for tflearn
'''<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tflearn<import_stmt>unittest<class_stmt>TestActivations(unittest.TestCase)<block_start>'''
This class contains test cases for the functions in tflearn/activations.py
'''<line_sep>PLACES=4# Number of places to match when testing floating point values
<def_stmt>test_linear self<block_start>f=tflearn.linear<line_sep># Case 1
x=tf.placeholder(tf.float32 shape=())<line_sep>self.assertEqual(f(x) x)<line_sep># Case 2
x=tf.placeholder(tf.int64 shape=())<line_sep>self.assertEqual(f(x) x)<block_end><def_stmt>test_tanh self<block_start>f=tflearn.tanh<line_sep>x=tf.placeholder(tf.float32 shape=())<with_stmt>tf.Session()<as>sess# Case 1
<block_start>self.assertEqual(sess.run(f(x) feed_dict={x:0}) 0)<line_sep># Case 2
self.assertAlmostEqual(sess.run(f(x) feed_dict={x:0.5}) 0.4621 places=TestActivations.PLACES)<line_sep># Case 3
self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-0.25}) -0.2449 places=TestActivations.PLACES)<block_end><block_end><def_stmt>test_leaky_relu self<block_start>f=<lambda>x:tflearn.leaky_relu(x alpha=0.2)<line_sep>x=tf.placeholder(tf.float32 shape=())<with_stmt>tf.Session()<as>sess# Case 1
<block_start>self.assertEqual(sess.run(f(x) feed_dict={x:0}) 0)<line_sep># Case 2
self.assertAlmostEqual(sess.run(f(x) feed_dict={x:1}) 1 places=TestActivations.PLACES)<line_sep># Case 3
self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-1}) -0.2 places=TestActivations.PLACES)<line_sep># Case 4
self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-5}) -1 places=TestActivations.PLACES)<block_end><block_end><def_stmt>test_apply_activation self<block_start>lrelu_02=<lambda>x:tflearn.leaky_relu(x alpha=0.2)<line_sep>x=tf.constant(-0.25 tf.float32)<with_stmt>tf.Session()<as>sess# Case 1: 'linear'
<block_start>self.assertEqual(sess.run(tflearn.activation(x 'linear')) -0.25)<line_sep># Case 2: 'relu'
self.assertEqual(sess.run(tflearn.activation(x 'relu')) 0)<line_sep># Case 3: 'leaky_relu'
self.assertAlmostEqual(sess.run(tflearn.activation(x 'leaky_relu')) -0.025 places=TestActivations.PLACES)<line_sep># Case 4: 'tanh'
self.assertAlmostEqual(sess.run(tflearn.activation(x 'tanh')) -0.2449 places=TestActivations.PLACES)<line_sep># Case 5: lrelu_02 (callable)
self.assertAlmostEqual(sess.run(tflearn.activation(x lrelu_02)) -0.05 places=TestActivations.PLACES)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
# This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
<import_from_stmt>patsy EvalEnvironment dmatrix build_design_matrices PatsyError Origin <def_stmt>test_issue_11 # Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
<block_start>env=EvalEnvironment.capture()<line_sep>data={"X":[0 1 2 3] "Y":[1 2 3 4]}<line_sep>formula="C(X) + Y"<line_sep>new_data={"X":[0 0 1 2 3 3 4] "Y":[1 2 3 4 5 6 7]}<line_sep>info=dmatrix(formula data)<try_stmt><block_start>build_design_matrices([info.design_info] new_data)<block_end><except_stmt>PatsyError<as>e<block_start><assert_stmt>e.origin<eq>Origin(formula 0 4)<block_end><else_stmt><block_start><assert_stmt><false><block_end><block_end> |
<class_stmt>Foo<block_start><pass><block_end><class_stmt>Bar(Foo)<block_start><def_stmt>__init__ self<block_start>super(Bar self).__init__()<block_end><block_end># [super-with-arguments]
<class_stmt>Baz(Foo)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end><block_end><class_stmt>Qux(Foo)<block_start><def_stmt>__init__ self<block_start>super(Bar self).__init__()<block_end><block_end><class_stmt>NotSuperCall(Foo)<block_start><def_stmt>__init__ self<block_start>super.test(Bar self).__init__()<block_end><block_end><class_stmt>InvalidSuperCall(Foo)<block_start><def_stmt>__init__ self<block_start>super(InvalidSuperCall.__class__ self).__init__()<block_end><block_end><def_stmt>method_accepting_cls cls self# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls` and `self` to `super()` is what's required.
<block_start>super(cls self).__init__()<block_end> |
<import_stmt>logging<line_sep>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' datefmt='%Y/%m/%d %H:%M:%S' level=logging.INFO )<line_sep>logger=logging.getLogger("Main")<import_stmt>os random<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>processing convert_examples_to_features read_squad_examples<import_from_stmt>processing ChineseFullTokenizer<import_from_stmt>pytorch_pretrained_bert.my_modeling BertConfig<import_from_stmt>optimization BERTAdam<import_stmt>config<import_from_stmt>utils read_and_convert divide_parameters<import_from_stmt>modeling BertForQASimple BertForQASimpleAdaptorTraining<import_from_stmt>textbrewer DistillationConfig TrainingConfig BasicTrainer<import_from_stmt>torch.utils.data TensorDataset DataLoader RandomSampler<import_from_stmt>functools partial<import_from_stmt>train_eval predict<def_stmt>args_check args<block_start><if_stmt>os.path.exists(args.output_dir)<and>os.listdir(args.output_dir)<block_start>logger.warning("Output directory () already exists and is not empty.")<block_end><if_stmt>args.gradient_accumulation_steps<l>1<block_start><raise>ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))<block_end><if_stmt><not>args.do_train<and><not>args.do_predict<block_start><raise>ValueError("At least one of `do_train` or `do_predict` must be True.")<block_end><if_stmt>args.local_rank<eq>-1<or>args.no_cuda<block_start>device=torch.device("cuda"<if>torch.cuda.is_available()<and><not>args.no_cuda<else>"cpu")<line_sep>n_gpu=torch.cuda.device_count()<if><not>args.no_cuda<else>0<block_end><else_stmt><block_start>device=torch.device("cuda" args.local_rank)<line_sep>n_gpu=1<line_sep>torch.distributed.init_process_group(backend='nccl')<block_end>logger.info("device %s n_gpu %d distributed training %r" device n_gpu bool(args.local_rank<ne>-1))<line_sep>args.n_gpu=n_gpu<line_sep>args.device=device<line_sep><return>device n_gpu<block_end><def_stmt>main #parse arguments
<block_start>config.parse()<line_sep>args=config.args<for_stmt>k,v vars(args).items()<block_start>logger.info(f"{k}:{v}")<block_end>#set seeds
torch.manual_seed(args.random_seed)<line_sep>torch.cuda.manual_seed_all(args.random_seed)<line_sep>np.random.seed(args.random_seed)<line_sep>random.seed(args.random_seed)<line_sep>#arguments check
device,n_gpu=args_check(args)<line_sep>os.makedirs(args.output_dir exist_ok=<true>)<line_sep>forward_batch_size=int(args.train_batch_size/args.gradient_accumulation_steps)<line_sep>args.forward_batch_size=forward_batch_size<line_sep>#load bert config
bert_config_S=BertConfig.from_json_file(args.bert_config_file_S)<assert_stmt>args.max_seq_length<le>bert_config_S.max_position_embeddings<line_sep>#read data
train_examples=<none><line_sep>train_features=<none><line_sep>eval_examples=<none><line_sep>eval_features=<none><line_sep>num_train_steps=<none><line_sep>tokenizer=ChineseFullTokenizer(vocab_file=args.vocab_file do_lower_case=args.do_lower_case)<line_sep>convert_fn=partial(convert_examples_to_features tokenizer=tokenizer max_seq_length=args.max_seq_length doc_stride=args.doc_stride max_query_length=args.max_query_length)<if_stmt>args.do_train<block_start>train_examples,train_features=read_and_convert(args.train_file is_training=<true> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<if_stmt>args.fake_file_1<block_start>fake_examples1,fake_features1=read_and_convert(args.fake_file_1 is_training=<true> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<line_sep>train_examples<augadd>fake_examples1<line_sep>train_features<augadd>fake_features1<block_end><if_stmt>args.fake_file_2<block_start>fake_examples2,fake_features2=read_and_convert(args.fake_file_2 is_training=<true> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<line_sep>train_examples<augadd>fake_examples2<line_sep>train_features<augadd>fake_features2<block_end>num_train_steps=int(len(train_features)/args.train_batch_size)<times>args.num_train_epochs<block_end><if_stmt>args.do_predict<block_start>eval_examples,eval_features=read_and_convert(args.predict_file is_training=<false> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<block_end>#Build Model and load checkpoint
model_S=BertForQASimple(bert_config_S args)<line_sep>#Load student
<if_stmt>args.load_model_type<eq>'bert'<block_start><assert_stmt>args.init_checkpoint_S<is><not><none><line_sep>state_dict_S=torch.load(args.init_checkpoint_S map_location='cpu')<line_sep>state_weight={k[5:]:v<for>k,v state_dict_S.items()<if>k.startswith('bert.')}<line_sep>missing_keys,_=model_S.bert.load_state_dict(state_weight strict=<false>)<assert_stmt>len(missing_keys)<eq>0<block_end><elif_stmt>args.load_model_type<eq>'all'<block_start><assert_stmt>args.tuned_checkpoint_S<is><not><none><line_sep>state_dict_S=torch.load(args.tuned_checkpoint_S map_location='cpu')<line_sep>model_S.load_state_dict(state_dict_S)<block_end><else_stmt><block_start>logger.info("Model is randomly initialized.")<block_end>model_S.to(device)<if_stmt>args.local_rank<ne>-1<or>n_gpu<g>1<block_start><if_stmt>args.local_rank<ne>-1<block_start><raise>NotImplementedError<block_end><elif_stmt>n_gpu<g>1<block_start>model_S=torch.nn.DataParallel(model_S)<block_end><block_end>#,output_device=n_gpu-1)
<if_stmt>args.do_train#parameters
<block_start>params=list(model_S.named_parameters())<line_sep>all_trainable_params=divide_parameters(params lr=args.learning_rate)<line_sep>logger.info("Length of all_trainable_params: %d" len(all_trainable_params))<line_sep>optimizer=BERTAdam(all_trainable_params lr=args.learning_rate warmup=args.warmup_proportion t_total=num_train_steps schedule=args.schedule s_opt1=args.s_opt1 s_opt2=args.s_opt2 s_opt3=args.s_opt3)<line_sep>logger.info("***** Running training *****")<line_sep>logger.info(" Num orig examples = %d" len(train_examples))<line_sep>logger.info(" Num split examples = %d" len(train_features))<line_sep>logger.info(" Forward batch size = %d" forward_batch_size)<line_sep>logger.info(" Num backward steps = %d" num_train_steps)<line_sep>########### DISTILLATION ###########
train_config=TrainingConfig(gradient_accumulation_steps=args.gradient_accumulation_steps ckpt_frequency=args.ckpt_frequency log_dir=args.output_dir output_dir=args.output_dir device=args.device)<line_sep>distiller=BasicTrainer(train_config=train_config model=model_S adaptor=BertForQASimpleAdaptorTraining)<line_sep>all_input_ids=torch.tensor([f.input_ids<for>f train_features] dtype=torch.long)<line_sep>all_input_mask=torch.tensor([f.input_mask<for>f train_features] dtype=torch.long)<line_sep>all_doc_mask=torch.tensor([f.doc_mask<for>f train_features] dtype=torch.float)<line_sep>all_segment_ids=torch.tensor([f.segment_ids<for>f train_features] dtype=torch.long)<line_sep>all_start_positions=torch.tensor([f.start_position<for>f train_features] dtype=torch.long)<line_sep>all_end_positions=torch.tensor([f.end_position<for>f train_features] dtype=torch.long)<line_sep>train_dataset=TensorDataset(all_input_ids all_segment_ids all_input_mask all_doc_mask all_start_positions all_end_positions)<if_stmt>args.local_rank<eq>-1<block_start>train_sampler=RandomSampler(train_dataset)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>train_dataloader=DataLoader(train_dataset sampler=train_sampler batch_size=args.forward_batch_size drop_last=<true>)<line_sep>callback_func=partial(predict eval_examples=eval_examples eval_features=eval_features args=args)<with_stmt>distiller<block_start>distiller.train(optimizer scheduler=<none> dataloader=train_dataloader num_epochs=args.num_train_epochs callback=callback_func)<block_end><block_end><if_stmt><not>args.do_train<and>args.do_predict<block_start>res=predict(model_S eval_examples eval_features step=0 args=args)<line_sep>print(res)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
"""The Ray autoscaler uses tags/labels to associate metadata with instances."""<line_sep># Tag for the name of the node
TAG_RAY_NODE_NAME="ray-node-name"<line_sep># Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag
# value says 'type' instead of 'kind'.
TAG_RAY_NODE_KIND="ray-node-type"<line_sep>NODE_KIND_HEAD="head"<line_sep>NODE_KIND_WORKER="worker"<line_sep>NODE_KIND_UNMANAGED="unmanaged"<line_sep># Tag for user defined node types (e.g., m4xl_spot). This is used for multi
# node type clusters.
TAG_RAY_USER_NODE_TYPE="ray-user-node-type"<line_sep># Tag for autofilled node types for legacy cluster yamls without multi
# node type defined in the cluster configs.
NODE_TYPE_LEGACY_HEAD="ray-legacy-head-node-type"<line_sep>NODE_TYPE_LEGACY_WORKER="ray-legacy-worker-node-type"<line_sep># Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS="ray-node-status"<line_sep>STATUS_UNINITIALIZED="uninitialized"<line_sep>STATUS_WAITING_FOR_SSH="waiting-for-ssh"<line_sep>STATUS_SYNCING_FILES="syncing-files"<line_sep>STATUS_SETTING_UP="setting-up"<line_sep>STATUS_UPDATE_FAILED="update-failed"<line_sep>STATUS_UP_TO_DATE="up-to-date"<line_sep># Tag uniquely identifying all nodes of a cluster
TAG_RAY_CLUSTER_NAME="ray-cluster-name"<line_sep># Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG="ray-launch-config"<line_sep># Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG="ray-runtime-config"<line_sep># Hash of the contents of the directories specified by the file_mounts config
# if the node is a worker, this also hashes content of the directories
# specified by the cluster_synced_files config
TAG_RAY_FILE_MOUNTS_CONTENTS="ray-file-mounts-contents"<line_sep> |
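# A hedged, illustrative sketch (not part of the module above): these constants
# are typically combined into a tag/label dict attached to an instance at
# launch time, e.g. for a worker node of a hypothetical cluster named "demo".
example_worker_tags = {
    TAG_RAY_CLUSTER_NAME: "demo",
    TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
    TAG_RAY_USER_NODE_TYPE: "m4xl_spot",
    TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
}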
<import_from_future_stmt> with_statement<import_from_stmt>.. Lock NeedRegenerationException<import_from_stmt>..util NameRegistry<import_from_stmt>. exception<import_from_stmt>..util PluginLoader memoized_property coerce_string_conf<import_from_stmt>.util function_key_generator function_multi_key_generator<import_from_stmt>.api NO_VALUE CachedValue<import_from_stmt>.proxy ProxyBackend<import_from_stmt>..util compat<import_stmt>time<import_stmt>datetime<import_from_stmt>numbers Number<import_from_stmt>functools wraps<import_stmt>threading<line_sep>_backend_loader=PluginLoader("dogpile.cache")<line_sep>register_backend=_backend_loader.register<import_from_stmt>. backends# noqa
value_version=1<line_sep>"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""<class_stmt>RegionInvalidationStrategy(object)<block_start>"""Region invalidation strategy interface
Implement this interface and pass implementation instance
to :meth:`.CacheRegion.configure` to override default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy())
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""<def_stmt>invalidate self hard=<true><block_start>"""Region invalidation.
:class:`.CacheRegion` propagated call.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_hard_invalidated self timestamp<block_start>"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_soft_invalidated self timestamp<block_start>"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_invalidated self timestamp<block_start>"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>was_soft_invalidated self<block_start>"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>was_hard_invalidated self<block_start>"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""<line_sep><raise>NotImplementedError()<block_end><block_end><class_stmt>DefaultInvalidationStrategy(RegionInvalidationStrategy)<block_start><def_stmt>__init__ self<block_start>self._is_hard_invalidated=<none><line_sep>self._invalidated=<none><block_end><def_stmt>invalidate self hard=<true><block_start>self._is_hard_invalidated=bool(hard)<line_sep>self._invalidated=time.time()<block_end><def_stmt>is_invalidated self timestamp<block_start><return>(self._invalidated<is><not><none><and>timestamp<l>self._invalidated)<block_end><def_stmt>was_hard_invalidated self<block_start><return>self._is_hard_invalidated<is><true><block_end><def_stmt>is_hard_invalidated self timestamp<block_start><return>self.was_hard_invalidated()<and>self.is_invalidated(timestamp)<block_end><def_stmt>was_soft_invalidated self<block_start><return>self._is_hard_invalidated<is><false><block_end><def_stmt>is_soft_invalidated self timestamp<block_start><return>self.was_soft_invalidated()<and>self.is_invalidated(timestamp)<block_end><block_end><class_stmt>CacheRegion(object)<block_start>"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
:func:`.kwarg_function_key_generator` - optional gen that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return a list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""<def_stmt>__init__ self name=<none> function_key_generator=function_key_generator function_multi_key_generator=function_multi_key_generator key_mangler=<none> async_creation_runner=<none> <block_start>"""Construct a new :class:`.CacheRegion`."""<line_sep>self.name=name<line_sep>self.function_key_generator=function_key_generator<line_sep>self.function_multi_key_generator=function_multi_key_generator<line_sep>self.key_mangler=self._user_defined_key_mangler=key_mangler<line_sep>self.async_creation_runner=async_creation_runner<line_sep>self.region_invalidator=DefaultInvalidationStrategy()<block_end><def_stmt>configure self backend expiration_time=<none> arguments=<none> _config_argument_dict=<none> _config_prefix=<none> wrap=<none> replace_existing_backend=<false> region_invalidator=<none><block_start>"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
.. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
"""<if_stmt>"backend"<in>self.__dict__<and><not>replace_existing_backend<block_start><raise>exception.RegionAlreadyConfigured("This region is already "<concat>"configured with backend: %s. "<concat>"Specify replace_existing_backend=True to replace."%self.backend)<block_end>backend_cls=_backend_loader.load(backend)<if_stmt>_config_argument_dict<block_start>self.backend=backend_cls.from_config_dict(_config_argument_dict _config_prefix)<block_end><else_stmt><block_start>self.backend=backend_cls(arguments<or>{})<block_end><if_stmt><not>expiration_time<or>isinstance(expiration_time Number)<block_start>self.expiration_time=expiration_time<block_end><elif_stmt>isinstance(expiration_time datetime.timedelta)<block_start>self.expiration_time=int(compat.timedelta_total_seconds(expiration_time))<block_end><else_stmt><block_start><raise>exception.ValidationError('expiration_time is not a number or timedelta.')<block_end><if_stmt><not>self._user_defined_key_mangler<block_start>self.key_mangler=self.backend.key_mangler<block_end>self._lock_registry=NameRegistry(self._create_mutex)<if_stmt>getattr(wrap '__iter__' <false>)<block_start><for_stmt>wrapper reversed(wrap)<block_start>self.wrap(wrapper)<block_end><block_end><if_stmt>region_invalidator<block_start>self.region_invalidator=region_invalidator<block_end><return>self<block_end><def_stmt>wrap self proxy<block_start>''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''<line_sep># if we were passed a type rather than an instance then
# initialize it.
<if_stmt>type(proxy)<eq>type<block_start>proxy=proxy()<block_end><if_stmt><not>issubclass(type(proxy) ProxyBackend)<block_start><raise>TypeError("Type %s is not a valid ProxyBackend"%type(proxy))<block_end>self.backend=proxy.wrap(self.backend)<block_end><def_stmt>_mutex self key<block_start><return>self._lock_registry.get(key)<block_end><class_stmt>_LockWrapper(object)<block_start>"""weakref-capable wrapper for threading.Lock"""<def_stmt>__init__ self<block_start>self.lock=threading.Lock()<block_end><def_stmt>acquire self wait=<true><block_start><return>self.lock.acquire(wait)<block_end><def_stmt>release self<block_start>self.lock.release()<block_end><block_end><def_stmt>_create_mutex self key<block_start>mutex=self.backend.get_mutex(key)<if_stmt>mutex<is><not><none><block_start><return>mutex<block_end><else_stmt><block_start><return>self._LockWrapper()<block_end><block_end><def_stmt>invalidate self hard=<true><block_start>"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is also
local to this instance of :class:`.CacheRegion`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
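For example, a minimal usage sketch, assuming a region configured
with a finite expiration time::
region.invalidate() # hard - getters block and regenerate immediately
region.invalidate(hard=False) # soft - stale values are served until refreshed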
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""<line_sep>self.region_invalidator.invalidate(hard)<block_end><def_stmt>configure_from_config self config_dict prefix<block_start>"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""<line_sep>config_dict=coerce_string_conf(config_dict)<line_sep><return>self.configure(config_dict["%sbackend"%prefix] expiration_time=config_dict.get("%sexpiration_time"%prefix <none>) _config_argument_dict=config_dict _config_prefix="%sarguments."%prefix wrap=config_dict.get("%swrap"%prefix <none>) )<block_end>@memoized_property<def_stmt>backend self<block_start><raise>exception.RegionNotConfigured("No backend is configured on this region.")<block_end>@property<def_stmt>is_configured self<block_start>"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""<line_sep><return>'backend'<in>self.__dict__<block_end><def_stmt>get self key expiration_time=<none> ignore_expiration=<false><block_start>"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
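A minimal usage sketch (``NO_VALUE`` is importable from
``dogpile.cache.api``; the key below is a placeholder)::
from dogpile.cache.api import NO_VALUE
value = region.get("some key")
if value is NO_VALUE:
pass # missing, expired or invalidated
stale_ok = region.get("some key", ignore_expiration=True)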
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
"""<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>value=self.backend.get(key)<line_sep>value=self._unexpired_value_fn(expiration_time ignore_expiration)(value)<line_sep><return>value.payload<block_end><def_stmt>_unexpired_value_fn self expiration_time ignore_expiration<block_start><if_stmt>ignore_expiration<block_start><return><lambda>value:value<block_end><else_stmt><block_start><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end>current_time=time.time()<def_stmt>value_fn value<block_start><if_stmt>value<is>NO_VALUE<block_start><return>value<block_end><elif_stmt>expiration_time<is><not><none><and>current_time-value.metadata["ct"]<g>expiration_time<block_start><return>NO_VALUE<block_end><elif_stmt>self.region_invalidator.is_invalidated(value.metadata["ct"])<block_start><return>NO_VALUE<block_end><else_stmt><block_start><return>value<block_end><block_end><return>value_fn<block_end><block_end><def_stmt>get_multi self keys expiration_time=<none> ignore_expiration=<false><block_start>"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the list are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""<if_stmt><not>keys<block_start><return>[]<block_end><if_stmt>self.key_mangler<block_start>keys=list(map(<lambda>key:self.key_mangler(key) keys))<block_end>backend_values=self.backend.get_multi(keys)<line_sep>_unexpired_value_fn=self._unexpired_value_fn(expiration_time ignore_expiration)<line_sep><return>[value.payload<if>value<is><not>NO_VALUE<else>value<for>value (_unexpired_value_fn(value)<for>value backend_values)]<block_end><def_stmt>get_or_create self key creator expiration_time=<none> should_cache_fn=<none><block_start>"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on if the
*dogpile lock* can be acquired or not. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
before being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""<line_sep>orig_key=key<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end><def_stmt>get_value <block_start>value=self.backend.get(key)<if_stmt>(value<is>NO_VALUE<or>value.metadata['v']<ne>value_version<or>self.region_invalidator.is_hard_invalidated(value.metadata["ct"]))<block_start><raise>NeedRegenerationException()<block_end>ct=value.metadata["ct"]<if_stmt>self.region_invalidator.is_soft_invalidated(ct)<block_start>ct=time.time()-expiration_time-.0001<block_end><return>value.payload ct<block_end><def_stmt>gen_value <block_start>created_value=creator()<line_sep>value=self._value(created_value)<if_stmt><not>should_cache_fn<or>should_cache_fn(created_value)<block_start>self.backend.set(key value)<block_end><return>value.payload value.metadata["ct"]<block_end><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end><if_stmt>(expiration_time<is><none><and>self.region_invalidator.was_soft_invalidated())<block_start><raise>exception.DogpileCacheException("Non-None expiration time required "<concat>"for soft invalidation")<block_end><if_stmt>expiration_time<eq>-1<block_start>expiration_time=<none><block_end><if_stmt>self.async_creation_runner<block_start><def_stmt>async_creator mutex<block_start><return>self.async_creation_runner(self orig_key creator mutex)<block_end><block_end><else_stmt><block_start>async_creator=<none><block_end><with_stmt>Lock(self._mutex(key) gen_value get_value expiration_time async_creator)<as>value<block_start><return>value<block_end><block_end><def_stmt>get_or_create_multi self keys creator expiration_time=<none> should_cache_fn=<none><block_start>"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
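A minimal usage sketch, where ``expensive_lookup`` is a placeholder
not provided by dogpile.cache::
def load_values(*keys):
# must return one value per requested key, in the same order
return [expensive_lookup(k) for k in keys]
values = region.get_or_create_multi(["k1", "k2", "k3"], load_values)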
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""<def_stmt>get_value key<block_start>value=values.get(key NO_VALUE)<if_stmt>(value<is>NO_VALUE<or>value.metadata['v']<ne>value_version<or>self.region_invalidator.is_hard_invalidated(value.metadata['v']))# dogpile.core understands a 0 here as
# "the value is not available", e.g.
# _has_value() will return False.
<block_start><return>value.payload 0<block_end><else_stmt><block_start>ct=value.metadata["ct"]<if_stmt>self.region_invalidator.is_soft_invalidated(ct)<block_start>ct=time.time()-expiration_time-.0001<block_end><return>value.payload ct<block_end><block_end><def_stmt>gen_value <block_start><raise>NotImplementedError()<block_end><def_stmt>async_creator key mutex<block_start>mutexes[key]=mutex<block_end><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end><if_stmt>(expiration_time<is><none><and>self.region_invalidator.was_soft_invalidated())<block_start><raise>exception.DogpileCacheException("Non-None expiration time required "<concat>"for soft invalidation")<block_end><if_stmt>expiration_time<eq>-1<block_start>expiration_time=<none><block_end>mutexes={}<line_sep>sorted_unique_keys=sorted(set(keys))<if_stmt>self.key_mangler<block_start>mangled_keys=[self.key_mangler(k)<for>k sorted_unique_keys]<block_end><else_stmt><block_start>mangled_keys=sorted_unique_keys<block_end>orig_to_mangled=dict(zip(sorted_unique_keys mangled_keys))<line_sep>values=dict(zip(mangled_keys self.backend.get_multi(mangled_keys)))<for_stmt>orig_key,mangled_key orig_to_mangled.items()<block_start><with_stmt>Lock(self._mutex(mangled_key) gen_value <lambda>:get_value(mangled_key) expiration_time async_creator=<lambda>mutex:async_creator(orig_key mutex))<block_start><pass><block_end><block_end><try_stmt><block_start><if_stmt>mutexes# sort the keys, the idea is to prevent deadlocks.
# though we haven't been able to simulate one anyway.
<block_start>keys_to_get=sorted(mutexes)<line_sep>new_values=creator(*keys_to_get)<line_sep>values_w_created=dict((orig_to_mangled[k] self._value(v))<for>k,v zip(keys_to_get new_values))<if_stmt><not>should_cache_fn<block_start>self.backend.set_multi(values_w_created)<block_end><else_stmt><block_start>self.backend.set_multi(dict((k v)<for>k,v values_w_created.items()<if>should_cache_fn(v[0])))<block_end>values.update(values_w_created)<block_end><return>[values[orig_to_mangled[k]].payload<for>k keys]<block_end><finally_stmt><block_start><for_stmt>mutex mutexes.values()<block_start>mutex.release()<block_end><block_end><block_end><def_stmt>_value self value<block_start>"""Return a :class:`.CachedValue` given a value."""<line_sep><return>CachedValue(value {"ct":time.time() "v":value_version})<block_end><def_stmt>set self key value<block_start>"""Place a new value in the cache under the given key."""<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>self.backend.set(key self._value(value))<block_end><def_stmt>set_multi self mapping<block_start>"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""<if_stmt><not>mapping<block_start><return><block_end><if_stmt>self.key_mangler<block_start>mapping=dict((self.key_mangler(k) self._value(v))<for>k,v mapping.items())<block_end><else_stmt><block_start>mapping=dict((k self._value(v))<for>k,v mapping.items())<block_end>self.backend.set_multi(mapping)<block_end><def_stmt>delete self key<block_start>"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>self.backend.delete(key)<block_end><def_stmt>delete_multi self keys<block_start>"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""<if_stmt>self.key_mangler<block_start>keys=list(map(<lambda>key:self.key_mangler(key) keys))<block_end>self.backend.delete_multi(keys)<block_end><def_stmt>cache_on_arguments self namespace=<none> expiration_time=<none> should_cache_fn=<none> to_str=compat.string_type function_key_generator=<none><block_start>"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
:class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
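For example, the "cache the result until the end of the day" case
mentioned above could be expressed by passing a callable (the helper
below is an illustrative sketch, not part of this module)::

    import datetime

    def seconds_until_midnight():
        # illustrative helper: seconds from now until the next midnight
        now = datetime.datetime.now()
        next_midnight = (now + datetime.timedelta(days=1)).replace(
            hour=0, minute=0, second=0, microsecond=0)
        return (next_midnight - now).total_seconds()

    @someregion.cache_on_arguments(expiration_time=seconds_until_midnight)
    def generate_something(x, y):
        return somedatabase.query(x, y)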
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
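For example, a predicate that avoids caching empty results (an
illustrative sketch; the function name is not part of this module)::

    def cache_if_not_none(value):
        # the value produced by the decorated function is passed in;
        # returning False skips storing it in the cache
        return value is not None

    @someregion.cache_on_arguments(should_cache_fn=cache_if_not_none)
    def generate_something(x, y):
        return somedatabase.query(x, y)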
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
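For example, on Python 2.x the builtin could be passed directly
(with the key-mangling caveat noted above)::

    @someregion.cache_on_arguments(to_str=unicode)
    def generate_something(x, y):
        return somedatabase.query(x, y)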
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
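As a simplified sketch of the expected interface (not the default
implementation, which additionally skips ``self``/``cls`` as described
above)::

    def my_key_generator(namespace, fn, to_str=str):
        prefix = "%s:%s|%s" % (fn.__module__, fn.__name__, namespace or "")
        def generate_key(*arg, **kw):
            # join the stringified positional arguments onto the prefix;
            # keyword arguments are ignored in this sketch
            return prefix + "|" + " ".join(to_str(a) for a in arg)
        return generate_key

    @someregion.cache_on_arguments(function_key_generator=my_key_generator)
    def generate_something(x, y):
        return somedatabase.query(x, y)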
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""<line_sep>expiration_time_is_callable=compat.callable(expiration_time)<if_stmt>function_key_generator<is><none><block_start>function_key_generator=self.function_key_generator<block_end><def_stmt>decorator fn<block_start><if_stmt>to_str<is>compat.string_type# backwards compatible
<block_start>key_generator=function_key_generator(namespace fn)<block_end><else_stmt><block_start>key_generator=function_key_generator(namespace fn to_str=to_str)<block_end>@wraps(fn)<def_stmt>decorate *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>@wraps(fn)<def_stmt>creator <block_start><return>fn(*arg **kw)<block_end>timeout=expiration_time()<if>expiration_time_is_callable<else>expiration_time<line_sep><return>self.get_or_create(key creator timeout should_cache_fn)<block_end><def_stmt>invalidate *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>self.delete(key)<block_end><def_stmt>set_ value *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>self.set(key value)<block_end><def_stmt>get *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep><return>self.get(key)<block_end><def_stmt>refresh *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>value=fn(*arg **kw)<line_sep>self.set(key value)<line_sep><return>value<block_end>decorate.set=set_<line_sep>decorate.invalidate=invalidate<line_sep>decorate.refresh=refresh<line_sep>decorate.get=get<line_sep>decorate.original=fn<line_sep><return>decorate<block_end><return>decorator<block_end><def_stmt>cache_multi_on_arguments self namespace=<none> expiration_time=<none> should_cache_fn=<none> asdict=<false> to_str=compat.string_type function_multi_key_generator=<none><block_start>"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't generated or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
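For instance, if values for ``"key1"`` and ``"key3"`` are already
present in the cache, the decorated function is invoked only with
``"key2"``, and the newly generated value is merged back in; the
returned list still contains one value per argument, in argument
order::

    # assume "key1" and "key3" are cached, "key2" is not
    result = generate_something("key1", "key2", "key3")
    # the function body ran only for "key2"; len(result) == 3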
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
When ``asdict==True``, if the dictionary returned by the decorated
function is missing keys, those keys will not be cached.
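A sketch of the dictionary form, reusing the example above::

    @someregion.cache_multi_on_arguments(asdict=True)
    def generate_something(*keys):
        return dict(
            (key, somedatabase.query(key))
            for key in keys
        )

    values = generate_something("k1", "k2", "k3")
    # values maps each requested key to its result; keys omitted from
    # the returned dictionary are not cached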
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""<line_sep>expiration_time_is_callable=compat.callable(expiration_time)<if_stmt>function_multi_key_generator<is><none><block_start>function_multi_key_generator=self.function_multi_key_generator<block_end><def_stmt>decorator fn<block_start>key_generator=function_multi_key_generator(namespace fn to_str=to_str)<line_sep>@wraps(fn)<def_stmt>decorate *arg **kw<block_start>cache_keys=arg<line_sep>keys=key_generator(*arg **kw)<line_sep>key_lookup=dict(zip(keys cache_keys))<line_sep>@wraps(fn)<def_stmt>creator *keys_to_create<block_start><return>fn(*[key_lookup[k]<for>k keys_to_create])<block_end>timeout=expiration_time()<if>expiration_time_is_callable<else>expiration_time<if_stmt>asdict<block_start><def_stmt>dict_create *keys<block_start>d_values=creator(*keys)<line_sep><return>[d_values.get(key_lookup[k] NO_VALUE)<for>k keys]<block_end><def_stmt>wrap_cache_fn value<block_start><if_stmt>value<is>NO_VALUE<block_start><return><false><block_end><elif_stmt><not>should_cache_fn<block_start><return><true><block_end><else_stmt><block_start><return>should_cache_fn(value)<block_end><block_end>result=self.get_or_create_multi(keys dict_create timeout wrap_cache_fn)<line_sep>result=dict((k v)<for>k,v zip(cache_keys result)<if>v<is><not>NO_VALUE)<block_end><else_stmt><block_start>result=self.get_or_create_multi(keys creator timeout should_cache_fn)<block_end><return>result<block_end><def_stmt>invalidate *arg<block_start>keys=key_generator(*arg)<line_sep>self.delete_multi(keys)<block_end><def_stmt>set_ mapping<block_start>keys=list(mapping)<line_sep>gen_keys=key_generator(*keys)<line_sep>self.set_multi(dict((gen_key mapping[key])<for>gen_key,key zip(gen_keys keys)))<block_end><def_stmt>get *arg<block_start>keys=key_generator(*arg)<line_sep><return>self.get_multi(keys)<block_end><def_stmt>refresh *arg<block_start>keys=key_generator(*arg)<line_sep>values=fn(*arg)<if_stmt>asdict<block_start>self.set_multi(dict(zip(keys [values[a]<for>a arg])))<line_sep><return>values<block_end><else_stmt><block_start>self.set_multi(dict(zip(keys values)))<line_sep><return>values<block_end><block_end>decorate.set=set_<line_sep>decorate.invalidate=invalidate<line_sep>decorate.refresh=refresh<line_sep>decorate.get=get<line_sep><return>decorate<block_end><return>decorator<block_end><block_end><def_stmt>make_region *arg **kw<block_start>"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
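A minimal sketch, using only arguments documented above (the region
would still need to be configured with a backend before a decorated
function is actually called; that step is not shown here)::

    region = make_region()

    @region.cache_on_arguments(namespace="foo")
    def one(a, b):
        return a + b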
"""<line_sep><return>CacheRegion(*arg **kw)<block_end> |