########################################################################## # # Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## <import_from_future_stmt> with_statement<import_stmt>os<import_stmt>sys<import_stmt>shutil<import_stmt>unittest<import_stmt>IECore<class_stmt>TestBasicPreset(unittest.TestCase)<block_start><def_stmt>testCopy self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.FloatParameter("b" "" 1.0) ])<line_sep>testObj2=IECore.Parameterised("testParameterised2")<line_sep>testObj2.parameters().addParameters([IECore.BoolParameter("a" "" <false>) IECore.FloatParameter("c" "" 0.0) ])<line_sep>p=IECore.BasicPreset(testObj testObj.parameters())<line_sep>self.assertTrue(p.applicableTo(testObj testObj.parameters()))<line_sep>self.assertFalse(p.applicableTo(testObj2 testObj2.parameters()))<line_sep>testObj.parameters()["a"].setTypedValue(<false>)<line_sep>testObj.parameters()["b"].setTypedValue(0.0)<line_sep>p(testObj testObj.parameters())<line_sep>self.assertEqual(testObj.parameters()["a"].getTypedValue() <true>)<line_sep>self.assertEqual(testObj.parameters()["b"].getTypedValue() 1.0)<line_sep>p2=IECore.BasicPreset(testObj testObj.parameters() parameters=(testObj.parameters()["a"] ))<line_sep>self.assertTrue(p2.applicableTo(testObj testObj.parameters()))<line_sep>self.assertTrue(p2.applicableTo(testObj2 testObj.parameters()))<line_sep>p2(testObj2 testObj2.parameters())<line_sep>self.assertEqual(testObj2.parameters()["a"].getTypedValue() <true>)<line_sep>self.assertEqual(testObj2.parameters()["c"].getTypedValue() 0.0)<block_end><def_stmt>testLoad self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.FloatParameter("b" "" 1.0) 
])<line_sep>testObj2=IECore.Parameterised("testParameterised1")<line_sep>testObj2.parameters().addParameters([IECore.BoolParameter("a" "" <false>) IECore.FloatParameter("c" "" 0.0) ])<line_sep>savePath=os.path.abspath(os.path.join(os.path.dirname(__file__) "data" "basicPreset"))<line_sep>messageHandler=IECore.CapturingMessageHandler()<with_stmt>messageHandler<block_start>p=IECore.BasicPreset(os.path.join(savePath "basicPresetLoadTest" "basicPresetLoadTest-1.cob"))<block_end>self.assertEqual(len(messageHandler.messages) 0)<line_sep>self.assertTrue(p.applicableTo(testObj testObj.parameters()))<line_sep>self.assertFalse(p.applicableTo(testObj2 testObj2.parameters()))<line_sep>testObj.parameters()["a"].setTypedValue(<false>)<line_sep>testObj.parameters()["b"].setTypedValue(0.0)<line_sep>p(testObj testObj.parameters())<line_sep>self.assertEqual(testObj.parameters()["a"].getTypedValue() <true>)<line_sep>self.assertEqual(testObj.parameters()["b"].getTypedValue() 1.0)<block_end><def_stmt>testSave self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.FloatParameter("b" "" 1.0) ])<line_sep>testObj2=IECore.Parameterised("testParameterised1")<line_sep>testObj2.parameters().addParameters([IECore.BoolParameter("a" "" <false>) IECore.FloatParameter("c" "" 0.0) ])<line_sep>savePath=os.path.abspath(os.path.join(os.path.dirname(__file__) "data" "basicPreset"))<line_sep>preset=IECore.BasicPreset(testObj testObj.parameters())<line_sep># Save for the classLoader and check its there, we test the 'loadability' later... preset.save(savePath "basicPresetTest")<line_sep>self.assertTrue(os.path.isfile(os.path.join(savePath "basicPresetTest" "basicPresetTest-1.cob")))<line_sep>self.assertTrue(os.path.isfile(os.path.join(savePath "basicPresetTest" "basicPresetTest-1.py")))<line_sep># save without the classLoader and check its there preset.save(savePath "basicPresetTest" classLoadable=<false>)<line_sep>self.assertTrue(os.path.isfile(os.path.join(savePath "basicPresetTest.cob")))<line_sep># reload p=IECore.BasicPreset(os.path.join(savePath "basicPresetTest.cob"))<line_sep>self.assertTrue(p.applicableTo(testObj testObj.parameters()))<line_sep>self.assertFalse(p.applicableTo(testObj2 testObj2.parameters()))<line_sep>testObj.parameters()["a"].setTypedValue(<false>)<line_sep>testObj.parameters()["b"].setTypedValue(0.0)<line_sep>p(testObj testObj.parameters())<line_sep>self.assertEqual(testObj.parameters()["a"].getTypedValue() <true>)<line_sep>self.assertEqual(testObj.parameters()["b"].getTypedValue() 1.0)<line_sep>preset2=IECore.BasicPreset(testObj testObj.parameters() parameters=(testObj.parameters()["a"] ))<line_sep>preset2.save(savePath "basicPresetTest2" classLoadable=<false>)<line_sep>#reload p2=IECore.BasicPreset(os.path.join(savePath "basicPresetTest2.cob"))<line_sep>self.assertTrue(p2.applicableTo(testObj testObj.parameters()))<line_sep>self.assertTrue(p2.applicableTo(testObj2 testObj.parameters()))<line_sep>p2(testObj2 testObj2.parameters())<line_sep>self.assertEqual(testObj2.parameters()["a"].getTypedValue() <true>)<line_sep>self.assertEqual(testObj2.parameters()["c"].getTypedValue() 0.0)<block_end><def_stmt>testClassLoader self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.FloatParameter("b" "" 1.0) ])<line_sep>savePath=os.path.abspath(os.path.join(os.path.dirname(__file__) "data" 
"basicPreset"))<line_sep>preset=IECore.BasicPreset(testObj testObj.parameters())<line_sep>preset.save(savePath "basicPresetTestClassLoader")<line_sep># make sure that no messages are emitted during loading messageHandler=IECore.CapturingMessageHandler()<with_stmt>messageHandler<block_start>loader=IECore.ClassLoader(IECore.SearchPath(savePath))<line_sep>p=loader.load("basicPresetTestClassLoader")()<block_end>self.assertEqual(len(messageHandler.messages) 0)<line_sep>self.assertTrue(isinstance(p IECore.BasicPreset))<line_sep>p.metadata()<block_end><def_stmt>testClasses self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.ClassParameter("b" "" "IECORE_OP_PATHS" os.path.join("maths" "multiply") 2) ])<line_sep>testObj2=IECore.Parameterised("testParameterised2")<line_sep>testObj2.parameters().addParameters([IECore.ClassParameter("c" "" "IECORE_OP_PATHS") ])<line_sep>classes1=testObj.parameters()["b"].getClass(<true>)<line_sep>classes2=testObj2.parameters()["c"].getClass(<true>)<line_sep>self.assertNotEqual(classes1[1:] classes2[1:])<line_sep>p=IECore.BasicPreset(testObj testObj.parameters()["b"])<line_sep>self.assertTrue(p.applicableTo(testObj testObj.parameters()["b"]))<line_sep>self.assertFalse(p.applicableTo(testObj testObj.parameters()))<line_sep>self.assertTrue(p.applicableTo(testObj2 testObj2.parameters()["c"]))<line_sep>p(testObj2 testObj2.parameters()["c"])<line_sep>classes1=testObj.parameters()["b"].getClass(<true>)<line_sep>classes2=testObj2.parameters()["c"].getClass(<true>)<line_sep>self.assertEqual(classes1[1:] classes2[1:])<block_end><def_stmt>testClassVectors self<block_start>testObj=IECore.Parameterised("testParameterised1")<line_sep>testObj.parameters().addParameters([IECore.BoolParameter("a" "" <true>) IECore.ClassVectorParameter("b" "" "IECORE_OP_PATHS") ])<line_sep>testObj.parameters()["b"].setClasses([("mult" os.path.join("maths" "multiply") 2) ("coIO" "compoundObjectInOut" 1) ])<line_sep>testObj2=IECore.Parameterised("testParameterised2")<line_sep>testObj2.parameters().addParameters([IECore.ClassVectorParameter("c" "" "IECORE_OP_PATHS") ])<line_sep>classes1=[c[1:]<for>c testObj.parameters()["b"].getClasses(<true>)]<line_sep>classes2=[c[1:]<for>c testObj2.parameters()["c"].getClasses(<true>)]<line_sep>self.assertNotEqual(classes1 classes2)<line_sep>p=IECore.BasicPreset(testObj testObj.parameters()["b"])<line_sep>self.assertTrue(p.applicableTo(testObj testObj.parameters()["b"]))<line_sep>self.assertFalse(p.applicableTo(testObj testObj.parameters()))<line_sep>self.assertTrue(p.applicableTo(testObj2 testObj2.parameters()["c"]))<line_sep>p(testObj2 testObj2.parameters()["c"])<line_sep>classes1=[c[1:]<for>c testObj.parameters()["b"].getClasses(<true>)]<line_sep>classes2=[c[1:]<for>c testObj2.parameters()["c"].getClasses(<true>)]<line_sep>self.assertEqual(classes1 classes2)<block_end><def_stmt>testCompoundVectorParameter self<block_start>p=IECore.Parameterised("test")<line_sep>p.parameters().addParameters([IECore.BoolParameter("a" "" <false>) IECore.CompoundVectorParameter("c" "" members=[IECore.StringVectorParameter("s" "" IECore.StringVectorData()) IECore.BoolVectorParameter("b" "" IECore.BoolVectorData()) ])])<line_sep>p["c"]["s"].setValue(IECore.StringVectorData(["1" "2" "3"]))<line_sep>p["c"]["b"].setValue(IECore.BoolVectorData([<true> <false> <true>]))<line_sep>v=p.parameters().getValue().copy()<line_sep>preset=IECore.BasicPreset(p 
p.parameters())<line_sep>self.assertTrue(preset.applicableTo(p p.parameters()))<line_sep>p.parameters().setValue(p.parameters().defaultValue)<line_sep>self.assertNotEqual(p.parameters().getValue() v)<line_sep>preset(p p.parameters())<line_sep>self.assertEqual(p.parameters().getValue() v)<block_end><def_stmt>tearDown self<block_start>savePath=os.path.abspath(os.path.join(os.path.dirname(__file__) "data" "basicPreset"))<line_sep>paths=(os.path.join(savePath "basicPresetTest") os.path.join(savePath "basicPresetTest.cob") os.path.join(savePath "basicPresetTest2.cob") os.path.join(savePath "basicPresetTestClassLoader") )<for_stmt>p paths<block_start><if_stmt>os.path.isdir(p)<block_start>shutil.rmtree(p)<block_end><elif_stmt>os.path.isfile(p)<block_start>os.remove(p)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.

__name__ = "biotite"
__author__ = "<NAME>"
__all__ = ["Copyable"]

import abc


class Copyable(metaclass=abc.ABCMeta):
    """
    Base class for all objects that should be copyable.

    The public method `copy()` first creates a fresh instance of the
    class of the instance that is copied via the `__copy_create__()`
    method. All variables that could not be set via the constructor
    are then copied via `__copy_fill__()`, starting with the method in
    the uppermost base class and ending with the class of the instance
    to be copied.

    This approach solves the problem of encapsulated variables in
    superclasses.
    """

    def copy(self):
        """
        Create a deep copy of this object.

        Returns
        -------
        copy
            A copy of this object.
        """
        clone = self.__copy_create__()
        self.__copy_fill__(clone)
        return clone

    def __copy_create__(self):
        """
        Instantiate a new object of this class.

        Only the constructor should be called in this method. All
        further attributes that need to be copied are handled in
        `__copy_fill__()`. Do not call the `super()` method here.

        This method must be overridden if the constructor takes
        parameters.

        Returns
        -------
        copy
            A freshly instantiated copy of *self*.
        """
        return type(self)()

    def __copy_fill__(self, clone):
        """
        Copy all necessary attributes to the new object.

        Always call the `super()` method as first statement.

        Parameters
        ----------
        clone
            The freshly instantiated copy of *self*.
        """
        pass
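# Illustrative sketch (not part of Biotite): how a subclass might hook into the
# Copyable protocol described above. The `LabeledContainer` name and its
# attributes are hypothetical, chosen only to demonstrate overriding
# `__copy_create__()` when the constructor takes arguments and
# `__copy_fill__()` for state the constructor does not cover.

class LabeledContainer(Copyable):

    def __init__(self, label):
        self._label = label
        self._items = []

    def add(self, item):
        self._items.append(item)

    def __copy_create__(self):
        # The constructor takes a parameter, so the default implementation
        # (which calls `type(self)()` without arguments) must be overridden.
        return LabeledContainer(self._label)

    def __copy_fill__(self, clone):
        # Call super() first, then copy the state the constructor did not set.
        super().__copy_fill__(clone)
        clone._items = list(self._items)


# Usage: the clone is independent, so mutating it leaves the original intact.
original = LabeledContainer("demo")
original.add(42)
clone = original.copy()
clone.add(43)
assert original._items == [42]
assert clone._items == [42, 43]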
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>copy<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>keras.utils CustomObjectScope<import_from_stmt>keras.layers wrappers Input Layer<import_from_stmt>keras.layers RNN<import_from_stmt>keras layers<import_from_stmt>keras.models Sequential Model model_from_json<import_from_stmt>keras backend<as>K<import_from_stmt>keras.utils.generic_utils object_list_uid to_list<line_sep>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed # first, test with Dense layer <block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2) input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.random((10 3 4)) np.random.random((10 3 2)) epochs=1 batch_size=10)<line_sep># test config model.get_config()<line_sep># test when specifying a batch_input_shape test_input=np.random.random((1 3 4))<line_sep>test_output=model.predict(test_input)<line_sep>weights=model.layers[0].get_weights()<line_sep>reference=Sequential()<line_sep>reference.add(wrappers.TimeDistributed(layers.Dense(2) batch_input_shape=(1 3 4)))<line_sep>reference.add(layers.Activation('relu'))<line_sep>reference.compile(optimizer='rmsprop' loss='mse')<line_sep>reference.layers[0].set_weights(weights)<line_sep>reference_output=reference.predict(test_input)<line_sep>assert_allclose(test_output reference_output atol=1e-05)<line_sep># test with Embedding model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Embedding(5 6) batch_input_shape=(10 3 4) dtype='int32'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.randint(5 size=(10 3 4) dtype='int32') np.random.random((10 3 4 6)) epochs=1 batch_size=10)<line_sep># compare to not using batch_input_shape test_input=np.random.randint(5 size=(10 3 4) dtype='int32')<line_sep>test_output=model.predict(test_input)<line_sep>weights=model.layers[0].get_weights()<line_sep>reference=Sequential()<line_sep>reference.add(wrappers.TimeDistributed(layers.Embedding(5 6) input_shape=(3 4) dtype='int32'))<line_sep>reference.compile(optimizer='rmsprop' loss='mse')<line_sep>reference.layers[0].set_weights(weights)<line_sep>reference_output=reference.predict(test_input)<line_sep>assert_allclose(test_output reference_output atol=1e-05)<line_sep># test with Conv2D model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Conv2D(5 (2 2) padding='same') input_shape=(2 4 4 3)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch(np.random.random((1 2 4 4 3)) np.random.random((1 2 4 4 5)))<line_sep>model=model_from_json(model.to_json())<line_sep>model.summary()<line_sep># test stacked layers model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2) input_shape=(3 4)))<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(3)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(np.random.random((10 3 4)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test wrapping Sequential model model=Sequential()<line_sep>model.add(layers.Dense(3 input_dim=2))<line_sep>outer_model=Sequential()<line_sep>outer_model.add(wrappers.TimeDistributed(model input_shape=(3 2)))<line_sep>outer_model.compile(optimizer='rmsprop' 
loss='mse')<line_sep>outer_model.fit(np.random.random((10 3 2)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test with functional API x=Input(shape=(3 2))<line_sep>y=wrappers.TimeDistributed(model)(x)<line_sep>outer_model=Model(x y)<line_sep>outer_model.compile(optimizer='rmsprop' loss='mse')<line_sep>outer_model.fit(np.random.random((10 3 2)) np.random.random((10 3 3)) epochs=1 batch_size=10)<line_sep># test with BatchNormalization model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.BatchNormalization(center=<true> scale=<true>) name='bn' input_shape=(10 2)))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep># Assert that mean and variance are 0 and 1. td=model.layers[0]<assert_stmt>np.array_equal(td.get_weights()[2] np.array([0 0]))<assert_stmt>np.array_equal(td.get_weights()[3] np.array([1 1]))<line_sep># Train model.train_on_batch(np.random.normal(loc=2 scale=2 size=(1 10 2)) np.broadcast_to(np.array([0 1]) (1 10 2)))<line_sep># Assert that mean and variance changed. <assert_stmt><not>np.array_equal(td.get_weights()[2] np.array([0 0]))<assert_stmt><not>np.array_equal(td.get_weights()[3] np.array([1 1]))<line_sep># Verify input_map has one mapping from inputs to reshaped inputs. uid=object_list_uid(model.inputs)<assert_stmt>len(td._input_map.keys())<eq>1<assert_stmt>uid<in>td._input_map<assert_stmt>K.int_shape(td._input_map[uid])<eq>(<none> 2)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')@pytest.mark.skipif((K.backend()<eq>'cntk') reason='Flaky with CNTK backend')<def_stmt>test_TimeDistributed_learning_phase # test layers that need learning_phase to be set <block_start>np.random.seed(1234)<line_sep>x=Input(shape=(3 2))<line_sep>y=wrappers.TimeDistributed(layers.Dropout(.999))(x training=<true>)<line_sep>model=Model(x y)<line_sep>y=model.predict(np.random.random((10 3 2)))<line_sep>assert_allclose(np.mean(y) 0. 
atol=1e-1 rtol=1e-1)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed_trainable # test layers that need learning_phase to be set <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.TimeDistributed(layers.BatchNormalization())<line_sep>_=layer(x)<assert_stmt>len(layer.updates)<eq>2<assert_stmt>len(layer.trainable_weights)<eq>2<line_sep>layer.trainable=<false><assert_stmt>len(layer.updates)<eq>0<assert_stmt>len(layer.trainable_weights)<eq>0<line_sep>layer.trainable=<true><assert_stmt>len(layer.updates)<eq>2<assert_stmt>len(layer.trainable_weights)<eq>2<block_end>@pytest.mark.skipif((K.backend()<eq>'cntk'<or>K.backend()<eq>'mxnet') reason='Unknown timestamps for RNN not supported in CNTK and MXNet.')<def_stmt>test_TimeDistributed_with_masked_embedding_and_unspecified_shape # test with unspecified shape and Embeddings with mask_zero <block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Embedding(5 6 mask_zero=<true>) input_shape=(<none> <none>)))<line_sep># the shape so far: (N, t_1, t_2, 6) model.add(wrappers.TimeDistributed(layers.SimpleRNN(7 return_sequences=<true>)))<line_sep>model.add(wrappers.TimeDistributed(layers.SimpleRNN(8 return_sequences=<false>)))<line_sep>model.add(layers.SimpleRNN(1 return_sequences=<false>))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model_input=np.random.randint(low=1 high=5 size=(10 3 4) dtype='int32')<for_stmt>i range(4)<block_start>model_input[i i: i:]=0<block_end>model.fit(model_input np.random.random((10 1)) epochs=1 batch_size=10)<line_sep>mask_outputs=[model.layers[0].compute_mask(model.input)]<for_stmt>layer model.layers[1:]<block_start>mask_outputs.append(layer.compute_mask(layer.input mask_outputs[-1]))<block_end>func=K.function([model.input] mask_outputs[:-1])<line_sep>mask_outputs_val=func([model_input])<line_sep>ref_mask_val_0=model_input<g>0# embedding layer ref_mask_val_1=ref_mask_val_0# first RNN layer ref_mask_val_2=np.any(ref_mask_val_1 axis=-1)# second RNN layer ref_mask_val=[ref_mask_val_0 ref_mask_val_1 ref_mask_val_2]<for_stmt>i range(3)<block_start><assert_stmt>np.array_equal(mask_outputs_val[i] ref_mask_val[i])<block_end><assert_stmt>mask_outputs[-1]<is><none><block_end># final layer @pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support TimeDistributed and RNN yet')<def_stmt>test_TimeDistributed_with_masking_layer # test with Masking layer <block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Masking(mask_value=0. 
) input_shape=(<none> 4)))<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(5)))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model_input=np.random.randint(low=1 high=5 size=(10 3 4))<for_stmt>i range(4)<block_start>model_input[i i: :]=0.<block_end>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.fit(model_input np.random.random((10 3 5)) epochs=1 batch_size=6)<line_sep>mask_outputs=[model.layers[0].compute_mask(model.input)]<line_sep>mask_outputs<augadd>[model.layers[1].compute_mask(model.layers[1].input mask_outputs[-1])]<line_sep>func=K.function([model.input] mask_outputs)<line_sep>mask_outputs_val=func([model_input])<assert_stmt>np.array_equal(mask_outputs_val[0] np.any(model_input axis=-1))<assert_stmt>np.array_equal(mask_outputs_val[1] np.any(model_input axis=-1))<block_end><def_stmt>test_regularizers <block_start>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2 kernel_regularizer='l1') input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<assert_stmt>len(model.layers[0].layer.losses)<eq>1<assert_stmt>len(model.layers[0].losses)<eq>1<assert_stmt>len(model.layers[0].get_losses_for(<none>))<eq>1<assert_stmt>len(model.losses)<eq>1<line_sep>model=Sequential()<line_sep>model.add(wrappers.TimeDistributed(layers.Dense(2 activity_regularizer='l1') input_shape=(3 4)))<line_sep>model.add(layers.Activation('relu'))<line_sep>model.compile(optimizer='rmsprop' loss='mse')<assert_stmt>len(model.losses)<eq>1<block_end><def_stmt>test_Bidirectional <block_start>rnn=layers.SimpleRNN<line_sep>samples=2<line_sep>dim=2<line_sep>timesteps=2<line_sep>output_dim=2<line_sep>dropout_rate=0.2<for_stmt>mode ['sum' 'concat']<block_start>x=np.random.random((samples timesteps dim))<line_sep>target_dim=2<times>output_dim<if>mode<eq>'concat'<else>output_dim<line_sep>y=np.random.random((samples target_dim))<line_sep># test with Sequential model model=Sequential()<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode input_shape=(timesteps dim)))<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># test config model.get_config()<line_sep>model=model_from_json(model.to_json())<line_sep>model.summary()<line_sep># test stacked bidirectional layers model=Sequential()<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim return_sequences=<true>) merge_mode=mode input_shape=(timesteps dim)))<line_sep>model.add(wrappers.Bidirectional(rnn(output_dim) merge_mode=mode))<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># test with functional API inputs=Input((timesteps dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<line_sep># Bidirectional and stateful inputs=Input(batch_shape=(1 timesteps dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim stateful=<true>) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<block_end><block_end>@pytest.mark.skipif((K.backend()<eq>'cntk') reason='Unknown timestamps not supported in CNTK.')<def_stmt>test_Bidirectional_dynamic_timesteps # test 
with functional API with dynamic length <block_start>rnn=layers.SimpleRNN<line_sep>samples=2<line_sep>dim=2<line_sep>timesteps=2<line_sep>output_dim=2<line_sep>dropout_rate=0.2<for_stmt>mode ['sum' 'concat']<block_start>x=np.random.random((samples timesteps dim))<line_sep>target_dim=2<times>output_dim<if>mode<eq>'concat'<else>output_dim<line_sep>y=np.random.random((samples target_dim))<line_sep>inputs=Input((<none> dim))<line_sep>outputs=wrappers.Bidirectional(rnn(output_dim dropout=dropout_rate recurrent_dropout=dropout_rate) merge_mode=mode)(inputs)<line_sep>model=Model(inputs outputs)<line_sep>model.compile(loss='mse' optimizer='sgd')<line_sep>model.fit(x y epochs=1 batch_size=1)<block_end><block_end>@pytest.mark.parametrize('merge_mode' ['sum' 'mul' 'ave' 'concat' <none>])<def_stmt>test_Bidirectional_merged_value merge_mode<block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>X=[np.random.rand(samples timesteps dim)]<if_stmt>merge_mode<eq>'sum'<block_start>merge_func=<lambda>y y_rev:y+y_rev<block_end><elif_stmt>merge_mode<eq>'mul'<block_start>merge_func=<lambda>y y_rev:y<times>y_rev<block_end><elif_stmt>merge_mode<eq>'ave'<block_start>merge_func=<lambda>y y_rev:(y+y_rev)/2<block_end><elif_stmt>merge_mode<eq>'concat'<block_start>merge_func=<lambda>y y_rev:np.concatenate((y y_rev) axis=-1)<block_end><else_stmt><block_start>merge_func=<lambda>y y_rev:[y y_rev]<block_end># basic case inputs=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_sequences=<true>) merge_mode=merge_mode)<line_sep>f_merged=K.function([inputs] to_list(layer(inputs)))<line_sep>f_forward=K.function([inputs] [layer.forward_layer.call(inputs)])<line_sep>f_backward=K.function([inputs] [K.reverse(layer.backward_layer.call(inputs) 1)])<line_sep>y_merged=f_merged(X)<line_sep>y_expected=to_list(merge_func(f_forward(X)[0] f_backward(X)[0]))<assert_stmt>len(y_merged)<eq>len(y_expected)<for_stmt>x1,x2 zip(y_merged y_expected)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end># test return_state inputs=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_state=<true>) merge_mode=merge_mode)<line_sep>f_merged=K.function([inputs] layer(inputs))<line_sep>f_forward=K.function([inputs] layer.forward_layer.call(inputs))<line_sep>f_backward=K.function([inputs] layer.backward_layer.call(inputs))<line_sep>n_states=len(layer.layer.states)<line_sep>y_merged=f_merged(X)<line_sep>y_forward=f_forward(X)<line_sep>y_backward=f_backward(X)<line_sep>y_expected=to_list(merge_func(y_forward[0] y_backward[0]))<assert_stmt>len(y_merged)<eq>len(y_expected)+n_states<times>2<for_stmt>x1,x2 zip(y_merged y_expected)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end># test if the state of a BiRNN is the concatenation of the underlying RNNs y_merged=y_merged[-n_states<times>2:]<line_sep>y_forward=y_forward[-n_states:]<line_sep>y_backward=y_backward[-n_states:]<for_stmt>state_birnn,state_inner zip(y_merged y_forward+y_backward)<block_start>assert_allclose(state_birnn state_inner atol=1e-5)<block_end><block_end>@pytest.mark.skipif(K.backend()<eq>'theano'<or>K.backend()<eq>'mxnet' reason='Not supported.')@pytest.mark.parametrize('merge_mode' ['sum' 'concat' <none>])<def_stmt>test_Bidirectional_dropout merge_mode<block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>X=[np.random.rand(samples timesteps dim)]<line_sep>inputs=Input((timesteps 
dim))<line_sep>wrapped=wrappers.Bidirectional(rnn(units dropout=0.2 recurrent_dropout=0.2) merge_mode=merge_mode)<line_sep>outputs=to_list(wrapped(inputs training=<true>))<assert_stmt>all(<not>getattr(x '_uses_learning_phase')<for>x outputs)<line_sep>inputs=Input((timesteps dim))<line_sep>wrapped=wrappers.Bidirectional(rnn(units dropout=0.2 return_state=<true>) merge_mode=merge_mode)<line_sep>outputs=to_list(wrapped(inputs))<assert_stmt>all(x._uses_learning_phase<for>x outputs)<line_sep>model=Model(inputs outputs)<assert_stmt>model.uses_learning_phase<line_sep>y1=to_list(model.predict(X))<line_sep>y2=to_list(model.predict(X))<for_stmt>x1,x2 zip(y1 y2)<block_start>assert_allclose(x1 x2 atol=1e-5)<block_end><block_end><def_stmt>test_Bidirectional_state_reuse <block_start>rnn=layers.LSTM<line_sep>samples=2<line_sep>dim=5<line_sep>timesteps=3<line_sep>units=3<line_sep>input1=Input((timesteps dim))<line_sep>layer=wrappers.Bidirectional(rnn(units return_state=<true> return_sequences=<true>))<line_sep>state=layer(input1)[1:]<line_sep># test passing invalid initial_state: passing a tensor input2=Input((timesteps dim))<with_stmt>pytest.raises(ValueError)<block_start>output=wrappers.Bidirectional(rnn(units))(input2 initial_state=state[0])<block_end># test valid usage: passing a list output=wrappers.Bidirectional(rnn(units))(input2 initial_state=state)<line_sep>model=Model([input1 input2] output)<assert_stmt>len(model.layers)<eq>4<assert_stmt>isinstance(model.layers[-1].input list)<line_sep>inputs=[np.random.rand(samples timesteps dim) np.random.rand(samples timesteps dim)]<line_sep>outputs=model.predict(inputs)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support custom RNN cell yet')<def_stmt>test_Bidirectional_with_constants <block_start><class_stmt>RNNCellWithConstants(Layer)<block_start><def_stmt>__init__ self units **kwargs<block_start>self.units=units<line_sep>self.state_size=units<line_sep>super(RNNCellWithConstants self).__init__(**kwargs)<block_end><def_stmt>build self input_shape<block_start><if_stmt><not>isinstance(input_shape list)<block_start><raise>TypeError('expects constants shape')<block_end>[input_shape constant_shape]=input_shape<line_sep># will (and should) raise if more than one constant passed self.input_kernel=self.add_weight(shape=(input_shape[-1] self.units) initializer='uniform' name='kernel')<line_sep>self.recurrent_kernel=self.add_weight(shape=(self.units self.units) initializer='uniform' name='recurrent_kernel')<line_sep>self.constant_kernel=self.add_weight(shape=(constant_shape[-1] self.units) initializer='uniform' name='constant_kernel')<line_sep>self.built=<true><block_end><def_stmt>call self inputs states constants<block_start>[prev_output]=states<line_sep>[constant]=constants<line_sep>h_input=K.dot(inputs self.input_kernel)<line_sep>h_state=K.dot(prev_output self.recurrent_kernel)<line_sep>h_const=K.dot(constant self.constant_kernel)<line_sep>output=h_input+h_state+h_const<line_sep><return>output [output]<block_end><def_stmt>get_config self<block_start>config={'units':self.units}<line_sep>base_config=super(RNNCellWithConstants self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end># Test basic case. 
x=Input((5 5))<line_sep>c=Input((3 ))<line_sep>cell=RNNCellWithConstants(32)<line_sep>custom_objects={'RNNCellWithConstants':RNNCellWithConstants}<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional(RNN(cell))<block_end>y=layer(x constants=c)<line_sep>model=Model([x c] y)<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch([np.zeros((6 5 5)) np.zeros((6 3))] np.zeros((6 64)))<line_sep># Test basic case serialization. x_np=np.random.random((6 5 5))<line_sep>c_np=np.random.random((6 3))<line_sep>y_np=model.predict([x_np c_np])<line_sep>weights=model.get_weights()<line_sep>config=layer.get_config()<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer(x constants=c)<line_sep>model=Model([x c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_2=model.predict([x_np c_np])<line_sep>assert_allclose(y_np y_np_2 atol=1e-4)<line_sep># test flat list inputs <with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer([x c])<line_sep>model=Model([x c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_3=model.predict([x_np c_np])<line_sep>assert_allclose(y_np y_np_3 atol=1e-4)<block_end>@pytest.mark.skipif(K.backend()<eq>'mxnet' reason='MXNet backend does not support custom RNN cell yet')<def_stmt>test_Bidirectional_with_constants_layer_passing_initial_state <block_start><class_stmt>RNNCellWithConstants(Layer)<block_start><def_stmt>__init__ self units **kwargs<block_start>self.units=units<line_sep>self.state_size=units<line_sep>super(RNNCellWithConstants self).__init__(**kwargs)<block_end><def_stmt>build self input_shape<block_start><if_stmt><not>isinstance(input_shape list)<block_start><raise>TypeError('expects constants shape')<block_end>[input_shape constant_shape]=input_shape<line_sep># will (and should) raise if more than one constant passed self.input_kernel=self.add_weight(shape=(input_shape[-1] self.units) initializer='uniform' name='kernel')<line_sep>self.recurrent_kernel=self.add_weight(shape=(self.units self.units) initializer='uniform' name='recurrent_kernel')<line_sep>self.constant_kernel=self.add_weight(shape=(constant_shape[-1] self.units) initializer='uniform' name='constant_kernel')<line_sep>self.built=<true><block_end><def_stmt>call self inputs states constants<block_start>[prev_output]=states<line_sep>[constant]=constants<line_sep>h_input=K.dot(inputs self.input_kernel)<line_sep>h_state=K.dot(prev_output self.recurrent_kernel)<line_sep>h_const=K.dot(constant self.constant_kernel)<line_sep>output=h_input+h_state+h_const<line_sep><return>output [output]<block_end><def_stmt>get_config self<block_start>config={'units':self.units}<line_sep>base_config=super(RNNCellWithConstants self).get_config()<line_sep><return>dict(list(base_config.items())+list(config.items()))<block_end><block_end># Test basic case. 
x=Input((5 5))<line_sep>c=Input((3 ))<line_sep>s_for=Input((32 ))<line_sep>s_bac=Input((32 ))<line_sep>cell=RNNCellWithConstants(32)<line_sep>custom_objects={'RNNCellWithConstants':RNNCellWithConstants}<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional(RNN(cell))<block_end>y=layer(x initial_state=[s_for s_bac] constants=c)<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.compile(optimizer='rmsprop' loss='mse')<line_sep>model.train_on_batch([np.zeros((6 5 5)) np.zeros((6 32)) np.zeros((6 32)) np.zeros((6 3))] np.zeros((6 64)))<line_sep># Test basic case serialization. x_np=np.random.random((6 5 5))<line_sep>s_fw_np=np.random.random((6 32))<line_sep>s_bk_np=np.random.random((6 32))<line_sep>c_np=np.random.random((6 3))<line_sep>y_np=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>weights=model.get_weights()<line_sep>config=layer.get_config()<with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer(x initial_state=[s_for s_bac] constants=c)<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_2=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>assert_allclose(y_np y_np_2 atol=1e-4)<line_sep># verify that state is used y_np_2_different_s=model.predict([x_np s_fw_np+10. s_bk_np+10. c_np])<with_stmt>pytest.raises(AssertionError)<block_start>assert_allclose(y_np y_np_2_different_s atol=1e-4)<block_end># test flat list inputs <with_stmt>CustomObjectScope(custom_objects)<block_start>layer=wrappers.Bidirectional.from_config(copy.deepcopy(config))<block_end>y=layer([x s_for s_bac c])<line_sep>model=Model([x s_for s_bac c] y)<line_sep>model.set_weights(weights)<line_sep>y_np_3=model.predict([x_np s_fw_np s_bk_np c_np])<line_sep>assert_allclose(y_np y_np_3 atol=1e-4)<block_end><def_stmt>test_Bidirectional_trainable # test layers that need learning_phase to be set <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3))<line_sep>_=layer(x)<assert_stmt>len(layer.trainable_weights)<eq>6<line_sep>layer.trainable=<false><assert_stmt>len(layer.trainable_weights)<eq>0<line_sep>layer.trainable=<true><assert_stmt>len(layer.trainable_weights)<eq>6<block_end><def_stmt>test_Bidirectional_updates <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3))<assert_stmt>len(layer.updates)<eq>0<assert_stmt>len(layer.get_updates_for(<none>))<eq>0<assert_stmt>len(layer.get_updates_for(x))<eq>0<line_sep>layer.forward_layer.add_update(0 inputs=x)<line_sep>layer.forward_layer.add_update(1 inputs=<none>)<line_sep>layer.backward_layer.add_update(0 inputs=x)<line_sep>layer.backward_layer.add_update(1 inputs=<none>)<assert_stmt>len(layer.updates)<eq>4<assert_stmt>len(layer.get_updates_for(<none>))<eq>2<assert_stmt>len(layer.get_updates_for(x))<eq>2<block_end><def_stmt>test_Bidirectional_losses <block_start>x=Input(shape=(3 2))<line_sep>layer=wrappers.Bidirectional(layers.SimpleRNN(3 kernel_regularizer='l1' bias_regularizer='l1'))<line_sep>_=layer(x)<assert_stmt>len(layer.losses)<eq>4<assert_stmt>len(layer.get_losses_for(<none>))<eq>4<assert_stmt>len(layer.get_losses_for(x))<eq>0<line_sep>layer.forward_layer.add_loss(0 inputs=x)<line_sep>layer.forward_layer.add_loss(1 inputs=<none>)<line_sep>layer.backward_layer.add_loss(0 inputs=x)<line_sep>layer.backward_layer.add_loss(1 
inputs=<none>)<assert_stmt>len(layer.losses)<eq>8<assert_stmt>len(layer.get_losses_for(<none>))<eq>6<assert_stmt>len(layer.get_losses_for(x))<eq>2<block_end><if_stmt>__name__<eq>'__main__'<block_start>pytest.main([__file__])<block_end>
"""Lowest-common-denominator implementations of platform functionality."""<import_from_future_stmt> absolute_import division print_function with_statement<import_stmt>errno<import_stmt>socket<import_from_stmt>tornado.platform interface<class_stmt>Waker(interface.Waker)<block_start>"""Create an OS independent asynchronous pipe. For use on platforms that don't have os.pipe() (or where pipes cannot be passed to select()), but do have sockets. This includes Windows and Jython. """<def_stmt>__init__ self# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py <block_start>self.writer=socket.socket()<line_sep># Disable buffering -- pulling the trigger sends 1 byte, # and we want that sent immediately, to wake up ASAP. self.writer.setsockopt(socket.IPPROTO_TCP socket.TCP_NODELAY 1)<line_sep>count=0<while_stmt>1<block_start>count<augadd>1<line_sep># Bind to a local port; for efficiency, let the OS pick # a free port for us. # Unfortunately, stress tests showed that we may not # be able to connect to that port ("Address already in # use") despite that the OS picked it. This appears # to be a race bug in the Windows socket implementation. # So we loop until a connect() succeeds (almost always # on the first try). See the long thread at # http://mail.zope.org/pipermail/zope/2005-July/160433.html # for hideous details. a=socket.socket()<line_sep>a.bind(("127.0.0.1" 0))<line_sep>a.listen(1)<line_sep>connect_address=a.getsockname()# assigned (host, port) pair <try_stmt><block_start>self.writer.connect(connect_address)<line_sep><break># success <block_end><except_stmt>socket.error<as>detail<block_start><if_stmt>(<not>hasattr(errno 'WSAEADDRINUSE')<or>detail[0]<ne>errno.WSAEADDRINUSE)# "Address already in use" is the only error # I've seen on two WinXP Pro SP2 boxes, under # Pythons 2.3.5 and 2.4.1. <block_start><raise><block_end># (10048, 'Address already in use') # assert count <= 2 # never triggered in Tim's tests <if_stmt>count<ge>10# I've never seen it go above 2 <block_start>a.close()<line_sep>self.writer.close()<line_sep><raise>socket.error("Cannot bind trigger!")<block_end># Close `a` and try again. Note: I originally put a short # sleep() here, but it didn't appear to help or hurt. a.close()<block_end><block_end>self.reader,addr=a.accept()<line_sep>self.reader.setblocking(0)<line_sep>self.writer.setblocking(0)<line_sep>a.close()<line_sep>self.reader_fd=self.reader.fileno()<block_end><def_stmt>fileno self<block_start><return>self.reader.fileno()<block_end><def_stmt>write_fileno self<block_start><return>self.writer.fileno()<block_end><def_stmt>wake self<block_start><try_stmt><block_start>self.writer.send(b"x")<block_end><except_stmt>(IOError socket.error)<block_start><pass><block_end><block_end><def_stmt>consume self<block_start><try_stmt><block_start><while_stmt><true><block_start>result=self.reader.recv(1024)<if_stmt><not>result<block_start><break><block_end><block_end><block_end><except_stmt>(IOError socket.error)<block_start><pass><block_end><block_end><def_stmt>close self<block_start>self.reader.close()<line_sep>self.writer.close()<block_end><block_end>
""" Basic usage =========== This example presents the basic usage of brokenaxes """<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>brokenaxes brokenaxes<import_stmt>numpy<as>np<line_sep>fig=plt.figure(figsize=(5 2))<line_sep>bax=brokenaxes(xlims=((0 .1) (.4 .7)) ylims=((-1 .7) (.79 1)) hspace=.05)<line_sep>x=np.linspace(0 1 100)<line_sep>bax.plot(x np.sin(10<times>x) label='sin')<line_sep>bax.plot(x np.cos(10<times>x) label='cos')<line_sep>bax.legend(loc=3)<line_sep>bax.set_xlabel('time')<line_sep>bax.set_ylabel('value')<line_sep>
import clpy
import clpy.sparse.base

_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
    unsigned long long* address_as_ull = (unsigned long long*)address;
    unsigned long long old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val +
                                             __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
'''


def isintlike(x):
    try:
        return bool(int(x) == x)
    except (TypeError, ValueError):
        return False


def isscalarlike(x):
    return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)


def isshape(x):
    if not isinstance(x, tuple) or len(x) != 2:
        return False
    m, n = x
    return isintlike(m) and isintlike(n)
from pytest import raises

from discopy.cartesian import *


def test_Box_repr():
    f = Box('f', 1, 2, lambda x: (x, x))
    assert "Box('f', 1, 2" in repr(f)


def test_Function_str():
    f = Function(2, 1, lambda x, y: x + y)
    assert 'Function(dom=2, cod=1,' in str(f)


def test_Function_call():
    f = Swap(2, 1)
    values = (2, 3)
    with raises(TypeError) as err:
        f(*values)
    assert str(err.value) == messages.expected_input_length(f, values)


def test_Function_then():
    f, g = Function(2, 1, lambda x, y: x + y), Function(1, 1, lambda x: x + 1)
    assert Function.id(2).then(*(f, g))(20, 21) == 42


def test_Function_then_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f >> g
    assert str(err.value) == messages.type_err(Function, g)
    g = Function.id(2)
    with raises(AxiomError) as err:
        f >> g
    assert str(err.value) == messages.does_not_compose(f, g)


def test_Function_tensor():
    assert Function.id(3)(1, 2, 3)\
        == Function.id(0).tensor(*(3 * [Function.id(1)]))(1, 2, 3)


def test_Function_tensor_err():
    f = Function(2, 1, lambda x, y: x + y)
    g = (lambda x: x, )
    with raises(TypeError) as err:
        f @ g
    assert str(err.value) == messages.type_err(Function, g)
import os
import sys
import shutil

cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))


# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
    from mkdist import bsp_copy_files
    import rtconfig

    library_dir = os.path.join(dist_dir, 'libraries')

    print("=> copy nrf52 bsp libraries")
    library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
    bsp_copy_files(library_path, library_dir)
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>json<import_from_stmt>splash defaults<import_from_stmt>splash.utils to_bytes path_join_secure<import_from_stmt>splash.errors BadOption<class_stmt>RenderOptions(object)<block_start>""" Options that control how to render a response. """<line_sep>_REQUIRED=object()<def_stmt>__init__ self data max_timeout<block_start>self.data=data<line_sep>self.max_timeout=max_timeout<block_end>@classmethod<def_stmt>raise_error cls argument description type='bad_argument' **kwargs<block_start>params={'type':type 'argument':argument 'description':description}<line_sep>params.update(kwargs)<line_sep><raise>BadOption(params)<block_end>@classmethod<def_stmt>fromrequest cls request max_timeout<block_start>""" Initialize options from a Twisted Request. """<line_sep># 1. GET / POST data data={key.decode('utf-8'):values[0].decode('utf-8')<for>key,values request.args.items()}<if_stmt>request.method<eq>b'POST'<block_start>content_type=request.getHeader(b'content-type')<if_stmt>content_type<block_start>request.content.seek(0)<line_sep># 2. application/json POST data <if_stmt>b'application/json'<in>content_type<block_start><try_stmt><block_start>content=request.content.read().decode('utf-8')<line_sep>data.update(json.loads(content))<block_end><except_stmt>ValueError<as>e<block_start><raise>BadOption({'type':'invalid_json' 'description':"Can't decode JSON" 'message':str(e) })<block_end><block_end># 3. js_source from application/javascript POST requests <if_stmt>b'application/javascript'<in>content_type<block_start>data['js_source']=request.content.read().decode('utf-8')<block_end>request.content.seek(0)<block_end><block_end>data['uid']=id(request)<line_sep><return>cls(data max_timeout)<block_end><def_stmt>get_expired_args self cache<block_start>""" Return a list of argument names from load_args which can't be loaded """<line_sep><return>cache.get_missing(self.get_load_args().items())<block_end><def_stmt>save_args_to_cache self cache<block_start>""" Process save_args and put all values to cache. Return a list of (name, key) pairs. 
"""<line_sep>save_args=self.get_save_args()<line_sep>save_values=[self.data.get(name)<for>name save_args]<line_sep>keys=cache.add_many(save_values)<line_sep><return>list(zip(save_args keys))<block_end><def_stmt>load_cached_args self cache<block_start>load_args=self.get_load_args()<for_stmt>name,key (load_args<or>{}).items()<block_start>self.data[name]=cache[key]<block_end><block_end><def_stmt>get self name default=_REQUIRED type=str range=<none><block_start>value=self.data.get(name)<if_stmt>value<is><not><none><block_start><if_stmt>type<is><not><none><block_start><try_stmt><block_start>value=type(value)<block_end><except_stmt>ValueError<block_start>msg="Argument %r has a wrong type"%(name )<line_sep>self.raise_error(name msg required_type=type.__name__)<block_end><block_end><if_stmt>range<is><not><none><and><not>(range[0]<le>value<le>range[1])<block_start>self.raise_error(name 'Argument is out of the allowed range' min=range[0] max=range[1] value=value)<block_end><return>value<block_end><elif_stmt>default<is>self._REQUIRED<block_start>self.raise_error(name 'Required argument is missing: %s'%name type='argument_required')<block_end><else_stmt><block_start><return>default<block_end><block_end><def_stmt>_get_bool self name default=_REQUIRED<block_start><return>self.get(name default type=int range=(0 1))<block_end><def_stmt>_get_url self name default=_REQUIRED<block_start>url=self.get(name default type=<none>)<if_stmt>isinstance(url bytes)<block_start>url=url.decode('utf8')<block_end><return>url<block_end><def_stmt>get_uid self<block_start><return>self.get('uid')<block_end><def_stmt>get_url self<block_start><return>self._get_url("url")<block_end><def_stmt>get_baseurl self<block_start><return>self._get_url("baseurl" default=<none>)<block_end><def_stmt>get_wait self<block_start><return>self.get("wait" defaults.WAIT_TIME type=float range=(0 self.get_timeout()))<block_end><def_stmt>get_timeout self<block_start>default=min(self.max_timeout defaults.TIMEOUT)<line_sep><return>self.get("timeout" default type=float range=(0 self.max_timeout))<block_end><def_stmt>get_resource_timeout self<block_start><return>self.get("resource_timeout" defaults.RESOURCE_TIMEOUT type=float range=(0 1e6))<block_end><def_stmt>get_response_body self<block_start><return>self._get_bool("response_body" defaults.RESPONSE_BODY_ENABLED)<block_end><def_stmt>get_request_body self<block_start><return>self._get_bool("request_body" defaults.REQUEST_BODY_ENABLED)<block_end><def_stmt>get_images self<block_start><return>self._get_bool("images" defaults.AUTOLOAD_IMAGES)<block_end><def_stmt>get_proxy self<block_start><return>self.get("proxy" default=<none>)<block_end><def_stmt>get_js_source self<block_start><return>self.get("js_source" default=<none>)<block_end><def_stmt>get_width self<block_start><return>self.get("width" <none> type=int range=(1 defaults.MAX_WIDTH))<block_end><def_stmt>get_height self<block_start><return>self.get("height" <none> type=int range=(1 defaults.MAX_HEIGTH))<block_end><def_stmt>get_scale_method self<block_start>scale_method=self.get("scale_method" defaults.IMAGE_SCALE_METHOD)<line_sep>allowed_scale_methods=['raster' 'vector']<if_stmt>scale_method<not><in>allowed_scale_methods<block_start>self.raise_error(argument='scale_method' description="Invalid 'scale_method': %s"%scale_method allowed=allowed_scale_methods received=scale_method )<block_end><return>scale_method<block_end><def_stmt>get_quality self<block_start><return>self.get("quality" defaults.JPEG_QUALITY type=int range=(0 
100))<block_end><def_stmt>get_http_method self<block_start>method=self.get("http_method" "GET")<if_stmt>method.upper()<not><in>["POST" "GET"]<block_start>self.raise_error("http_method" "Unsupported HTTP method {}".format(method))<block_end><return>method<block_end><def_stmt>get_body self<block_start>body=self.get("body" <none> to_bytes)<line_sep>method=self.get("http_method" "GET").upper()<if_stmt>method<eq>'GET'<and>body<block_start>self.raise_error("body" "GET request should not have a body")<block_end><return>body<block_end><def_stmt>get_render_all self wait=<none><block_start>result=self._get_bool("render_all" <false>)<if_stmt>result<eq>1<and>wait<eq>0<block_start>self.raise_error("render_all" "Pass non-zero 'wait' to render full webpage")<block_end><return>result<block_end><def_stmt>get_lua_source self<block_start><return>self.get("lua_source")<block_end><def_stmt>get_js_profile self js_profiles_path<block_start>js_profile=self.get("js" default=<none>)<if_stmt><not>js_profile<block_start><return>js_profile<block_end><if_stmt>js_profiles_path<is><none><block_start>self.raise_error('js' 'Javascript profiles are not enabled on server')<block_end><try_stmt><block_start>profile_dir=path_join_secure(js_profiles_path js_profile)<block_end><except_stmt>ValueError<as>e# security check fails <block_start>print(e)<line_sep>self.raise_error('js' 'Javascript profile does not exist')<block_end><if_stmt><not>os.path.isdir(profile_dir)<block_start>self.raise_error('js' 'Javascript profile does not exist')<block_end><return>profile_dir<block_end><def_stmt>get_headers self<block_start>headers=self.get("headers" default=<none> type=<none>)<if_stmt>headers<is><none><block_start><return>headers<block_end><if_stmt><not>isinstance(headers (list tuple dict))<block_start>self.raise_error(argument='headers' description="'headers' must be either a JSON array of "<concat>"(name, value) pairs or a JSON object")<block_end><if_stmt>isinstance(headers (list tuple))<block_start><for_stmt>el headers<block_start>string_only=all(isinstance(e str)<for>e el)<if_stmt><not>(isinstance(el (list tuple))<and>len(el)<eq>2<and>string_only)<block_start>self.raise_error(argument='headers' description="'headers' must be either a JSON array of "<concat>"(name, value) pairs or a JSON object")<block_end><block_end><block_end><return>headers<block_end><def_stmt>get_save_args self<block_start>save_args=self.get("save_args" default=<none> type=<none>)<if_stmt>save_args<is><none><block_start><return>[]<block_end><if_stmt>isinstance(save_args str)# comma-separated string <block_start>save_args=save_args.split(',')<block_end><if_stmt><not>isinstance(save_args list)<block_start>self.raise_error(argument="save_args" description="'save_args' should be either a comma-separated "<concat>"string or a JSON array with argument names" )<block_end># JSON array <if_stmt><not>all(isinstance(a str)<for>a save_args)<block_start>self.raise_error(argument="save_args" description="'save_args' should be a list of strings" )<block_end><return>save_args<block_end><def_stmt>get_load_args self<block_start>load_args=self.get("load_args" default=<none> type=<none>)<if_stmt>load_args<is><none><block_start><return>{}<block_end><if_stmt>isinstance(load_args str)<block_start><try_stmt><block_start>load_args=dict(kv.split("=" 1)<for>kv load_args.split(';'))<block_end><except_stmt>ValueError<block_start>self.raise_error(argument="load_args" description="'load_args' string value is not a "<concat>"semicolon-separated list of name=hash 
pairs")<block_end><block_end><if_stmt><not>isinstance(load_args dict)<block_start>self.raise_error(argument="load_args" description="'load_args' should be either a JSON object with "<concat>"argument hashes or a semicolon-separated list "<concat>"of name=hash pairs")<block_end><return>load_args<block_end><def_stmt>get_viewport self wait=<none><block_start>viewport=self.get("viewport" defaults.VIEWPORT_SIZE)<if_stmt>viewport<eq>'full'<block_start><if_stmt>wait<eq>0<block_start>self.raise_error("viewport" "Pass non-zero 'wait' to render full webpage")<block_end><block_end><else_stmt><block_start><try_stmt><block_start>validate_size_str(viewport)<block_end><except_stmt>ValueError<as>e<block_start>self.raise_error("viewport" str(e))<block_end><block_end><return>viewport<block_end><def_stmt>get_filters self pool=<none> adblock_rules=<none><block_start>filter_names=self.get('filters' '')<line_sep>filter_names=[f<for>f filter_names.split(',')<if>f]<if_stmt>pool<is><none><and>adblock_rules<is><none># skip validation <block_start><return>filter_names<block_end><if_stmt><not>filter_names<block_start><return>filter_names<block_end><if_stmt>pool<is><not><none><block_start>adblock_rules=pool.network_manager_factory.adblock_rules<if_stmt>adblock_rules<is><none><block_start>self.raise_error("filters" "Invalid filter names: %s"%(filter_names ))<block_end><block_end><if_stmt>adblock_rules<is><not><none><block_start>unknown_filters=adblock_rules.get_unknown_filters(filter_names)<if_stmt>unknown_filters<block_start>self.raise_error("filters" "Invalid filter names: %s"%(unknown_filters ))<block_end><block_end><return>filter_names<block_end><def_stmt>get_allowed_domains self<block_start>allowed_domains=self.get("allowed_domains" default=<none>)<if_stmt>allowed_domains<is><not><none><block_start><return>allowed_domains.split(',')<block_end><block_end><def_stmt>get_allowed_content_types self<block_start>content_types=self.get("allowed_content_types" default=['*'])<if_stmt>isinstance(content_types str)<block_start>content_types=list(filter(<none> content_types.split(',')))<block_end><return>content_types<block_end><def_stmt>get_forbidden_content_types self<block_start>content_types=self.get("forbidden_content_types" default=[])<if_stmt>isinstance(content_types str)<block_start>content_types=list(filter(<none> content_types.split(',')))<block_end><return>content_types<block_end><def_stmt>get_html5_media self<block_start><return>self._get_bool("html5_media" defaults.HTML5_MEDIA_ENABLED)<block_end><def_stmt>get_engine self browser_engines_enabled=<none><block_start>engine=self.get("engine" default="webkit" type=str)<if_stmt>engine<not><in>{"webkit" "chromium"}<block_start>self.raise_error("engine" "Unknown render engine {}".format(engine))<block_end><if_stmt>browser_engines_enabled<is><not><none><block_start><if_stmt>engine<not><in>browser_engines_enabled<block_start>self.raise_error("engine" "Disabled render engine {}".format(engine))<block_end><block_end><return>engine<block_end><def_stmt>get_http2 self<block_start>engine=self.get_engine()<if_stmt>self.get_engine()<eq>"webkit"<block_start>default=defaults.WEBKIT_HTTP2_ENABLED<block_end><else_stmt><block_start><assert_stmt>engine<eq>'chromium'<line_sep>default=defaults.CHROMIUM_HTTP2_ENABLED<block_end><return>self._get_bool("http2" default)<block_end><def_stmt>get_common_params self js_profiles_path<block_start>wait=self.get_wait()<line_sep><return>{'url':self.get_url() 'baseurl':self.get_baseurl() 'wait':wait 'resource_timeout':self.get_resource_timeout() 
'viewport':self.get_viewport(wait) 'render_all':self.get_render_all(wait) 'images':self.get_images() 'headers':self.get_headers() 'proxy':self.get_proxy() 'js_profile':self.get_js_profile(js_profiles_path) 'js_source':self.get_js_source() 'http_method':self.get_http_method() 'body':self.get_body() 'html5_media':self.get_html5_media() 'http2':self.get_http2() # 'lua': self.get_lua(), }<block_end><def_stmt>get_image_params self<block_start><return>{'width':self.get_width() 'height':self.get_height() 'scale_method':self.get_scale_method()}<block_end><def_stmt>get_png_params self<block_start><return>self.get_image_params()<block_end><def_stmt>get_jpeg_params self<block_start>params={'quality':self.get_quality()}<line_sep>params.update(self.get_image_params())<line_sep><return>params<block_end><def_stmt>get_include_params self<block_start><return>dict(html=self._get_bool("html" defaults.DO_HTML) iframes=self._get_bool("iframes" defaults.DO_IFRAMES) png=self._get_bool("png" defaults.DO_PNG) jpeg=self._get_bool("jpeg" defaults.DO_JPEG) script=self._get_bool("script" defaults.SHOW_SCRIPT) console=self._get_bool("console" defaults.SHOW_CONSOLE) history=self._get_bool("history" defaults.SHOW_HISTORY) har=self._get_bool("har" defaults.SHOW_HAR) )<block_end><block_end><def_stmt>validate_size_str size_str<block_start>""" Validate size string in WxH format. Can be used to validate both viewport and window size strings. Does not special-case ``'full'`` viewport. Raises ``ValueError`` if anything goes wrong. :param size_str: string to validate """<line_sep>max_width=defaults.VIEWPORT_MAX_WIDTH<line_sep>max_heigth=defaults.VIEWPORT_MAX_HEIGTH<line_sep>max_area=defaults.VIEWPORT_MAX_AREA<try_stmt><block_start>w,h=map(int size_str.split('x'))<block_end><except_stmt>ValueError<block_start><raise>ValueError("Invalid viewport format: %s"%size_str)<block_end><else_stmt><block_start><if_stmt><not>((0<l>w<le>max_width)<and>(0<l>h<le>max_heigth)<and>(w<times>h<l>max_area))<block_start><raise>ValueError("Viewport (%dx%d, area=%d) is out of range (%dx%d, area=%d)"%(w h w<times>h max_width max_heigth max_area))<block_end><block_end><block_end>
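# --- Illustrative sketch (not part of the original module) ---------------
# A standalone version of the "WxH" size-string check that validate_size_str
# performs above. The width/height/area limits used here are placeholder
# values for illustration only; the real limits come from the defaults
# module, which is not shown in this chunk.
def check_size_str(size_str, max_w=20000, max_h=20000, max_area=4000 * 4000):
    try:
        w, h = map(int, size_str.split('x'))
    except ValueError:
        raise ValueError("Invalid viewport format: %s" % size_str)
    if not (0 < w <= max_w and 0 < h <= max_h and w * h < max_area):
        raise ValueError("Viewport %dx%d is out of range" % (w, h))
    return w, h

# check_size_str("1024x768")  -> (1024, 768)
# check_size_str("1024")      -> raises ValueError (no 'x' separator)
# check_size_str("0x100")     -> raises ValueError (width must be positive)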
# Set up configuration variables __all__=['custom_viewer' 'qglue' 'test']<import_stmt>os<import_stmt>sys<import_from_stmt>pkg_resources get_distribution DistributionNotFound<try_stmt><block_start>__version__=get_distribution('glue-core').version<block_end><except_stmt>DistributionNotFound<block_start>__version__='undefined'<block_end><import_from_stmt>._mpl_backend MatplotlibBackendSetter<line_sep>sys.meta_path.append(MatplotlibBackendSetter())<import_from_stmt>glue.viewers.custom.helper custom_viewer<line_sep># Load user's configuration file <import_from_stmt>.config load_configuration<line_sep>env=load_configuration()<import_from_stmt>.qglue qglue<import_from_stmt>.main load_plugins# noqa <def_stmt>test no_optional_skip=<false><block_start><import_from_stmt>pytest main<line_sep>root=os.path.abspath(os.path.dirname(__file__))<line_sep>args=[root '-x']<if_stmt>no_optional_skip<block_start>args.append('--no-optional-skip')<block_end><return>main(args=args)<block_end><import_from_stmt>glue._settings_helpers load_settings<line_sep>load_settings()<line_sep># In PyQt 5.5+, PyQt overrides the default exception catching and fatally # crashes the Qt application without printing out any details about the error. # Below we revert the exception hook to the original Python one. Note that we # can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect # the default excepthook is in place and override it. <def_stmt>handle_exception exc_type exc_value exc_traceback<block_start>sys.__excepthook__(exc_type exc_value exc_traceback)<block_end>sys.excepthook=handle_exception<line_sep>
<import_stmt>requests<import_from_stmt>allauth.socialaccount.providers.oauth2.views OAuth2Adapter OAuth2CallbackView OAuth2LoginView <import_from_stmt>.provider DropboxOAuth2Provider<class_stmt>DropboxOAuth2Adapter(OAuth2Adapter)<block_start>provider_id=DropboxOAuth2Provider.id<line_sep>access_token_url="https://api.dropbox.com/oauth2/token"<line_sep>authorize_url="https://www.dropbox.com/oauth2/authorize"<line_sep>profile_url="https://api.dropbox.com/2/users/get_current_account"<line_sep>redirect_uri_protocol="https"<def_stmt>complete_login self request app token **kwargs<block_start>response=requests.post(self.profile_url headers={"Authorization":"Bearer %s"%(token.token )} )<line_sep>response.raise_for_status()<line_sep><return>self.get_provider().sociallogin_from_response(request response.json())<block_end><block_end>oauth_login=OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)<line_sep>oauth_callback=OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)<line_sep>
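# --- Illustrative sketch (not part of the original adapter) --------------
# The profile fetch that complete_login() performs can be reproduced outside
# the allauth machinery with a plain requests call; the token value is a
# placeholder, not a working credential.
def fetch_dropbox_account(access_token):
    resp = requests.post(
        "https://api.dropbox.com/2/users/get_current_account",
        headers={"Authorization": "Bearer %s" % access_token},
    )
    resp.raise_for_status()
    return resp.json()  # the JSON fed to sociallogin_from_response() above

# fetch_dropbox_account("<access-token>")  -> dict with the current account info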
""" 3D visualization primitives based on Plotly. We might want to instead use a more powerful library like Open3D. Plotly however supports animations, buttons and sliders. 1) Initialize a figure with `fig = init_figure()` 2) Plot points, cameras, lines, or create a slider animation. 3) Call `fig.show()` to render the figure. """<import_stmt>plotly.graph_objects<as>go<import_stmt>numpy<as>np<import_from_stmt>..pixlib.geometry.utils to_homogeneous<def_stmt>init_figure height=800<block_start>"""Initialize a 3D figure."""<line_sep>fig=go.Figure()<line_sep>fig.update_layout(height=height scene_camera=dict(eye=dict(x=0. y=-.1 z=-2) up=dict(x=0 y=-1. z=0)) scene=dict(xaxis=dict(showbackground=<false>) yaxis=dict(showbackground=<false>) aspectmode='data' dragmode='orbit') margin=dict(l=0 r=0 b=0 t=0 pad=0))<line_sep># noqa E741 <return>fig<block_end><def_stmt>plot_points fig pts color='rgba(255, 0, 0, 1)' ps=2<block_start>"""Plot a set of 3D points."""<line_sep>x,y,z=pts.T<line_sep>tr=go.Scatter3d(x=x y=y z=z mode='markers' marker_size=ps marker_color=color marker_line_width=.2)<line_sep>fig.add_trace(tr)<block_end><def_stmt>plot_camera fig R t K color='rgb(0, 0, 255)'<block_start>"""Plot a camera as a cone with camera frustum."""<line_sep>x,y,z=t<line_sep>u,v,[email protected]([0 0 1])<line_sep>tr=go.Cone(x=[x] y=[y] z=[z] u=[u] v=[v] w=[w] anchor='tip' showscale=<false> colorscale=[[0 color] [1 color]] sizemode='absolute')<line_sep>fig.add_trace(tr)<line_sep>W,H=K[0 2]<times>2 K[1 2]<times>2<line_sep>corners=np.array([[0 0] [W 0] [W H] [0 H] [0 0]])<line_sep>corners=to_homogeneous(corners)@np.linalg.inv(K).T<line_sep>corners=(corners/2)@R.T+t<line_sep>x,y,z=corners.T<line_sep>tr=go.Scatter3d(x=x y=y z=z line=dict(color='rgba(0, 0, 0, .5)') marker=dict(size=0.0001) showlegend=<false>)<line_sep>fig.add_trace(tr)<block_end><def_stmt>create_slider_animation fig traces<block_start>"""Create a slider that animates a list of traces (e.g. 3D points)."""<line_sep>slider={'steps':[]}<line_sep>frames=[]<line_sep>fig.add_trace(traces[0])<line_sep>idx=len(fig.data)-1<for_stmt>i,tr enumerate(traces)<block_start>frames.append(go.Frame(name=str(i) traces=[idx] data=[tr]))<line_sep>step={"args":[[str(i)] {"frame":{"redraw":<true>} "mode":"immediate"}] "label":i "method":"animate"}<line_sep>slider['steps'].append(step)<block_end>fig.frames=tuple(frames)<line_sep>fig.layout.sliders=(slider )<block_end>
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>parameterized parameterized<import_from_stmt>monai.networks eval_mode<import_from_stmt>monai.networks.blocks SubpixelUpsample<import_from_stmt>monai.networks.layers.factories Conv<line_sep>TEST_CASE_SUBPIXEL=[]<for_stmt>inch range(1 5)<block_start><for_stmt>dim range(1 4)<block_start><for_stmt>factor range(1 3)<block_start>test_case=[{"dimensions":dim "in_channels":inch "scale_factor":factor} (2 inch *([8]<times>dim)) (2 inch *([8<times>factor]<times>dim)) ]<line_sep>TEST_CASE_SUBPIXEL.append(test_case)<block_end><block_end><block_end>TEST_CASE_SUBPIXEL_2D_EXTRA=[{"dimensions":2 "in_channels":2 "scale_factor":3} (2 2 8 4) # different size for H and W (2 2 24 12) ]<line_sep>TEST_CASE_SUBPIXEL_3D_EXTRA=[{"dimensions":3 "in_channels":1 "scale_factor":2} (2 1 16 8 4) # different size for H, W and D (2 1 32 16 8) ]<line_sep>conv_block=nn.Sequential(Conv[Conv.CONV 3](1 4 kernel_size=1) Conv[Conv.CONV 3](4 8 kernel_size=3 stride=1 padding=1))<line_sep>TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA=[{"dimensions":3 "in_channels":1 "scale_factor":2 "conv_block":conv_block} (2 1 16 8 4) # different size for H, W and D (2 1 32 16 8) ]<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)<line_sep>TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)<line_sep># add every test back with the pad/pool sequential component omitted <for_stmt>tests list(TEST_CASE_SUBPIXEL)<block_start>args:dict=tests[0]# type: ignore args=dict(args)<line_sep>args["apply_pad_pool"]=<false><line_sep>TEST_CASE_SUBPIXEL.append([args tests[1] tests[2]])<block_end><class_stmt>TestSUBPIXEL(unittest.TestCase)<block_start>@parameterized.expand(TEST_CASE_SUBPIXEL)<def_stmt>test_subpixel_shape self input_param input_shape expected_shape<block_start>net=SubpixelUpsample(**input_param)<with_stmt>eval_mode(net)<block_start>result=net.forward(torch.randn(input_shape))<line_sep>self.assertEqual(result.shape expected_shape)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
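# --- Illustrative usage (not part of the original test file) -------------
# The parameterized cases above boil down to this behaviour: SubpixelUpsample
# keeps the channel count and multiplies every spatial dimension by
# scale_factor.
def _demo_subpixel_shape():
    net = SubpixelUpsample(dimensions=2, in_channels=3, scale_factor=2)
    x = torch.randn(2, 3, 8, 8)
    with eval_mode(net):
        y = net(x)
    return y.shape  # torch.Size([2, 3, 16, 16])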
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2018-2019, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.autograd Function Variable<import_from_stmt>torch.nn Module<def_stmt>check_type var t name<block_start><if_stmt>var.dtype<is><not>t<block_start><raise>TypeError("{} must be {}".format(name t))<block_end><block_end><def_stmt>check_contiguous var name<block_start><if_stmt><not>var.is_contiguous()<block_start><raise>ValueError("{} must be contiguous".format(name))<block_end><block_end><def_stmt>check_dim var dim name<block_start><if_stmt>len(var.shape)<ne>dim<block_start><raise>ValueError("{} must be {}D".format(name dim))<block_end><block_end><def_stmt>certify_inputs log_probs labels lengths label_lengths# check_type(log_probs, torch.float32, "log_probs") <block_start>check_type(labels torch.int32 "labels")<line_sep>check_type(label_lengths torch.int32 "label_lengths")<line_sep>check_type(lengths torch.int32 "lengths")<line_sep>check_contiguous(log_probs "log_probs")<line_sep>check_contiguous(labels "labels")<line_sep>check_contiguous(label_lengths "label_lengths")<line_sep>check_contiguous(lengths "lengths")<if_stmt>lengths.shape[0]<ne>log_probs.shape[0]<block_start><raise>ValueError(f"Must have a length per example. "<concat>f"Given lengths dim: {lengths.shape[0]}, "<concat>f"Log probs dim : {log_probs.shape[0]}")<block_end><if_stmt>label_lengths.shape[0]<ne>log_probs.shape[0]<block_start><raise>ValueError("Must have a label length per example. "<concat>f"Given label lengths dim : {label_lengths.shape[0]}, "<concat>f"Log probs dim : {log_probs.shape[0]}")<block_end>check_dim(log_probs 4 "log_probs")<line_sep>check_dim(labels 2 "labels")<line_sep>check_dim(lengths 1 "lenghts")<line_sep>check_dim(label_lengths 1 "label_lenghts")<line_sep>max_T=torch.max(lengths)<line_sep>max_U=torch.max(label_lengths)<line_sep>T,U=log_probs.shape[1:3]<if_stmt>T<ne>max_T<block_start><raise>ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")<block_end><if_stmt>U<ne>max_U+1<block_start><raise>ValueError(f"Output length mismatch! 
Given U: {U}, Expected max U from target lengths: {max_U} + 1")<block_end><block_end><def_stmt>_assert_no_grad tensor<block_start><assert_stmt><not>tensor.requires_grad ("gradients only computed for log_probs - please "<concat>"mark other tensors as not requiring gradients")<block_end><def_stmt>forward_pass log_probs labels blank<block_start>""" Computes probability of the forward variable alpha. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the forward variable probabilities - alpha of shape [T, U] and the log likelihood of this forward step. """<line_sep>T,U,_=log_probs.shape<line_sep>alphas=np.zeros((T U) dtype='f')<for_stmt>t range(1 T)<block_start>alphas[t 0]=alphas[t-1 0]+log_probs[t-1 0 blank]<block_end><for_stmt>u range(1 U)<block_start>alphas[0 u]=alphas[0 u-1]+log_probs[0 u-1 labels[u-1]]<block_end><for_stmt>t range(1 T)<block_start><for_stmt>u range(1 U)<block_start>no_emit=alphas[t-1 u]+log_probs[t-1 u blank]<line_sep>emit=alphas[t u-1]+log_probs[t u-1 labels[u-1]]<line_sep>alphas[t u]=np.logaddexp(emit no_emit)<block_end><block_end>loglike=alphas[T-1 U-1]+log_probs[T-1 U-1 blank]<line_sep><return>alphas loglike<block_end><def_stmt>backward_pass log_probs labels blank<block_start>""" Computes probability of the backward variable beta. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the backward variable probabilities - beta of shape [T, U] and the log likelihood of this backward step. """<line_sep>T,U,_=log_probs.shape<line_sep>betas=np.zeros((T U) dtype='f')<line_sep>betas[T-1 U-1]=log_probs[T-1 U-1 blank]<for_stmt>t reversed(range(T-1))<block_start>betas[t U-1]=betas[t+1 U-1]+log_probs[t U-1 blank]<block_end><for_stmt>u reversed(range(U-1))<block_start>betas[T-1 u]=betas[T-1 u+1]+log_probs[T-1 u labels[u]]<block_end><for_stmt>t reversed(range(T-1))<block_start><for_stmt>u reversed(range(U-1))<block_start>no_emit=betas[t+1 u]+log_probs[t u blank]<line_sep>emit=betas[t u+1]+log_probs[t u labels[u]]<line_sep>betas[t u]=np.logaddexp(emit no_emit)<block_end><block_end><return>betas betas[0 0]<block_end><def_stmt>compute_gradient log_probs alphas betas labels blank fastemit_lambda<block_start>""" Computes the gradients of the log_probs with respect to the log probability of this step occuring. Args: Args: log_probs: Tensor of shape [T, U, V+1] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Tensor of shape [T, U] which represents the backward variable. labels: Labels of shape [B, U] blank: Index of the blank token. 
Returns: Gradients of shape [T, U, V+1] with respect to the forward log probability """<line_sep>T,U,_=log_probs.shape<line_sep>grads=np.full(log_probs.shape -float("inf"))<line_sep>log_like=betas[0 0]# == alphas[T - 1, U - 1] + betas[T - 1, U - 1] # // grad to last blank transition grads[T-1 U-1 blank]=alphas[T-1 U-1]<line_sep>grads[:T-1 : blank]=alphas[:T-1 :]+betas[1: :]<line_sep># // grad to label transition <for_stmt>u,l enumerate(labels)<block_start>grads[: u l]=alphas[: u]+betas[: u+1]<block_end>grads=-np.exp(grads+log_probs-log_like)<if_stmt>fastemit_lambda<g>0.0<block_start><for_stmt>u,l enumerate(labels)<block_start>grads[: u l]=(1.0+fastemit_lambda)<times>grads[: u l]<block_end><block_end><return>grads<block_end><def_stmt>fastemit_regularization log_probs labels alphas betas blank fastemit_lambda<block_start>""" Describes the computation of FastEmit regularization from the paper - [FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148) Args: log_probs: Tensor of shape [T, U, V+1] labels: Unused. Labels of shape [B, U] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Unused. Tensor of shape [T, U] which represents the backward variable. blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: The regularized negative log likelihood - lambda * P˜(At, u|x) """<line_sep># General calculation of the fastemit regularization alignments T,U,_=log_probs.shape<line_sep># alignment = np.zeros((T, U), dtype='float32') # # for t in range(0, T): # alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1] # # for t in range(0, T): # for u in range(0, U - 1): # emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1] # alignment[t, u] = emit # reg = fastemit_lambda * (alignment[T - 1, U - 1]) # The above is equivalent to below, without need of computing above # reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1]) # The above is also equivalent to below, without need of computing the betas alignment matrix reg=fastemit_lambda<times>(alphas[T-1 U-1]+log_probs[T-1 U-1 blank])<line_sep><return>-reg<block_end><def_stmt>transduce log_probs labels blank=0 fastemit_lambda=0.0<block_start>""" Args: log_probs: 3D array with shape [input len, output len + 1, vocab size] labels: 1D array with shape [output time steps] blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: float: The negative log-likelihood 3D array: Gradients with respect to the unnormalized input actications 2d arrays: Alphas matrix (TxU) 2d array: Betas matrix (TxU) """<line_sep>alphas,ll_forward=forward_pass(log_probs labels blank)<line_sep>betas,ll_backward=backward_pass(log_probs labels blank)<line_sep>grads=compute_gradient(log_probs alphas betas labels blank fastemit_lambda)<line_sep><return>-ll_forward grads alphas betas<block_end><def_stmt>transduce_batch log_probs labels flen glen blank=0 fastemit_lambda=0.0<block_start>""" Compute the transducer loss of the batch. Args: log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax. labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning. flen: Length vector of the acoustic sequence. glen: Length vector of the target sequence. blank: Id of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. 
Returns: Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix. """<line_sep>grads=np.zeros_like(log_probs)<line_sep>costs=[]<for_stmt>b range(log_probs.shape[0])<block_start>t=int(flen[b])<line_sep>u=int(glen[b])+1<line_sep>ll,g,alphas,betas=transduce(log_probs[b :t :u :] labels[b :u-1] blank fastemit_lambda)<line_sep>grads[b :t :u :]=g<line_sep>reg=fastemit_regularization(log_probs[b :t :u :] labels[b :u-1] alphas betas blank fastemit_lambda)<line_sep>ll<augadd>reg<line_sep>costs.append(ll)<block_end><return>costs grads<block_end><class_stmt>_RNNT(Function)<block_start>@staticmethod<def_stmt>forward ctx acts labels act_lens label_lens blank fastemit_lambda<block_start>costs,grads=transduce_batch(acts.detach().cpu().numpy() labels.cpu().numpy() act_lens.cpu().numpy() label_lens.cpu().numpy() blank fastemit_lambda )<line_sep>costs=torch.FloatTensor([sum(costs)])<line_sep>grads=torch.Tensor(grads).to(acts)<line_sep>ctx.grads=grads<line_sep><return>costs<block_end>@staticmethod<def_stmt>backward ctx grad_output<block_start><return>ctx.grads <none> <none> <none> <none> <none><block_end><block_end><class_stmt>RNNTLoss(Module)<block_start>""" Parameters: `blank_label` (int): default 0 - label index of blank token fastemit_lambda: Float scaling factor for FastEmit regularization. """<def_stmt>__init__ self blank:int=0 fastemit_lambda:float=0.0<block_start>super(RNNTLoss self).__init__()<line_sep>self.blank=blank<line_sep>self.fastemit_lambda=fastemit_lambda<line_sep>self.rnnt=_RNNT.apply<block_end><def_stmt>forward self acts labels act_lens label_lens<block_start><assert_stmt>len(labels.size())<eq>2<line_sep>_assert_no_grad(labels)<line_sep>_assert_no_grad(act_lens)<line_sep>_assert_no_grad(label_lens)<line_sep>certify_inputs(acts labels act_lens label_lens)<line_sep>acts=torch.nn.functional.log_softmax(acts -1)<line_sep><return>self.rnnt(acts labels act_lens label_lens self.blank self.fastemit_lambda)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>loss=RNNTLoss(fastemit_lambda=0.01)<line_sep>torch.manual_seed(0)<line_sep>acts=torch.randn(1 2 5 3)<line_sep>labels=torch.tensor([[0 2 1 2]] dtype=torch.int32)<line_sep>act_lens=torch.tensor([2] dtype=torch.int32)<line_sep>label_lens=torch.tensor([len(labels[0])] dtype=torch.int32)<line_sep>loss_val=loss(acts labels act_lens label_lens)<block_end>
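# --- Illustrative check (not part of the original file) ------------------
# For any valid inputs, the forward and backward recursions above agree on
# the total log-likelihood: alphas[T-1, U-1] + log_probs[T-1, U-1, blank]
# equals betas[0, 0] up to floating-point error.
def _check_forward_backward_consistency(T=4, U=3, V=5, blank=0, seed=0):
    rng = np.random.RandomState(seed)
    acts = rng.randn(T, U, V + 1)
    # normalise to log-probabilities over the vocabulary axis
    log_probs = acts - np.log(np.sum(np.exp(acts), axis=-1, keepdims=True))
    labels = rng.randint(1, V + 1, size=U - 1)  # non-blank labels
    _, ll_fwd = forward_pass(log_probs, labels, blank)
    _, ll_bwd = backward_pass(log_probs, labels, blank)
    assert np.allclose(ll_fwd, ll_bwd, atol=1e-4)
    return ll_fwd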
#coding:utf8 ''' Created on 2013-8-21 @author: lan (www.9miao.com) '''<import_stmt>itertools<import_stmt>datetime<def_stmt>safeunicode obj encoding='utf-8'<block_start>r""" Converts any given object to unicode string. >>> safeunicode('hello') u'hello' >>> safeunicode(2) u'2' >>> safeunicode('\xe1\x88\xb4') u'\u1234' """<line_sep>t=type(obj)<if_stmt>t<is>unicode<block_start><return>obj<block_end><elif_stmt>t<is>str<block_start><return>obj.decode(encoding)<block_end><elif_stmt>t<in>[int float bool]<block_start><return>unicode(obj)<block_end><elif_stmt>hasattr(obj '__unicode__')<or>isinstance(obj unicode)<block_start><return>unicode(obj)<block_end><else_stmt><block_start><return>str(obj).decode(encoding)<block_end><block_end><def_stmt>safestr obj encoding='utf-8'<block_start>r""" Converts any given object to utf-8 encoded string. >>> safestr('hello') 'hello' >>> safestr(u'\u1234') '\xe1\x88\xb4' >>> safestr(2) '2' """<if_stmt>isinstance(obj unicode)<block_start><return>obj.encode(encoding)<block_end><elif_stmt>isinstance(obj str)<block_start><return>obj<block_end><elif_stmt>hasattr(obj 'next')# iterator <block_start><return>itertools.imap(safestr obj)<block_end><else_stmt><block_start><return>str(obj)<block_end><block_end><def_stmt>sqlify obj<block_start>""" converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' """<line_sep># because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... <if_stmt>obj<is><none><block_start><return>'NULL'<block_end><elif_stmt>obj<is><true><block_start><return>"'t'"<block_end><elif_stmt>obj<is><false><block_start><return>"'f'"<block_end><elif_stmt>datetime<and>isinstance(obj datetime.datetime)<block_start><return>repr(obj.isoformat())<block_end><else_stmt><block_start><if_stmt>isinstance(obj unicode)<block_start>obj=obj.encode('utf8')<block_end><return>repr(obj)<block_end><block_end><def_stmt>sqllist lst<block_start>""" Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' """<if_stmt>isinstance(lst basestring)<block_start><return>lst<block_end><else_stmt><block_start><return>', '.join(lst)<block_end><block_end><def_stmt>_sqllist values<block_start>""" >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> """<line_sep>items=[]<line_sep>items.append('(')<for_stmt>i,v enumerate(values)<block_start><if_stmt>i<ne>0<block_start>items.append(', ')<block_end>items.append(sqlparam(v))<block_end>items.append(')')<line_sep><return>SQLQuery(items)<block_end><def_stmt>sqlquote a<block_start>""" Ensures `a` is quoted properly for use in a SQL query. >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> """<if_stmt>isinstance(a list)<block_start><return>_sqllist(a)<block_end><else_stmt><block_start><return>sqlparam(a).sqlquery()<block_end><block_end><def_stmt>_interpolate sformat<block_start>""" Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. 
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) """<import_from_stmt>tokenize tokenprog<line_sep>tokenprog=tokenprog<def_stmt>matchorfail text pos<block_start>match=tokenprog.match(text pos)<if_stmt>match<is><none><block_start><raise>_ItplError(text pos)<block_end><return>match match.end()<block_end>namechars="abcdefghijklmnopqrstuvwxyz"<concat>"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"<line_sep>chunks=[]<line_sep>pos=0<while_stmt>1<block_start>dollar=sformat.find("$" pos)<if_stmt>dollar<l>0<block_start><break><block_end>nextchar=sformat[dollar+1]<if_stmt>nextchar<eq>"{"<block_start>chunks.append((0 sformat[pos:dollar]))<line_sep>pos,level=dollar+2 1<while_stmt>level<block_start>match,pos=matchorfail(sformat pos)<line_sep>tstart,tend=match.regs[3]<line_sep>token=sformat[tstart:tend]<if_stmt>token<eq>"{"<block_start>level=level+1<block_end><elif_stmt>token<eq>"}"<block_start>level=level-1<block_end><block_end>chunks.append((1 sformat[dollar+2:pos-1]))<block_end><elif_stmt>nextchar<in>namechars<block_start>chunks.append((0 sformat[pos:dollar]))<line_sep>match,pos=matchorfail(sformat dollar+1)<while_stmt>pos<l>len(sformat)<block_start><if_stmt>sformat[pos]<eq>"."<and>pos+1<l>len(sformat)<and>sformat[pos+1]<in>namechars<block_start>match,pos=matchorfail(sformat pos+1)<block_end><elif_stmt>sformat[pos]<in>"(["<block_start>pos,level=pos+1 1<while_stmt>level<block_start>match,pos=matchorfail(sformat pos)<line_sep>tstart,tend=match.regs[3]<line_sep>token=sformat[tstart:tend]<if_stmt>token[0]<in>"(["<block_start>level=level+1<block_end><elif_stmt>token[0]<in>")]"<block_start>level=level-1<block_end><block_end><block_end><else_stmt><block_start><break><block_end><block_end>chunks.append((1 sformat[dollar+1:pos]))<block_end><else_stmt><block_start>chunks.append((0 sformat[pos:dollar+1]))<line_sep>pos=dollar+1+(nextchar<eq>"$")<block_end><block_end><if_stmt>pos<l>len(sformat)<block_start>chunks.append((0 sformat[pos:]))<block_end><return>chunks<block_end><def_stmt>sqlwhere dictionary grouping=' AND '<block_start>""" Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' """<line_sep><return>SQLQuery.join([k+' = '+sqlparam(v)<for>k,v dictionary.items()] grouping)<block_end><def_stmt>reparam string_ dictionary<block_start>""" Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. 
>>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> """<line_sep>dictionary=dictionary.copy()# eval mucks with it result=[]<for_stmt>live,chunk _interpolate(string_)<block_start><if_stmt>live<block_start>v=eval(chunk dictionary)<line_sep>result.append(sqlquote(v))<block_end><else_stmt><block_start>result.append(chunk)<block_end><block_end><return>SQLQuery.join(result '')<block_end><class_stmt>UnknownParamstyle(Exception)<block_start>""" raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) """<line_sep><pass><block_end><class_stmt>_ItplError(ValueError)<block_start><def_stmt>__init__ self text pos<block_start>ValueError.__init__(self)<line_sep>self.text=text<line_sep>self.pos=pos<block_end><def_stmt>__str__ self<block_start><return>"unfinished expression in %s at char %d"%(repr(self.text) self.pos)<block_end><block_end><class_stmt>SQLParam(object)<block_start>""" Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] """<line_sep>__slots__=["value"]<def_stmt>__init__ self value<block_start>self.value=value<block_end><def_stmt>get_marker self paramstyle='pyformat'<block_start><if_stmt>paramstyle<eq>'qmark'<block_start><return>'?'<block_end><elif_stmt>paramstyle<eq>'numeric'<block_start><return>':1'<block_end><elif_stmt>paramstyle<is><none><or>paramstyle<in>['format' 'pyformat']<block_start><return>'%s'<block_end><raise>UnknownParamstyle paramstyle<block_end><def_stmt>sqlquery self<block_start><return>SQLQuery([self])<block_end><def_stmt>__add__ self other<block_start><return>self.sqlquery()+other<block_end><def_stmt>__radd__ self other<block_start><return>other+self.sqlquery()<block_end><def_stmt>__str__ self<block_start><return>str(self.value)<block_end><def_stmt>__repr__ self<block_start><return>'<param: %s>'%repr(self.value)<block_end><block_end>sqlparam=SQLParam<class_stmt>SQLQuery(object)<block_start>""" You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. """<line_sep>__slots__=["items"]<line_sep># tested in sqlquote's docstring <def_stmt>__init__ self items=<none><block_start>r"""Creates a new SQLQuery. 
>>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> """<if_stmt>items<is><none><block_start>self.items=[]<block_end><elif_stmt>isinstance(items list)<block_start>self.items=items<block_end><elif_stmt>isinstance(items SQLParam)<block_start>self.items=[items]<block_end><elif_stmt>isinstance(items SQLQuery)<block_start>self.items=list(items.items)<block_end><else_stmt><block_start>self.items=[items]<block_end># Take care of SQLLiterals <for_stmt>i,item enumerate(self.items)<block_start><if_stmt>isinstance(item SQLParam)<and>isinstance(item.value SQLLiteral)<block_start>self.items[i]=item.value.v<block_end><block_end><block_end><def_stmt>append self value<block_start>self.items.append(value)<block_end><def_stmt>__add__ self other<block_start><if_stmt>isinstance(other basestring)<block_start>items=[other]<block_end><elif_stmt>isinstance(other SQLQuery)<block_start>items=other.items<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>SQLQuery(self.items+items)<block_end><def_stmt>__radd__ self other<block_start><if_stmt>isinstance(other basestring)<block_start>items=[other]<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>SQLQuery(items+self.items)<block_end><def_stmt>__iadd__ self other<block_start><if_stmt>isinstance(other (basestring SQLParam))<block_start>self.items.append(other)<block_end><elif_stmt>isinstance(other SQLQuery)<block_start>self.items.extend(other.items)<block_end><else_stmt><block_start><return>NotImplemented<block_end><return>self<block_end><def_stmt>__len__ self<block_start><return>len(self.query())<block_end><def_stmt>query self paramstyle=<none><block_start>""" Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' """<line_sep>s=[]<for_stmt>x self.items<block_start><if_stmt>isinstance(x SQLParam)<block_start>x=x.get_marker(paramstyle)<line_sep>s.append(safestr(x))<block_end><else_stmt><block_start>x=safestr(x)<line_sep># automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped <if_stmt>paramstyle<in>['format' 'pyformat']<block_start><if_stmt>'%'<in>x<and>'%%'<not><in>x<block_start>x=x.replace('%' '%%')<block_end><block_end>s.append(x)<block_end><block_end><return>"".join(s)<block_end><def_stmt>values self<block_start>""" Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] """<line_sep><return>[i.value<for>i self.items<if>isinstance(i SQLParam)]<block_end><def_stmt>join items sep=' ' prefix=<none> suffix=<none> target=<none><block_start>""" Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')') <sql: '(a, b)'> If target argument is provided, the items are appended to target instead of creating a new SQLQuery. 
"""<if_stmt>target<is><none><block_start>target=SQLQuery()<block_end>target_items=target.items<if_stmt>prefix<block_start>target_items.append(prefix)<block_end><for_stmt>i,item enumerate(items)<block_start><if_stmt>i<ne>0<block_start>target_items.append(sep)<block_end><if_stmt>isinstance(item SQLQuery)<block_start>target_items.extend(item.items)<block_end><else_stmt><block_start>target_items.append(item)<block_end><block_end><if_stmt>suffix<block_start>target_items.append(suffix)<block_end><return>target<block_end>join=staticmethod(join)<def_stmt>_str self<block_start><try_stmt><block_start><return>self.query()%tuple([sqlify(x)<for>x self.values()])<block_end><except_stmt>(ValueError TypeError)<block_start><return>self.query()<block_end><block_end><def_stmt>__str__ self<block_start><return>safestr(self._str())<block_end><def_stmt>__unicode__ self<block_start><return>safeunicode(self._str())<block_end><def_stmt>__repr__ self<block_start><return>'<sql: %s>'%repr(str(self))<block_end><block_end><class_stmt>SQLLiteral<block_start>""" Protects a string from `sqlquote`. >>> sqlquote('NOW()') <sql: "'NOW()'"> >>> sqlquote(SQLLiteral('NOW()')) <sql: 'NOW()'> """<def_stmt>__init__ self v<block_start>self.v=v<block_end><def_stmt>__repr__ self<block_start><return>self.v<block_end><block_end><class_stmt>SQLProducer<block_start>"""Database"""<def_stmt>__init__ self<block_start>"""Creates a database. """<line_sep><pass><block_end><def_stmt>query self sql_query processed=<false> svars=<none><block_start>""" Execute SQL query `sql_query` using dictionary `vars` to interpolate it. If `processed=True`, `vars` is a `reparam`-style list to use instead of interpolating. >>> db = DB(None, {}) >>> db.query("SELECT * FROM foo", _test=True) <sql: 'SELECT * FROM foo'> >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> """<if_stmt>svars<is><none><block_start>svars={}<block_end><if_stmt><not>processed<and><not>isinstance(sql_query SQLQuery)<block_start>sql_query=reparam(sql_query svars)<block_end><return>sql_query<block_end><def_stmt>sql_clauses self what tables where group order limit offset<block_start><return>(('SELECT' what) ('FROM' sqllist(tables)) ('WHERE' where) ('GROUP BY' group) ('ORDER BY' order) ('LIMIT' limit) ('OFFSET' offset))<block_end><def_stmt>gen_clause self sql val svars<block_start><if_stmt>isinstance(val (int long))<block_start><if_stmt>sql<eq>'WHERE'<block_start>nout='id = '+sqlquote(val)<block_end><else_stmt><block_start>nout=SQLQuery(val)<block_end><block_end><elif_stmt>isinstance(val (list tuple))<and>len(val)<eq>2<block_start>nout=SQLQuery(val[0] val[1])# backwards-compatibility <block_end><elif_stmt>isinstance(val SQLQuery)<block_start>nout=val<block_end><else_stmt><block_start>nout=reparam(val svars)<block_end><def_stmt>xjoin a b<block_start><if_stmt>a<and>b<block_start><return>a+' '+b<block_end><else_stmt><block_start><return>a<or>b<block_end><block_end><return>xjoin(sql nout)<block_end><def_stmt>_where self where svars<block_start><if_stmt>isinstance(where (int long))<block_start>where="id = "+sqlparam(where)<block_end><elif_stmt>isinstance(where (list tuple))<and>len(where)<eq>2<block_start>where=SQLQuery(where[0] where[1])<block_end><elif_stmt>isinstance(where SQLQuery)<block_start><pass><block_end><else_stmt><block_start>where=reparam(where svars)<block_end><return>where<block_end><def_stmt>select self 
tables svars=<none> what='*' where=<none> order=<none> group=<none> limit=<none> offset=<none> _test=<false><block_start>""" Selects `what` from `tables` with clauses `where`, `order`, `group`, `limit`, and `offset`. Uses vars to interpolate. Otherwise, each clause can be a SQLQuery. >>> db = DB(None, {}) >>> db.select('foo', _test=True) <sql: 'SELECT * FROM foo'> >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True) <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'> """<if_stmt>svars<is><none><block_start>svars={}<block_end>sql_clauses=self.sql_clauses(what tables where group order limit offset)<line_sep>clauses=[self.gen_clause(sql val svars)<for>sql,val sql_clauses<if>val<is><not><none>]<line_sep>qout=SQLQuery.join(clauses)<if_stmt>_test<block_start><return>qout<block_end><return>self.query(qout processed=<true>)<block_end><def_stmt>insert self tablename seqname=<none> _test=<false> **values<block_start>""" Inserts `values` into `tablename`. Returns current sequence ID. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True) >>> q <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())"> >>> q.query() 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())' >>> q.values() [2, 'bob'] """<def_stmt>q x<block_start><return>"("+x+")"<block_end><if_stmt>values<block_start>_keys=SQLQuery.join(values.keys() ', ')<line_sep>_values=SQLQuery.join([sqlparam(v)<for>v values.values()] ', ')<line_sep>sql_query="INSERT INTO %s "%tablename+q(_keys)+' VALUES '+q(_values)<block_end><else_stmt><block_start>sql_query=SQLQuery(self._get_insert_default_values_query(tablename))<block_end><return>sql_query<block_end><def_stmt>_get_insert_default_values_query self table<block_start><return>"INSERT INTO %s DEFAULT VALUES"%table<block_end><def_stmt>multiple_insert self tablename values seqname=<none> _test=<false><block_start>""" Inserts multiple rows into `tablename`. The `values` must be a list of dictioanries, one for each row to be inserted, each with the same set of keys. Returns the list of ids of the inserted rows. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> db.supports_multiple_insert = True >>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}] >>> db.multiple_insert('person', values=values, _test=True) <sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')"> """<if_stmt><not>values<block_start><return>[]<block_end><if_stmt><not>self.supports_multiple_insert<block_start>out=[self.insert(tablename seqname=seqname _test=_test **v)<for>v values]<if_stmt>seqname<is><false><block_start><return><none><block_end><else_stmt><block_start><return>out<block_end><block_end>keys=values[0].keys()<line_sep>#@@ make sure all keys are valid # make sure all rows have same keys. 
<for_stmt>v values<block_start><if_stmt>v.keys()<ne>keys<block_start><raise>ValueError 'Bad data'<block_end><block_end>sql_query=SQLQuery('INSERT INTO %s (%s) VALUES '%(tablename ', '.join(keys)))<for_stmt>i,row enumerate(values)<block_start><if_stmt>i<ne>0<block_start>sql_query.append(", ")<block_end>SQLQuery.join([SQLParam(row[k])<for>k keys] sep=", " target=sql_query prefix="(" suffix=")")<block_end><if_stmt>_test<block_start><return>sql_query<block_end>db_cursor=self._db_cursor()<if_stmt>seqname<is><not><false><block_start>sql_query=self._process_insert_query(sql_query tablename seqname)<block_end><if_stmt>isinstance(sql_query tuple)# for some databases, a separate query has to be made to find # the id of the inserted row. <block_start>q1,q2=sql_query<line_sep>self._db_execute(db_cursor q1)<line_sep>self._db_execute(db_cursor q2)<block_end><else_stmt><block_start>self._db_execute(db_cursor sql_query)<block_end><try_stmt><block_start>out=db_cursor.fetchone()[0]<line_sep>out=range(out-len(values)+1 out+1)<block_end><except_stmt>Exception<block_start>out=<none><block_end><if_stmt><not>self.ctx.transactions<block_start>self.ctx.commit()<block_end><return>out<block_end><def_stmt>update self tables where svars=<none> _test=<false> **values<block_start>""" Update `tables` with clause `where` (interpolated using `vars`) and setting `values`. >>> db = DB(None, {}) >>> name = 'Joseph' >>> q = db.update('foo', where='name = $name', name='bob', age=2, ... created=SQLLiteral('NOW()'), vars=locals(), _test=True) >>> q <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'"> >>> q.query() 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s' >>> q.values() [2, 'bob', 'Joseph'] """<if_stmt>svars<is><none><block_start>svars={}<block_end>where=self._where(where svars)<line_sep>query=("UPDATE "+sqllist(tables)+" SET "+sqlwhere(values ', ')+" WHERE "+where)<if_stmt>_test<block_start><return>query<block_end>db_cursor=self._db_cursor()<line_sep>self._db_execute(db_cursor query)<if_stmt><not>self.ctx.transactions<block_start>self.ctx.commit()<block_end><return>db_cursor.rowcount<block_end><def_stmt>delete self table where using=<none> svars=<none> _test=<false><block_start>""" Deletes from `table` with clauses `where` and `using`. >>> db = DB(None, {}) >>> name = 'Joe' >>> db.delete('foo', where='name = $name', vars=locals(), _test=True) <sql: "DELETE FROM foo WHERE name = 'Joe'"> """<if_stmt>svars<is><none><block_start>svars={}<block_end>where=self._where(where svars)<line_sep>q='DELETE FROM '+table<if_stmt>using<block_start>q<augadd>' USING '+sqllist(using)<block_end><if_stmt>where<block_start>q<augadd>' WHERE '+where<block_end><return>q<block_end><block_end>sqlproducer=SQLProducer()<line_sep>
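# --- Illustrative usage (not part of the original module) ----------------
# Composing a parameterised query with the classes above; this module targets
# Python 2, so the example uses print statements in the same style.
def _demo_sqlquery():
    q = SQLQuery(["SELECT * FROM users WHERE name = ", sqlparam('joe'),
                  " AND active = ", sqlparam(True)])
    print q.query()          # SELECT * FROM users WHERE name = %s AND active = %s
    print q.query('qmark')   # SELECT * FROM users WHERE name = ? AND active = ?
    print q.values()         # ['joe', True]
    # sqlwhere builds a WHERE clause from a dictionary, e.g.
    # <sql: "cust_id = 2 AND order_id = 3">
    print repr(sqlwhere({'cust_id': 2, 'order_id': 3}))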
# -*- coding: utf-8 -*- # Copyright (c) 2013 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. <import_stmt>numpy<import_stmt>scipy<import_stmt>unittest<import_stmt>time<import_from_stmt>nearpy Engine<import_from_stmt>nearpy.distances CosineDistance<import_from_stmt>nearpy.hashes RandomBinaryProjections HashPermutations HashPermutationMapper<def_stmt>example2 # Dimension of feature space <block_start>DIM=100<line_sep># Number of data points (dont do too much because of exact search) POINTS=20000<line_sep>########################################################## print('Performing indexing with HashPermutations...')<line_sep>t0=time.time()<line_sep># Create permutations meta-hash permutations=HashPermutations('permut')<line_sep># Create binary hash as child hash rbp_perm=RandomBinaryProjections('rbp_perm' 14)<line_sep>rbp_conf={'num_permutation':50 'beam_size':10 'num_neighbour':100}<line_sep># Add rbp as child hash of permutations hash permutations.add_child_hash(rbp_perm rbp_conf)<line_sep># Create engine engine_perm=Engine(DIM lshashes=[permutations] distance=CosineDistance())<line_sep># First index some random vectors matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_perm.store_vector(v)<block_end># Then update permuted index permutations.build_permuted_index()<line_sep>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector query=numpy.random.randn(DIM)<line_sep># Do random query on engine 3 print('\nNeighbour distances with HashPermutations:')<line_sep>print(' -> Candidate count is %d'%engine_perm.candidate_count(query))<line_sep>results=engine_perm.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>########################################################## print('\nPerforming indexing with HashPermutationMapper...')<line_sep>t0=time.time()<line_sep># Create permutations meta-hash permutations2=HashPermutationMapper('permut2')<line_sep># Create binary hash as child hash rbp_perm2=RandomBinaryProjections('rbp_perm2' 14)<line_sep># Add rbp as child hash of permutations hash permutations2.add_child_hash(rbp_perm2)<line_sep># Create engine engine_perm2=Engine(DIM lshashes=[permutations2] 
distance=CosineDistance())<line_sep># First index some random vectors matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_perm2.store_vector(v)<block_end>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector query=numpy.random.randn(DIM)<line_sep># Do random query on engine 4 print('\nNeighbour distances with HashPermutationMapper:')<line_sep>print(' -> Candidate count is %d'%engine_perm2.candidate_count(query))<line_sep>results=engine_perm2.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>########################################################## print('\nPerforming indexing with multiple binary hashes...')<line_sep>t0=time.time()<line_sep>hashes=[]<for_stmt>k range(20)<block_start>hashes.append(RandomBinaryProjections('rbp_%d'%k 10))<block_end># Create engine engine_rbps=Engine(DIM lshashes=hashes distance=CosineDistance())<line_sep># First index some random vectors matrix=numpy.zeros((POINTS DIM))<for_stmt>i range(POINTS)<block_start>v=numpy.random.randn(DIM)<line_sep>matrix[i]=v<line_sep>engine_rbps.store_vector(v)<block_end>t1=time.time()<line_sep>print('Indexing took %f seconds'%(t1-t0))<line_sep># Get random query vector query=numpy.random.randn(DIM)<line_sep># Do random query on engine 4 print('\nNeighbour distances with multiple binary hashes:')<line_sep>print(' -> Candidate count is %d'%engine_rbps.candidate_count(query))<line_sep>results=engine_rbps.neighbours(query)<line_sep>dists=[x[2]<for>x results]<line_sep>print(dists)<line_sep># Real neighbours print('\nReal neighbour distances:')<line_sep>query=query.reshape((DIM))<line_sep>dists=CosineDistance().distance(matrix query)<line_sep>dists=dists.reshape((-1 ))<line_sep>dists=sorted(dists)<line_sep>print(dists[:10])<line_sep>########################################################## <block_end>
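# --- Illustrative minimal example (not part of the original file) --------
# The same API as the example above, stripped down to one random-projection
# hash: build the engine, index a batch of random vectors, query one of them
# and return the candidate distances.
def example_minimal(dim=20, points=500):
    engine = Engine(dim, lshashes=[RandomBinaryProjections('rbp_minimal', 10)],
                    distance=CosineDistance())
    matrix = numpy.zeros((points, dim))
    for i in range(points):
        v = numpy.random.randn(dim)
        matrix[i] = v
        engine.store_vector(v)
    results = engine.neighbours(matrix[0])
    return [x[2] for x in results]  # distances, as in the prints above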
# Copyright (c) 1999-2008 <NAME> and <NAME> # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_from_stmt>slicc.ast.DeclAST DeclAST<import_from_stmt>slicc.symbols.Type Type<class_stmt>TypeDeclAST(DeclAST)<block_start><def_stmt>__init__ self slicc type_ast pairs field_asts<block_start>super(TypeDeclAST self).__init__(slicc pairs)<line_sep>self.type_ast=type_ast<line_sep>self.field_asts=field_asts<block_end><def_stmt>__repr__ self<block_start><return>"[TypeDecl: %r]"%(self.type_ast)<block_end><def_stmt>files self parent=<none><block_start><if_stmt>"external"<in>self<block_start><return>set()<block_end><if_stmt>parent<block_start>ident="%s_%s"%(parent self.type_ast.ident)<block_end><else_stmt><block_start>ident=self.type_ast.ident<block_end><return>set(("%s.hh"%ident "%s.cc"%ident))<block_end><def_stmt>generate self<block_start>ident=str(self.type_ast)<line_sep>machine=self.symtab.state_machine<line_sep># Make the new type new_type=Type(self.symtab ident self.location self.pairs self.state_machine)<if_stmt>machine<block_start>machine.addType(new_type)<block_end>self.symtab.newSymbol(new_type)<line_sep>self.symtab.pushFrame()<line_sep># Add all of the fields of the type to it <for_stmt>field self.field_asts<block_start>field.generate(new_type)<block_end>self.symtab.popFrame()<block_end><block_end>
# This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__="biotite"<line_sep>__author__="<NAME>"<line_sep>__all__=["File" "TextFile" "InvalidFileError"]<import_stmt>abc<import_stmt>io<import_stmt>warnings<import_from_stmt>.copyable Copyable<import_stmt>copy<class_stmt>File(Copyable metaclass=abc.ABCMeta)<block_start>""" Base class for all file classes. The constructor creates an empty file, that can be filled with data using the class specific setter methods. Conversely, the class method :func:`read()` reads a file from disk (or a file-like object from other sources). In order to write the instance content into a file the :func:`write()` method is used. """<def_stmt>__init__ self# Support for deprecated instance method 'read()': # When creating an instance, the 'read()' class method is # replaced by the instance method, so that subsequent # 'read()' calls are delegated to the instance method <block_start>self.read=self._deprecated_read<block_end>@[email protected]<def_stmt>read cls file<block_start>""" Parse a file (or file-like object). Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Returns ------- file_object : File An instance from the respective :class:`File` subclass representing the parsed file. """<line_sep><pass><block_end><def_stmt>_deprecated_read self file *args **kwargs<block_start>""" Support for deprecated instance method :func:`read()`. Internally this calls the :func:`read()` class method and replaces the data in `self` with the data from the newly created :class:`File` object """<line_sep>warnings.warn("Instance method 'read()' is deprecated, "<concat>"use class method instead" DeprecationWarning)<line_sep>cls=type(self)<line_sep>new_file=cls.read(file *args **kwargs)<line_sep>self.__dict__.update(new_file.__dict__)<block_end>@abc.abstractmethod<def_stmt>write self file<block_start>""" Write the contents of this :class:`File` object into a file. Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """<line_sep><pass><block_end><block_end><class_stmt>TextFile(File metaclass=abc.ABCMeta)<block_start>""" Base class for all line based text files. When reading a file, the text content is saved as list of strings, one for each line. When writing a file, this list is written into the file. Attributes ---------- lines : list List of string representing the lines in the text file. PROTECTED: Do not modify from outside. """<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.lines=[]<block_end>@classmethod<def_stmt>read cls file *args **kwargs# File name <block_start><if_stmt>isinstance(file str)<block_start><with_stmt>open(file "r")<as>f<block_start>lines=f.read().splitlines()<block_end><block_end># File object <else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end>lines=file.read().splitlines()<block_end>file_object=cls(*args **kwargs)<line_sep>file_object.lines=lines<line_sep><return>file_object<block_end>@staticmethod<def_stmt>read_iter file<block_start>""" Create an iterator over each line of the given text file. Parameters ---------- file : file-like object or str The file to be read. Alternatively a file path can be supplied. Yields ------ line : str The current line in the file. 
"""<line_sep># File name <if_stmt>isinstance(file str)<block_start><with_stmt>open(file "r")<as>f<block_start><while_stmt><true><block_start>line=f.readline()<if_stmt><not>line<block_start><break><block_end><yield>line<block_end><block_end><block_end># File object <else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end><while_stmt><true><block_start>line=file.readline()<if_stmt><not>line<block_start><break><block_end><yield>line<block_end><block_end><block_end><def_stmt>write self file<block_start>""" Write the contents of this object into a file (or file-like object). Parameters ---------- file_name : file-like object or str The file to be written to. Alternatively a file path can be supplied. """<if_stmt>isinstance(file str)<block_start><with_stmt>open(file "w")<as>f<block_start>f.write("\n".join(self.lines)+"\n")<block_end><block_end><else_stmt><block_start><if_stmt><not>is_text(file)<block_start><raise>TypeError("A file opened in 'text' mode is required")<block_end>file.write("\n".join(self.lines)+"\n")<block_end><block_end><def_stmt>__copy_fill__ self clone<block_start>super().__copy_fill__(clone)<line_sep>clone.lines=copy.copy(self.lines)<block_end><def_stmt>__str__ self<block_start><return>("\n".join(self.lines))<block_end><block_end><class_stmt>InvalidFileError(Exception)<block_start>""" Indicates that the file is not suitable for the requested action, either because the file does not contain the required data or because the file is malformed. """<line_sep><pass><block_end><def_stmt>wrap_string text width<block_start>""" A much simpler and hence much more efficient version of `textwrap.wrap()`. This function simply wraps the given `text` after `width` characters, ignoring sentences, whitespaces, etc. """<line_sep>lines=[]<for_stmt>i range(0 len(text) width)<block_start>lines.append(text[i:i+width])<block_end><return>lines<block_end><def_stmt>is_binary file<block_start><if_stmt>isinstance(file io.BufferedIOBase)<block_start><return><true><block_end># for file wrappers, e.g. 'TemporaryFile' <elif_stmt>hasattr(file "file")<and>isinstance(file.file io.BufferedIOBase)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>is_text file<block_start><if_stmt>isinstance(file io.TextIOBase)<block_start><return><true><block_end># for file wrappers, e.g. 'TemporaryFile' <elif_stmt>hasattr(file "file")<and>isinstance(file.file io.TextIOBase)<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># # module to make the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass=cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass" ## jet input jets=cms.InputTag("selectedPatJets") ## lepton input leps=cms.InputTag("selectedPatMuons") ## maximum number of jets to be considered maxNJets=cms.int32(4) ## nominal WMass parameter (in GeV) wMass=cms.double(80.4) ## use b-tagging to distinguish between light and b jets useBTagging=cms.bool(<false>) ## choose algorithm for b-tagging bTagAlgorithm=cms.string("trackCountingHighEffBJetTags") ## minimum b discriminator value required for b jets and ## maximum b discriminator value allowed for non-b jets minBDiscBJets=cms.double(1.0) maxBDiscLightJets=cms.double(3.0))<line_sep>
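# Hedged sketch of how a configuration fragment might reuse the producer above
# by cloning it with different parameters. The _cfi import path follows the
# usual CMSSW naming convention and is an assumption here.
import FWCore.ParameterSet.Config as cms
from TopQuarkAnalysis.TopJetCombination.TtSemiLepJetCombMaxSumPtWMass_cfi import findTtSemiLepJetCombMaxSumPtWMass

findTtSemiLepJetCombMaxSumPtWMassBTag = findTtSemiLepJetCombMaxSumPtWMass.clone(
    maxNJets = cms.int32(5),      # consider one additional jet
    useBTagging = cms.bool(True)  # require b-tagged jets for the b candidates
)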
<import_from_stmt>libsaas http parsers<import_from_stmt>libsaas.services base<import_from_stmt>libsaas.services.twilio resource<class_stmt>ApplicationsBase(resource.TwilioResource)<block_start>path='Applications'<block_end><class_stmt>Application(ApplicationsBase)<block_start><def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>Applications(ApplicationsBase)<block_start>@base.apimethod<def_stmt>get self FriendlyName=<none> Page=<none> PageSize=<none> AfterSid=<none><block_start>""" Fetch the Applications belonging to an account. :var FriendlyName: Only return the Account resources with friendly names that exactly match this name. :vartype FriendlyName: str :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>ConnectAppsBase(resource.TwilioResource)<block_start>path='ConnectApps'<def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>ConnectApp(ConnectAppsBase)<block_start><pass><block_end><class_stmt>ConnectApps(ConnectAppsBase)<block_start>@base.apimethod<def_stmt>get self Page=<none> PageSize=<none> AfterSid=<none><block_start>""" Fetch the Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>AuthorizedConnectAppsBase(resource.TwilioResource)<block_start>path='AuthorizedConnectApps'<def_stmt>create self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>update self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><def_stmt>delete self *args **kwargs<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>AuthorizedConnectApp(AuthorizedConnectAppsBase)<block_start><pass><block_end><class_stmt>AuthorizedConnectApps(AuthorizedConnectAppsBase)<block_start>@base.apimethod<def_stmt>get self Page=<none> PageSize=<none> AfterSid=<none><block_start>""" Fetch the Authorized Connect Apps belonging to an account. :var Page: The current page number. Zero-indexed, so the first page is 0. :vartype Page: int :var PageSize: How many resources to return in each list page. 
The default is 50, and the maximum is 1000. :vartype PageSize: int :var AfterSid: The last Sid returned in the previous page, used to avoid listing duplicated resources if new ones are created while paging. :vartype AfterSid: str """<line_sep>params=resource.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><block_end>
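# Hedged sketch of how the resources above are usually reached through the
# libsaas Twilio service object. The entry point and the applications() accessor
# are assumptions based on libsaas conventions; credentials are placeholders.
from libsaas.services import twilio

service = twilio.Twilio("ACxxxxxxxx", "auth_token")
# Mirrors Applications.get() above: one page of 20 application resources.
apps = service.applications().get(PageSize=20)
print(apps)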
<import_from_stmt>vyper ast<as>vy_ast<def_stmt>test_output_class <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>isinstance(new_node vy_ast.Int)<block_end><def_stmt>test_source <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>old_node.src<eq>new_node.src<assert_stmt>old_node.node_source_code<eq>new_node.node_source_code<block_end><def_stmt>test_kwargs <block_start>old_node=vy_ast.parse_to_ast("42").body[0].value<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>old_node.value<eq>42<assert_stmt>new_node.value<eq>666<block_end><def_stmt>test_compare_nodes <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt><not>vy_ast.compare_nodes(old_node new_node)<block_end><def_stmt>test_new_node_has_no_parent <block_start>old_node=vy_ast.parse_to_ast("foo = 42")<line_sep>new_node=vy_ast.Int.from_node(old_node value=666)<assert_stmt>new_node._parent<is><none><assert_stmt>new_node._depth<eq>0<block_end>
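# Hedged sketch of the pattern the tests above exercise: building a replacement
# AST node with Int.from_node so that source metadata (src, node_source_code)
# is carried over while the value changes.
from vyper import ast as vy_ast

old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=7)
assert new_node.value == 7
assert new_node.src == old_node.src  # position info is inherited from the original node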
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string. # You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings. # It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API. # # More information about the RoboDK API here: # https://robodk.com/doc/en/RoboDK-API.html # For more information visit: # https://robodk.com/doc/en/PythonAPI/robolink.html <import_from_stmt>robolink *# RoboDK API # JSON tools <import_stmt>json<line_sep># Start the RoboDK API RDK=Robolink()<line_sep># Ask the user to select a robot arm (6 axis robot wich can have external axes) robot=RDK.ItemUserPick("Select a robot arm" ITEM_TYPE_ROBOT_ARM)<line_sep># Default optimization settings test template AxesOptimSettings={# Optimization parameters: "Active":1 # Use generic axes optimization: 0=Disabled or 1=Enabled "Algorithm":2 # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead "MaxIter":650 # Max. number of iterations "Tol":0.0016 # Tolerance to stop iterations # Absolute Reference joints (double): "AbsJnt_1":104.17 "AbsJnt_2":11.22 "AbsJnt_3":15.97 "AbsJnt_4":-87.48 "AbsJnt_5":-75.36 "AbsJnt_6":63.03 "AbsJnt_7":174.13 "AbsJnt_8":173.60 "AbsJnt_9":0 # Using Absolute reference joints (0: No, 1: Yes): "AbsOn_1":1 "AbsOn_2":1 "AbsOn_3":1 "AbsOn_4":1 "AbsOn_5":1 "AbsOn_6":1 "AbsOn_7":1 "AbsOn_8":1 "AbsOn_9":1 # Weight for absolute reference joints (double): "AbsW_1":100 "AbsW_2":100 "AbsW_3":100 "AbsW_4":89 "AbsW_5":90 "AbsW_6":92 "AbsW_7":92 "AbsW_8":96 "AbsW_9":50 # Using for relative joint motion smoothing (0: No, 1: Yes): "RelOn_1":1 "RelOn_2":1 "RelOn_3":1 "RelOn_4":1 "RelOn_5":1 "RelOn_6":1 "RelOn_7":1 "RelOn_8":1 "RelOn_9":1 # Weight for relative joint motion (double): "RelW_1":5 "RelW_2":47 "RelW_3":44 "RelW_4":43 "RelW_5":36 "RelW_6":47 "RelW_7":53 "RelW_8":59 "RelW_9":0 }<line_sep># Update one value, for example, make it active: ToUpdate={}<line_sep>ToUpdate["Active"]=1<line_sep>json_str=json.dumps(json.dumps(ToUpdate))<line_sep>status=robot.setParam("OptimAxes" json_str)<line_sep>print(status)<line_sep># Example to make a partial or full update count=1<while_stmt><true><block_start><for_stmt>i range(7)# Partial update <block_start>ToUpdate={}<line_sep>ToUpdate["AbsJnt_"+str(i+1)]=(count+i)<times>4<line_sep>ToUpdate["AbsOn_"+str(i+1)]=count%2<line_sep>ToUpdate["AbsW_"+str(i+1)]=(count+i)<line_sep>json_str=json.dumps(json.dumps(ToUpdate))<line_sep>status=robot.setParam("OptimAxes" json_str)<line_sep>print(status)<line_sep># Full update #OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4 #OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i) #OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2 <block_end># Full update #print(robot.setParam("OptimAxes", str(AxesOptimSettings))) count=count+1<line_sep># Read settings json_data=robot.setParam("OptimAxes")<line_sep>json_object=json.loads(json_data)<line_sep>print(json.dumps(json_object indent=4))<line_sep>pause(0.2)<block_end># Example to read the current axes optimization settings: <while_stmt><true><block_start>json_data=robot.setParam("OptimAxes")<line_sep>json_object=json.loads(json_data)<line_sep>print(json.dumps(json_object indent=4))<line_sep>pause(0.2)<block_end>
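# Hedged sketch: a one-shot read of the same "OptimAxes" settings used in the
# script above, without the demo loops. Only calls already shown above are used.
from robolink import *  # RoboDK API
import json

RDK = Robolink()
robot = RDK.ItemUserPick("Select a robot arm", ITEM_TYPE_ROBOT_ARM)
settings = json.loads(robot.setParam("OptimAxes"))
print(json.dumps(settings, indent=4))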
''' This file contains test cases for tflearn '''<import_stmt>tensorflow.compat.v1<as>tf<import_stmt>tflearn<import_stmt>unittest<class_stmt>TestActivations(unittest.TestCase)<block_start>''' This class contains test cases for the functions in tflearn/activations.py '''<line_sep>PLACES=4# Number of places to match when testing floating point values <def_stmt>test_linear self<block_start>f=tflearn.linear<line_sep># Case 1 x=tf.placeholder(tf.float32 shape=())<line_sep>self.assertEqual(f(x) x)<line_sep># Case 2 x=tf.placeholder(tf.int64 shape=())<line_sep>self.assertEqual(f(x) x)<block_end><def_stmt>test_tanh self<block_start>f=tflearn.tanh<line_sep>x=tf.placeholder(tf.float32 shape=())<with_stmt>tf.Session()<as>sess# Case 1 <block_start>self.assertEqual(sess.run(f(x) feed_dict={x:0}) 0)<line_sep># Case 2 self.assertAlmostEqual(sess.run(f(x) feed_dict={x:0.5}) 0.4621 places=TestActivations.PLACES)<line_sep># Case 3 self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-0.25}) -0.2449 places=TestActivations.PLACES)<block_end><block_end><def_stmt>test_leaky_relu self<block_start>f=<lambda>x:tflearn.leaky_relu(x alpha=0.2)<line_sep>x=tf.placeholder(tf.float32 shape=())<with_stmt>tf.Session()<as>sess# Case 1 <block_start>self.assertEqual(sess.run(f(x) feed_dict={x:0}) 0)<line_sep># Case 2 self.assertAlmostEqual(sess.run(f(x) feed_dict={x:1}) 1 places=TestActivations.PLACES)<line_sep># Case 3 self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-1}) -0.2 places=TestActivations.PLACES)<line_sep># Case 4 self.assertAlmostEqual(sess.run(f(x) feed_dict={x:-5}) -1 places=TestActivations.PLACES)<block_end><block_end><def_stmt>test_apply_activation self<block_start>lrelu_02=<lambda>x:tflearn.leaky_relu(x alpha=0.2)<line_sep>x=tf.constant(-0.25 tf.float32)<with_stmt>tf.Session()<as>sess# Case 1: 'linear' <block_start>self.assertEqual(sess.run(tflearn.activation(x 'linear')) -0.25)<line_sep># Case 2: 'relu' self.assertEqual(sess.run(tflearn.activation(x 'relu')) 0)<line_sep># Case 3: 'leaky_relu' self.assertAlmostEqual(sess.run(tflearn.activation(x 'leaky_relu')) -0.025 places=TestActivations.PLACES)<line_sep># Case 4: 'tanh' self.assertAlmostEqual(sess.run(tflearn.activation(x 'tanh')) -0.2449 places=TestActivations.PLACES)<line_sep># Case 5: lrelu_02 (callable) self.assertAlmostEqual(sess.run(tflearn.activation(x lrelu_02)) -0.05 places=TestActivations.PLACES)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# This file is part of Patsy # Copyright (C) 2013 <NAME> <<EMAIL>> # See file LICENSE.txt for license information. # Regression tests for fixed bugs (when not otherwise better covered somewhere # else) <import_from_stmt>patsy EvalEnvironment dmatrix build_design_matrices PatsyError Origin <def_stmt>test_issue_11 # Give a sensible error message for level mismatches # (At some points we've failed to put an origin= on these errors) <block_start>env=EvalEnvironment.capture()<line_sep>data={"X":[0 1 2 3] "Y":[1 2 3 4]}<line_sep>formula="C(X) + Y"<line_sep>new_data={"X":[0 0 1 2 3 3 4] "Y":[1 2 3 4 5 6 7]}<line_sep>info=dmatrix(formula data)<try_stmt><block_start>build_design_matrices([info.design_info] new_data)<block_end><except_stmt>PatsyError<as>e<block_start><assert_stmt>e.origin<eq>Origin(formula 0 4)<block_end><else_stmt><block_start><assert_stmt><false><block_end><block_end>
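# Hedged sketch of the behaviour the regression test above pins down: building
# matrices for new data that introduces an unseen categorical level raises a
# PatsyError whose origin points back into the original formula.
from patsy import dmatrix, build_design_matrices, PatsyError

train = {"X": [0, 1, 2, 3], "Y": [1, 2, 3, 4]}
new = {"X": [0, 4], "Y": [1, 2]}   # X == 4 was never seen during training
design = dmatrix("C(X) + Y", train)
try:
    build_design_matrices([design.design_info], new)
except PatsyError as err:
    print(err)                      # message highlights the offending "C(X)" term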
<class_stmt>Foo<block_start><pass><block_end><class_stmt>Bar(Foo)<block_start><def_stmt>__init__ self<block_start>super(Bar self).__init__()<block_end><block_end># [super-with-arguments] <class_stmt>Baz(Foo)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end><block_end><class_stmt>Qux(Foo)<block_start><def_stmt>__init__ self<block_start>super(Bar self).__init__()<block_end><block_end><class_stmt>NotSuperCall(Foo)<block_start><def_stmt>__init__ self<block_start>super.test(Bar self).__init__()<block_end><block_end><class_stmt>InvalidSuperCall(Foo)<block_start><def_stmt>__init__ self<block_start>super(InvalidSuperCall.__class__ self).__init__()<block_end><block_end><def_stmt>method_accepting_cls cls self# Using plain `super()` is not valid here, since there's no `__class__` cell found # (Exact exception would be 'RuntimeError: super(): __class__ cell not found') # Instead, we expect to *not* see a warning about `super-with-arguments`. # Explicitly passing `cls`, and `self` to `super()` is what's required. <block_start>super(cls self).__init__()<block_end>
<import_stmt>logging<line_sep>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' datefmt='%Y/%m/%d %H:%M:%S' level=logging.INFO )<line_sep>logger=logging.getLogger("Main")<import_stmt>os random<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>processing convert_examples_to_features read_squad_examples<import_from_stmt>processing ChineseFullTokenizer<import_from_stmt>pytorch_pretrained_bert.my_modeling BertConfig<import_from_stmt>optimization BERTAdam<import_stmt>config<import_from_stmt>utils read_and_convert divide_parameters<import_from_stmt>modeling BertForQASimple BertForQASimpleAdaptorTraining<import_from_stmt>textbrewer DistillationConfig TrainingConfig BasicTrainer<import_from_stmt>torch.utils.data TensorDataset DataLoader RandomSampler<import_from_stmt>functools partial<import_from_stmt>train_eval predict<def_stmt>args_check args<block_start><if_stmt>os.path.exists(args.output_dir)<and>os.listdir(args.output_dir)<block_start>logger.warning("Output directory () already exists and is not empty.")<block_end><if_stmt>args.gradient_accumulation_steps<l>1<block_start><raise>ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(args.gradient_accumulation_steps))<block_end><if_stmt><not>args.do_train<and><not>args.do_predict<block_start><raise>ValueError("At least one of `do_train` or `do_predict` must be True.")<block_end><if_stmt>args.local_rank<eq>-1<or>args.no_cuda<block_start>device=torch.device("cuda"<if>torch.cuda.is_available()<and><not>args.no_cuda<else>"cpu")<line_sep>n_gpu=torch.cuda.device_count()<if><not>args.no_cuda<else>0<block_end><else_stmt><block_start>device=torch.device("cuda" args.local_rank)<line_sep>n_gpu=1<line_sep>torch.distributed.init_process_group(backend='nccl')<block_end>logger.info("device %s n_gpu %d distributed training %r" device n_gpu bool(args.local_rank<ne>-1))<line_sep>args.n_gpu=n_gpu<line_sep>args.device=device<line_sep><return>device n_gpu<block_end><def_stmt>main #parse arguments <block_start>config.parse()<line_sep>args=config.args<for_stmt>k,v vars(args).items()<block_start>logger.info(f"{k}:{v}")<block_end>#set seeds torch.manual_seed(args.random_seed)<line_sep>torch.cuda.manual_seed_all(args.random_seed)<line_sep>np.random.seed(args.random_seed)<line_sep>random.seed(args.random_seed)<line_sep>#arguments check device,n_gpu=args_check(args)<line_sep>os.makedirs(args.output_dir exist_ok=<true>)<line_sep>forward_batch_size=int(args.train_batch_size/args.gradient_accumulation_steps)<line_sep>args.forward_batch_size=forward_batch_size<line_sep>#load bert config bert_config_S=BertConfig.from_json_file(args.bert_config_file_S)<assert_stmt>args.max_seq_length<le>bert_config_S.max_position_embeddings<line_sep>#read data train_examples=<none><line_sep>train_features=<none><line_sep>eval_examples=<none><line_sep>eval_features=<none><line_sep>num_train_steps=<none><line_sep>tokenizer=ChineseFullTokenizer(vocab_file=args.vocab_file do_lower_case=args.do_lower_case)<line_sep>convert_fn=partial(convert_examples_to_features tokenizer=tokenizer max_seq_length=args.max_seq_length doc_stride=args.doc_stride max_query_length=args.max_query_length)<if_stmt>args.do_train<block_start>train_examples,train_features=read_and_convert(args.train_file is_training=<true> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<if_stmt>args.fake_file_1<block_start>fake_examples1,fake_features1=read_and_convert(args.fake_file_1 is_training=<true> do_lower_case=args.do_lower_case 
read_fn=read_squad_examples convert_fn=convert_fn)<line_sep>train_examples<augadd>fake_examples1<line_sep>train_features<augadd>fake_features1<block_end><if_stmt>args.fake_file_2<block_start>fake_examples2,fake_features2=read_and_convert(args.fake_file_2 is_training=<true> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<line_sep>train_examples<augadd>fake_examples2<line_sep>train_features<augadd>fake_features2<block_end>num_train_steps=int(len(train_features)/args.train_batch_size)<times>args.num_train_epochs<block_end><if_stmt>args.do_predict<block_start>eval_examples,eval_features=read_and_convert(args.predict_file is_training=<false> do_lower_case=args.do_lower_case read_fn=read_squad_examples convert_fn=convert_fn)<block_end>#Build Model and load checkpoint model_S=BertForQASimple(bert_config_S args)<line_sep>#Load student <if_stmt>args.load_model_type<eq>'bert'<block_start><assert_stmt>args.init_checkpoint_S<is><not><none><line_sep>state_dict_S=torch.load(args.init_checkpoint_S map_location='cpu')<line_sep>state_weight={k[5:]:v<for>k,v state_dict_S.items()<if>k.startswith('bert.')}<line_sep>missing_keys,_=model_S.bert.load_state_dict(state_weight strict=<false>)<assert_stmt>len(missing_keys)<eq>0<block_end><elif_stmt>args.load_model_type<eq>'all'<block_start><assert_stmt>args.tuned_checkpoint_S<is><not><none><line_sep>state_dict_S=torch.load(args.tuned_checkpoint_S map_location='cpu')<line_sep>model_S.load_state_dict(state_dict_S)<block_end><else_stmt><block_start>logger.info("Model is randomly initialized.")<block_end>model_S.to(device)<if_stmt>args.local_rank<ne>-1<or>n_gpu<g>1<block_start><if_stmt>args.local_rank<ne>-1<block_start><raise>NotImplementedError<block_end><elif_stmt>n_gpu<g>1<block_start>model_S=torch.nn.DataParallel(model_S)<block_end><block_end>#,output_device=n_gpu-1) <if_stmt>args.do_train#parameters <block_start>params=list(model_S.named_parameters())<line_sep>all_trainable_params=divide_parameters(params lr=args.learning_rate)<line_sep>logger.info("Length of all_trainable_params: %d" len(all_trainable_params))<line_sep>optimizer=BERTAdam(all_trainable_params lr=args.learning_rate warmup=args.warmup_proportion t_total=num_train_steps schedule=args.schedule s_opt1=args.s_opt1 s_opt2=args.s_opt2 s_opt3=args.s_opt3)<line_sep>logger.info("***** Running training *****")<line_sep>logger.info(" Num orig examples = %d" len(train_examples))<line_sep>logger.info(" Num split examples = %d" len(train_features))<line_sep>logger.info(" Forward batch size = %d" forward_batch_size)<line_sep>logger.info(" Num backward steps = %d" num_train_steps)<line_sep>########### DISTILLATION ########### train_config=TrainingConfig(gradient_accumulation_steps=args.gradient_accumulation_steps ckpt_frequency=args.ckpt_frequency log_dir=args.output_dir output_dir=args.output_dir device=args.device)<line_sep>distiller=BasicTrainer(train_config=train_config model=model_S adaptor=BertForQASimpleAdaptorTraining)<line_sep>all_input_ids=torch.tensor([f.input_ids<for>f train_features] dtype=torch.long)<line_sep>all_input_mask=torch.tensor([f.input_mask<for>f train_features] dtype=torch.long)<line_sep>all_doc_mask=torch.tensor([f.doc_mask<for>f train_features] dtype=torch.float)<line_sep>all_segment_ids=torch.tensor([f.segment_ids<for>f train_features] dtype=torch.long)<line_sep>all_start_positions=torch.tensor([f.start_position<for>f train_features] dtype=torch.long)<line_sep>all_end_positions=torch.tensor([f.end_position<for>f train_features] 
dtype=torch.long)<line_sep>train_dataset=TensorDataset(all_input_ids all_segment_ids all_input_mask all_doc_mask all_start_positions all_end_positions)<if_stmt>args.local_rank<eq>-1<block_start>train_sampler=RandomSampler(train_dataset)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>train_dataloader=DataLoader(train_dataset sampler=train_sampler batch_size=args.forward_batch_size drop_last=<true>)<line_sep>callback_func=partial(predict eval_examples=eval_examples eval_features=eval_features args=args)<with_stmt>distiller<block_start>distiller.train(optimizer scheduler=<none> dataloader=train_dataloader num_epochs=args.num_train_epochs callback=callback_func)<block_end><block_end><if_stmt><not>args.do_train<and>args.do_predict<block_start>res=predict(model_S eval_examples eval_features step=0 args=args)<line_sep>print(res)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
"""The Ray autoscaler uses tags/labels to associate metadata with instances."""<line_sep># Tag for the name of the node TAG_RAY_NODE_NAME="ray-node-name"<line_sep># Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag # value says 'type' instead of 'kind'. TAG_RAY_NODE_KIND="ray-node-type"<line_sep>NODE_KIND_HEAD="head"<line_sep>NODE_KIND_WORKER="worker"<line_sep>NODE_KIND_UNMANAGED="unmanaged"<line_sep># Tag for user defined node types (e.g., m4xl_spot). This is used for multi # node type clusters. TAG_RAY_USER_NODE_TYPE="ray-user-node-type"<line_sep># Tag for autofilled node types for legacy cluster yamls without multi # node type defined in the cluster configs. NODE_TYPE_LEGACY_HEAD="ray-legacy-head-node-type"<line_sep>NODE_TYPE_LEGACY_WORKER="ray-legacy-worker-node-type"<line_sep># Tag that reports the current state of the node (e.g. Updating, Up-to-date) TAG_RAY_NODE_STATUS="ray-node-status"<line_sep>STATUS_UNINITIALIZED="uninitialized"<line_sep>STATUS_WAITING_FOR_SSH="waiting-for-ssh"<line_sep>STATUS_SYNCING_FILES="syncing-files"<line_sep>STATUS_SETTING_UP="setting-up"<line_sep>STATUS_UPDATE_FAILED="update-failed"<line_sep>STATUS_UP_TO_DATE="up-to-date"<line_sep># Tag uniquely identifying all nodes of a cluster TAG_RAY_CLUSTER_NAME="ray-cluster-name"<line_sep># Hash of the node launch config, used to identify out-of-date nodes TAG_RAY_LAUNCH_CONFIG="ray-launch-config"<line_sep># Hash of the node runtime config, used to determine if updates are needed TAG_RAY_RUNTIME_CONFIG="ray-runtime-config"<line_sep># Hash of the contents of the directories specified by the file_mounts config # if the node is a worker, this also hashes content of the directories # specified by the cluster_synced_files config TAG_RAY_FILE_MOUNTS_CONTENTS="ray-file-mounts-contents"<line_sep>
<import_from_future_stmt> with_statement<import_from_stmt>.. Lock NeedRegenerationException<import_from_stmt>..util NameRegistry<import_from_stmt>. exception<import_from_stmt>..util PluginLoader memoized_property coerce_string_conf<import_from_stmt>.util function_key_generator function_multi_key_generator<import_from_stmt>.api NO_VALUE CachedValue<import_from_stmt>.proxy ProxyBackend<import_from_stmt>..util compat<import_stmt>time<import_stmt>datetime<import_from_stmt>numbers Number<import_from_stmt>functools wraps<import_stmt>threading<line_sep>_backend_loader=PluginLoader("dogpile.cache")<line_sep>register_backend=_backend_loader.register<import_from_stmt>. backends# noqa value_version=1<line_sep>"""An integer placed in the :class:`.CachedValue` so that new versions of dogpile.cache can detect cached values from a previous, backwards-incompatible version. """<class_stmt>RegionInvalidationStrategy(object)<block_start>"""Region invalidation strategy interface Implement this interface and pass implementation instance to :meth:`.CacheRegion.configure` to override default region invalidation. Example:: class CustomInvalidationStrategy(RegionInvalidationStrategy): def __init__(self): self._soft_invalidated = None self._hard_invalidated = None def invalidate(self, hard=None): if hard: self._soft_invalidated = None self._hard_invalidated = time.time() else: self._soft_invalidated = time.time() self._hard_invalidated = None def is_invalidated(self, timestamp): return ((self._soft_invalidated and timestamp < self._soft_invalidated) or (self._hard_invalidated and timestamp < self._hard_invalidated)) def was_hard_invalidated(self): return bool(self._hard_invalidated) def is_hard_invalidated(self, timestamp): return (self._hard_invalidated and timestamp < self._hard_invalidated) def was_soft_invalidated(self): return bool(self._soft_invalidated) def is_soft_invalidated(self, timestamp): return (self._soft_invalidated and timestamp < self._soft_invalidated) The custom implementation is injected into a :class:`.CacheRegion` at configure time using the :paramref:`.CacheRegion.configure.region_invalidator` parameter:: region = CacheRegion() region = region.configure(region_invalidator=CustomInvalidationStrategy()) Invalidation strategies that wish to have access to the :class:`.CacheRegion` itself should construct the invalidator given the region as an argument:: class MyInvalidator(RegionInvalidationStrategy): def __init__(self, region): self.region = region # ... # ... region = CacheRegion() region = region.configure(region_invalidator=MyInvalidator(region)) .. versionadded:: 0.6.2 .. seealso:: :paramref:`.CacheRegion.configure.region_invalidator` """<def_stmt>invalidate self hard=<true><block_start>"""Region invalidation. :class:`.CacheRegion` propagated call. The default invalidation system works by setting a current timestamp (using ``time.time()``) to consider all older timestamps effectively invalidated. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_hard_invalidated self timestamp<block_start>"""Check timestamp to determine if it was hard invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in hard mode. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_soft_invalidated self timestamp<block_start>"""Check timestamp to determine if it was soft invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in soft mode. 
"""<line_sep><raise>NotImplementedError()<block_end><def_stmt>is_invalidated self timestamp<block_start>"""Check timestamp to determine if it was invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>was_soft_invalidated self<block_start>"""Indicate the region was invalidated in soft mode. :return: Boolean. True if region was invalidated in soft mode. """<line_sep><raise>NotImplementedError()<block_end><def_stmt>was_hard_invalidated self<block_start>"""Indicate the region was invalidated in hard mode. :return: Boolean. True if region was invalidated in hard mode. """<line_sep><raise>NotImplementedError()<block_end><block_end><class_stmt>DefaultInvalidationStrategy(RegionInvalidationStrategy)<block_start><def_stmt>__init__ self<block_start>self._is_hard_invalidated=<none><line_sep>self._invalidated=<none><block_end><def_stmt>invalidate self hard=<true><block_start>self._is_hard_invalidated=bool(hard)<line_sep>self._invalidated=time.time()<block_end><def_stmt>is_invalidated self timestamp<block_start><return>(self._invalidated<is><not><none><and>timestamp<l>self._invalidated)<block_end><def_stmt>was_hard_invalidated self<block_start><return>self._is_hard_invalidated<is><true><block_end><def_stmt>is_hard_invalidated self timestamp<block_start><return>self.was_hard_invalidated()<and>self.is_invalidated(timestamp)<block_end><def_stmt>was_soft_invalidated self<block_start><return>self._is_hard_invalidated<is><false><block_end><def_stmt>is_soft_invalidated self timestamp<block_start><return>self.was_soft_invalidated()<and>self.is_invalidated(timestamp)<block_end><block_end><class_stmt>CacheRegion(object)<block_start>"""A front end to a particular cache backend. :param name: Optional, a string name for the region. This isn't used internally but can be accessed via the ``.name`` parameter, helpful for configuring a region from a config file. :param function_key_generator: Optional. A function that will produce a "cache key" given a data creation function and arguments, when using the :meth:`.CacheRegion.cache_on_arguments` method. The structure of this function should be two levels: given the data creation function, return a new function that generates the key based on the given arguments. Such as:: def my_key_generator(namespace, fn, **kw): fname = fn.__name__ def generate_key(*arg): return namespace + "_" + fname + "_".join(str(s) for s in arg) return generate_key region = make_region( function_key_generator = my_key_generator ).configure( "dogpile.cache.dbm", expiration_time=300, arguments={ "filename":"file.dbm" } ) The ``namespace`` is that passed to :meth:`.CacheRegion.cache_on_arguments`. It's not consulted outside this function, so in fact can be of any form. For example, it can be passed as a tuple, used to specify arguments to pluck from \**kw:: def my_key_generator(namespace, fn): def generate_key(*arg, **kw): return ":".join( [kw[k] for k in namespace] + [str(x) for x in arg] ) return generate_key Where the decorator might be used as:: @my_region.cache_on_arguments(namespace=('x', 'y')) def my_function(a, b, **kw): return my_data() .. seealso:: :func:`.function_key_generator` - default key generator :func:`.kwarg_function_key_generator` - optional gen that also uses keyword arguments :param function_multi_key_generator: Optional. Similar to ``function_key_generator`` parameter, but it's used in :meth:`.CacheRegion.cache_multi_on_arguments`. 
Generated function should return list of keys. For example:: def my_multi_key_generator(namespace, fn, **kw): namespace = fn.__name__ + (namespace or '') def generate_keys(*args): return [namespace + ':' + str(a) for a in args] return generate_keys :param key_mangler: Function which will be used on all incoming keys before passing to the backend. Defaults to ``None``, in which case the key mangling function recommended by the cache backend will be used. A typical mangler is the SHA1 mangler found at :func:`.sha1_mangle_key` which coerces keys into a SHA1 hash, so that the string length is fixed. To disable all key mangling, set to ``False``. Another typical mangler is the built-in Python function ``str``, which can be used to convert non-string or Unicode keys to bytestrings, which is needed when using a backend such as bsddb or dbm under Python 2.x in conjunction with Unicode keys. :param async_creation_runner: A callable that, when specified, will be passed to and called by dogpile.lock when there is a stale value present in the cache. It will be passed the mutex and is responsible releasing that mutex when finished. This can be used to defer the computation of expensive creator functions to later points in the future by way of, for example, a background thread, a long-running queue, or a task manager system like Celery. For a specific example using async_creation_runner, new values can be created in a background thread like so:: import threading def async_creation_runner(cache, somekey, creator, mutex): ''' Used by dogpile.core:Lock when appropriate ''' def runner(): try: value = creator() cache.set(somekey, value) finally: mutex.release() thread = threading.Thread(target=runner) thread.start() region = make_region( async_creation_runner=async_creation_runner, ).configure( 'dogpile.cache.memcached', expiration_time=5, arguments={ 'url': '127.0.0.1:11211', 'distributed_lock': True, } ) Remember that the first request for a key with no associated value will always block; async_creator will not be invoked. However, subsequent requests for cached-but-expired values will still return promptly. They will be refreshed by whatever asynchronous means the provided async_creation_runner callable implements. By default the async_creation_runner is disabled and is set to ``None``. .. versionadded:: 0.4.2 added the async_creation_runner feature. """<def_stmt>__init__ self name=<none> function_key_generator=function_key_generator function_multi_key_generator=function_multi_key_generator key_mangler=<none> async_creation_runner=<none> <block_start>"""Construct a new :class:`.CacheRegion`."""<line_sep>self.name=name<line_sep>self.function_key_generator=function_key_generator<line_sep>self.function_multi_key_generator=function_multi_key_generator<line_sep>self.key_mangler=self._user_defined_key_mangler=key_mangler<line_sep>self.async_creation_runner=async_creation_runner<line_sep>self.region_invalidator=DefaultInvalidationStrategy()<block_end><def_stmt>configure self backend expiration_time=<none> arguments=<none> _config_argument_dict=<none> _config_prefix=<none> wrap=<none> replace_existing_backend=<false> region_invalidator=<none><block_start>"""Configure a :class:`.CacheRegion`. The :class:`.CacheRegion` itself is returned. :param backend: Required. This is the name of the :class:`.CacheBackend` to use, and is resolved by loading the class from the ``dogpile.cache`` entrypoint. :param expiration_time: Optional. The expiration time passed to the dogpile system. 
May be passed as an integer number of seconds, or as a ``datetime.timedelta`` value. .. versionadded 0.5.0 ``expiration_time`` may be optionally passed as a ``datetime.timedelta`` value. The :meth:`.CacheRegion.get_or_create` method as well as the :meth:`.CacheRegion.cache_on_arguments` decorator (though note: **not** the :meth:`.CacheRegion.get` method) will call upon the value creation function after this time period has passed since the last generation. :param arguments: Optional. The structure here is passed directly to the constructor of the :class:`.CacheBackend` in use, though is typically a dictionary. :param wrap: Optional. A list of :class:`.ProxyBackend` classes and/or instances, each of which will be applied in a chain to ultimately wrap the original backend, so that custom functionality augmentation can be applied. .. versionadded:: 0.5.0 .. seealso:: :ref:`changing_backend_behavior` :param replace_existing_backend: if True, the existing cache backend will be replaced. Without this flag, an exception is raised if a backend is already configured. .. versionadded:: 0.5.7 :param region_invalidator: Optional. Override default invalidation strategy with custom implementation of :class:`.RegionInvalidationStrategy`. .. versionadded:: 0.6.2 """<if_stmt>"backend"<in>self.__dict__<and><not>replace_existing_backend<block_start><raise>exception.RegionAlreadyConfigured("This region is already "<concat>"configured with backend: %s. "<concat>"Specify replace_existing_backend=True to replace."%self.backend)<block_end>backend_cls=_backend_loader.load(backend)<if_stmt>_config_argument_dict<block_start>self.backend=backend_cls.from_config_dict(_config_argument_dict _config_prefix)<block_end><else_stmt><block_start>self.backend=backend_cls(arguments<or>{})<block_end><if_stmt><not>expiration_time<or>isinstance(expiration_time Number)<block_start>self.expiration_time=expiration_time<block_end><elif_stmt>isinstance(expiration_time datetime.timedelta)<block_start>self.expiration_time=int(compat.timedelta_total_seconds(expiration_time))<block_end><else_stmt><block_start><raise>exception.ValidationError('expiration_time is not a number or timedelta.')<block_end><if_stmt><not>self._user_defined_key_mangler<block_start>self.key_mangler=self.backend.key_mangler<block_end>self._lock_registry=NameRegistry(self._create_mutex)<if_stmt>getattr(wrap '__iter__' <false>)<block_start><for_stmt>wrapper reversed(wrap)<block_start>self.wrap(wrapper)<block_end><block_end><if_stmt>region_invalidator<block_start>self.region_invalidator=region_invalidator<block_end><return>self<block_end><def_stmt>wrap self proxy<block_start>''' Takes a ProxyBackend instance or class and wraps the attached backend. '''<line_sep># if we were passed a type rather than an instance then # initialize it. 
<if_stmt>type(proxy)<eq>type<block_start>proxy=proxy()<block_end><if_stmt><not>issubclass(type(proxy) ProxyBackend)<block_start><raise>TypeError("Type %s is not a valid ProxyBackend"%type(proxy))<block_end>self.backend=proxy.wrap(self.backend)<block_end><def_stmt>_mutex self key<block_start><return>self._lock_registry.get(key)<block_end><class_stmt>_LockWrapper(object)<block_start>"""weakref-capable wrapper for threading.Lock"""<def_stmt>__init__ self<block_start>self.lock=threading.Lock()<block_end><def_stmt>acquire self wait=<true><block_start><return>self.lock.acquire(wait)<block_end><def_stmt>release self<block_start>self.lock.release()<block_end><block_end><def_stmt>_create_mutex self key<block_start>mutex=self.backend.get_mutex(key)<if_stmt>mutex<is><not><none><block_start><return>mutex<block_end><else_stmt><block_start><return>self._LockWrapper()<block_end><block_end><def_stmt>invalidate self hard=<true><block_start>"""Invalidate this :class:`.CacheRegion`. The default invalidation system works by setting a current timestamp (using ``time.time()``) representing the "minimum creation time" for a value. Any retrieved value whose creation time is prior to this timestamp is considered to be stale. It does not affect the data in the cache in any way, and is also local to this instance of :class:`.CacheRegion`. Once set, the invalidation time is honored by the :meth:`.CacheRegion.get_or_create`, :meth:`.CacheRegion.get_or_create_multi` and :meth:`.CacheRegion.get` methods. The method supports both "hard" and "soft" invalidation options. With "hard" invalidation, :meth:`.CacheRegion.get_or_create` will force an immediate regeneration of the value which all getters will wait for. With "soft" invalidation, subsequent getters will return the "old" value until the new one is available. Usage of "soft" invalidation requires that the region or the method is given a non-None expiration time. .. versionadded:: 0.3.0 :param hard: if True, cache values will all require immediate regeneration; dogpile logic won't be used. If False, the creation time of existing values will be pushed back before the expiration time so that a return+regen will be invoked. .. versionadded:: 0.5.1 """<line_sep>self.region_invalidator.invalidate(hard)<block_end><def_stmt>configure_from_config self config_dict prefix<block_start>"""Configure from a configuration dictionary and a prefix. Example:: local_region = make_region() memcached_region = make_region() # regions are ready to use for function # decorators, but not yet for actual caching # later, when config is available myconfig = { "cache.local.backend":"dogpile.cache.dbm", "cache.local.arguments.filename":"/path/to/dbmfile.dbm", "cache.memcached.backend":"dogpile.cache.pylibmc", "cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1", } local_region.configure_from_config(myconfig, "cache.local.") memcached_region.configure_from_config(myconfig, "cache.memcached.") """<line_sep>config_dict=coerce_string_conf(config_dict)<line_sep><return>self.configure(config_dict["%sbackend"%prefix] expiration_time=config_dict.get("%sexpiration_time"%prefix <none>) _config_argument_dict=config_dict _config_prefix="%sarguments."%prefix wrap=config_dict.get("%swrap"%prefix <none>) )<block_end>@memoized_property<def_stmt>backend self<block_start><raise>exception.RegionNotConfigured("No backend is configured on this region.")<block_end>@property<def_stmt>is_configured self<block_start>"""Return True if the backend has been configured via the :meth:`.CacheRegion.configure` method already. 
.. versionadded:: 0.5.1 """<line_sep><return>'backend'<in>self.__dict__<block_end><def_stmt>get self key expiration_time=<none> ignore_expiration=<false><block_start>"""Return a value from the cache, based on the given key. If the value is not present, the method returns the token ``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionchanged:: 0.3.0 :meth:`.CacheRegion.get` now checks the value's creation time against the expiration time, rather than returning the value unconditionally. The method also interprets the cached value in terms of the current "invalidation" time as set by the :meth:`.invalidate` method. If a value is present, but its creation time is older than the current invalidation time, the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the invalidation time check. .. versionadded:: 0.3.0 Support for the :meth:`.CacheRegion.invalidate` method. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param expiration_time: Optional expiration time value which will supersede that configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.3.0 :param ignore_expiration: if ``True``, the value is returned from the cache if present, regardless of configured expiration times or whether or not :meth:`.invalidate` was called. .. versionadded:: 0.3.0 """<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>value=self.backend.get(key)<line_sep>value=self._unexpired_value_fn(expiration_time ignore_expiration)(value)<line_sep><return>value.payload<block_end><def_stmt>_unexpired_value_fn self expiration_time ignore_expiration<block_start><if_stmt>ignore_expiration<block_start><return><lambda>value:value<block_end><else_stmt><block_start><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end>current_time=time.time()<def_stmt>value_fn value<block_start><if_stmt>value<is>NO_VALUE<block_start><return>value<block_end><elif_stmt>expiration_time<is><not><none><and>current_time-value.metadata["ct"]<g>expiration_time<block_start><return>NO_VALUE<block_end><elif_stmt>self.region_invalidator.is_invalidated(value.metadata["ct"])<block_start><return>NO_VALUE<block_end><else_stmt><block_start><return>value<block_end><block_end><return>value_fn<block_end><block_end><def_stmt>get_multi self keys expiration_time=<none> ignore_expiration=<false><block_start>"""Return multiple values from the cache, based on the given keys. Returns values as a list matching the keys given. E.g.:: values = region.get_multi(["one", "two", "three"]) To convert values to a dictionary, use ``zip()``:: keys = ["one", "two", "three"] values = region.get_multi(keys) dictionary = dict(zip(keys, values)) Keys which aren't present in the list are returned as the ``NO_VALUE`` token. 
``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionadded:: 0.5.0 """<if_stmt><not>keys<block_start><return>[]<block_end><if_stmt>self.key_mangler<block_start>keys=list(map(<lambda>key:self.key_mangler(key) keys))<block_end>backend_values=self.backend.get_multi(keys)<line_sep>_unexpired_value_fn=self._unexpired_value_fn(expiration_time ignore_expiration)<line_sep><return>[value.payload<if>value<is><not>NO_VALUE<else>value<for>value (_unexpired_value_fn(value)<for>value backend_values)]<block_end><def_stmt>get_or_create self key creator expiration_time=<none> should_cache_fn=<none><block_start>"""Return a cached value based on the given key. If the value does not exist or is considered to be expired based on its creation time, the given creation function may or may not be used to recreate the value and persist the newly generated value in the cache. Whether or not the function is used depends on if the *dogpile lock* can be acquired or not. If it can't, it means a different thread or process is already running a creation function for this key against the cache. When the dogpile lock cannot be acquired, the method will block if no previous value is available, until the lock is released and a new value available. If a previous value is available, that value is returned immediately without blocking. If the :meth:`.invalidate` method has been called, and the retrieved value's timestamp is older than the invalidation timestamp, the value is unconditionally prevented from being returned. The method will attempt to acquire the dogpile lock to generate a new value, or will wait until the lock is released to return the new value. .. versionchanged:: 0.3.0 The value is unconditionally regenerated if the creation time is older than the last call to :meth:`.invalidate`. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param creator: function which creates a new value. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive the value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. E.g.:: def dont_cache_none(value): return value is not None value = region.get_or_create("some key", create_value, should_cache_fn=dont_cache_none) Above, the function returns the value of create_value() if the cache is invalid, however if the return value is None, it won't be cached. .. versionadded:: 0.4.3 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` - applies :meth:`.get_or_create` to any function using a decorator. 
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value version """<line_sep>orig_key=key<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end><def_stmt>get_value <block_start>value=self.backend.get(key)<if_stmt>(value<is>NO_VALUE<or>value.metadata['v']<ne>value_version<or>self.region_invalidator.is_hard_invalidated(value.metadata["ct"]))<block_start><raise>NeedRegenerationException()<block_end>ct=value.metadata["ct"]<if_stmt>self.region_invalidator.is_soft_invalidated(ct)<block_start>ct=time.time()-expiration_time-.0001<block_end><return>value.payload ct<block_end><def_stmt>gen_value <block_start>created_value=creator()<line_sep>value=self._value(created_value)<if_stmt><not>should_cache_fn<or>should_cache_fn(created_value)<block_start>self.backend.set(key value)<block_end><return>value.payload value.metadata["ct"]<block_end><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end><if_stmt>(expiration_time<is><none><and>self.region_invalidator.was_soft_invalidated())<block_start><raise>exception.DogpileCacheException("Non-None expiration time required "<concat>"for soft invalidation")<block_end><if_stmt>expiration_time<eq>-1<block_start>expiration_time=<none><block_end><if_stmt>self.async_creation_runner<block_start><def_stmt>async_creator mutex<block_start><return>self.async_creation_runner(self orig_key creator mutex)<block_end><block_end><else_stmt><block_start>async_creator=<none><block_end><with_stmt>Lock(self._mutex(key) gen_value get_value expiration_time async_creator)<as>value<block_start><return>value<block_end><block_end><def_stmt>get_or_create_multi self keys creator expiration_time=<none> should_cache_fn=<none><block_start>"""Return a sequence of cached values based on a sequence of keys. The behavior for generation of values based on keys corresponds to that of :meth:`.Region.get_or_create`, with the exception that the ``creator()`` function may be asked to generate any subset of the given keys. The list of keys to be generated is passed to ``creator()``, and ``creator()`` should return the generated values as a sequence corresponding to the order of the keys. The method uses the same approach as :meth:`.Region.get_multi` and :meth:`.Region.set_multi` to get and set values from the backend. If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend` that modifies values, take note this function invokes ``.set_multi()`` for newly generated values using the same values it returns to the calling function. A correct implementation of ``.set_multi()`` will not modify values in-place on the submitted ``mapping`` dict. :param keys: Sequence of keys to be retrieved. :param creator: function which accepts a sequence of keys and returns a sequence of new values. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive each value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. .. versionadded:: 0.5.0 .. 
seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """<def_stmt>get_value key<block_start>value=values.get(key NO_VALUE)<if_stmt>(value<is>NO_VALUE<or>value.metadata['v']<ne>value_version<or>self.region_invalidator.is_hard_invalidated(value.metadata['v']))# dogpile.core understands a 0 here as # "the value is not available", e.g. # _has_value() will return False. <block_start><return>value.payload 0<block_end><else_stmt><block_start>ct=value.metadata["ct"]<if_stmt>self.region_invalidator.is_soft_invalidated(ct)<block_start>ct=time.time()-expiration_time-.0001<block_end><return>value.payload ct<block_end><block_end><def_stmt>gen_value <block_start><raise>NotImplementedError()<block_end><def_stmt>async_creator key mutex<block_start>mutexes[key]=mutex<block_end><if_stmt>expiration_time<is><none><block_start>expiration_time=self.expiration_time<block_end><if_stmt>(expiration_time<is><none><and>self.region_invalidator.was_soft_invalidated())<block_start><raise>exception.DogpileCacheException("Non-None expiration time required "<concat>"for soft invalidation")<block_end><if_stmt>expiration_time<eq>-1<block_start>expiration_time=<none><block_end>mutexes={}<line_sep>sorted_unique_keys=sorted(set(keys))<if_stmt>self.key_mangler<block_start>mangled_keys=[self.key_mangler(k)<for>k sorted_unique_keys]<block_end><else_stmt><block_start>mangled_keys=sorted_unique_keys<block_end>orig_to_mangled=dict(zip(sorted_unique_keys mangled_keys))<line_sep>values=dict(zip(mangled_keys self.backend.get_multi(mangled_keys)))<for_stmt>orig_key,mangled_key orig_to_mangled.items()<block_start><with_stmt>Lock(self._mutex(mangled_key) gen_value <lambda>:get_value(mangled_key) expiration_time async_creator=<lambda>mutex:async_creator(orig_key mutex))<block_start><pass><block_end><block_end><try_stmt><block_start><if_stmt>mutexes# sort the keys, the idea is to prevent deadlocks. # though haven't been able to simulate one anyway. <block_start>keys_to_get=sorted(mutexes)<line_sep>new_values=creator(*keys_to_get)<line_sep>values_w_created=dict((orig_to_mangled[k] self._value(v))<for>k,v zip(keys_to_get new_values))<if_stmt><not>should_cache_fn<block_start>self.backend.set_multi(values_w_created)<block_end><else_stmt><block_start>self.backend.set_multi(dict((k v)<for>k,v values_w_created.items()<if>should_cache_fn(v[0])))<block_end>values.update(values_w_created)<block_end><return>[values[orig_to_mangled[k]].payload<for>k keys]<block_end><finally_stmt><block_start><for_stmt>mutex mutexes.values()<block_start>mutex.release()<block_end><block_end><block_end><def_stmt>_value self value<block_start>"""Return a :class:`.CachedValue` given a value."""<line_sep><return>CachedValue(value {"ct":time.time() "v":value_version})<block_end><def_stmt>set self key value<block_start>"""Place a new value in the cache under the given key."""<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>self.backend.set(key self._value(value))<block_end><def_stmt>set_multi self mapping<block_start>"""Place new values in the cache under the given keys. .. versionadded:: 0.5.0 """<if_stmt><not>mapping<block_start><return><block_end><if_stmt>self.key_mangler<block_start>mapping=dict((self.key_mangler(k) self._value(v))<for>k,v mapping.items())<block_end><else_stmt><block_start>mapping=dict((k self._value(v))<for>k,v mapping.items())<block_end>self.backend.set_multi(mapping)<block_end><def_stmt>delete self key<block_start>"""Remove a value from the cache. 
This operation is idempotent (can be called multiple times, or on a non-existent key, safely) """<if_stmt>self.key_mangler<block_start>key=self.key_mangler(key)<block_end>self.backend.delete(key)<block_end><def_stmt>delete_multi self keys<block_start>"""Remove multiple values from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) .. versionadded:: 0.5.0 """<if_stmt>self.key_mangler<block_start>keys=list(map(<lambda>key:self.key_mangler(key) keys))<block_end>self.backend.delete_multi(keys)<block_end><def_stmt>cache_on_arguments self namespace=<none> expiration_time=<none> should_cache_fn=<none> to_str=compat.string_type function_key_generator=<none><block_start>"""A function decorator that will cache the return value of the function using a key derived from the function itself and its arguments. The decorator internally makes use of the :meth:`.CacheRegion.get_or_create` method to access the cache and conditionally call the function. See that method for additional behavioral details. E.g.:: @someregion.cache_on_arguments() def generate_something(x, y): return somedatabase.query(x, y) The decorated function can then be called normally, where data will be pulled from the cache region unless a new value is needed:: result = generate_something(5, 6) The function is also given an attribute ``invalidate()``, which provides for invalidation of the value. Pass to ``invalidate()`` the same arguments you'd pass to the function itself to represent a particular value:: generate_something.invalidate(5, 6) Another attribute ``set()`` is added to provide extra caching possibilities relative to the function. This is a convenience method for :meth:`.CacheRegion.set` which will store a given value directly without calling the decorated function. The value to be cached is passed as the first argument, and the arguments which would normally be passed to the function should follow:: generate_something.set(3, 5, 6) The above example is equivalent to calling ``generate_something(5, 6)``, if the function were to produce the value ``3`` as the value to be cached. .. versionadded:: 0.4.1 Added ``set()`` method to decorated function. Similar to ``set()`` is ``refresh()``. This attribute will invoke the decorated function and populate a new value into the cache with the new value, as well as returning that value:: newvalue = generate_something.refresh(5, 6) .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated function. Lastly, the ``get()`` method returns either the value cached for the given key, or the token ``NO_VALUE`` if no such key exists:: value = generate_something.get(5, 6) .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. The default key generation will use the name of the function, the module name for the function, the arguments passed, as well as an optional "namespace" parameter in order to generate a cache key. Given a function ``one`` inside the module ``myapp.tools``:: @region.cache_on_arguments(namespace="foo") def one(a, b): return a + b Above, calling ``one(3, 4)`` will produce a cache key as follows:: myapp.tools:one|foo|3 4 The key generator will ignore an initial argument of ``self`` or ``cls``, making the decorator suitable (with caveats) for use with instance or class methods. 
Given the example:: class MyClass(object): @region.cache_on_arguments(namespace="foo") def one(self, a, b): return a + b The cache key above for ``MyClass().one(3, 4)`` will again produce the same cache key of ``myapp.tools:one|foo|3 4`` - the name ``self`` is skipped. The ``namespace`` parameter is optional, and is used normally to disambiguate two functions of the same name within the same module, as can occur when decorating instance or class methods as below:: class MyClass(object): @region.cache_on_arguments(namespace='MC') def somemethod(self, x, y): "" class MyOtherClass(object): @region.cache_on_arguments(namespace='MOC') def somemethod(self, x, y): "" Above, the ``namespace`` parameter disambiguates between ``somemethod`` on ``MyClass`` and ``MyOtherClass``. Python class declaration mechanics otherwise prevent the decorator from having awareness of the ``MyClass`` and ``MyOtherClass`` names, as the function is received by the decorator before it becomes an instance method. The function key generation can be entirely replaced on a per-region basis using the ``function_key_generator`` argument present on :func:`.make_region` and :class:`.CacheRegion`. If defaults to :func:`.function_key_generator`. :param namespace: optional string argument which will be established as part of the cache key. This may be needed to disambiguate functions of the same name within the same source file, such as those associated with classes - note that the decorator itself can't see the parent class on a function as the class is being declared. :param expiration_time: if not None, will override the normal expiration time. May be specified as a callable, taking no arguments, that returns a value to be used as the ``expiration_time``. This callable will be called whenever the decorated function itself is called, in caching or retrieving. Thus, this can be used to determine a *dynamic* expiration time for the cached function result. Example use cases include "cache the result until the end of the day, week or time period" and "cache until a certain date or time passes". .. versionchanged:: 0.5.0 ``expiration_time`` may be passed as a callable to :meth:`.CacheRegion.cache_on_arguments`. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`. .. versionadded:: 0.4.3 :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_key_generator: a function that will produce a "cache key". This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. 
seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` """<line_sep>expiration_time_is_callable=compat.callable(expiration_time)<if_stmt>function_key_generator<is><none><block_start>function_key_generator=self.function_key_generator<block_end><def_stmt>decorator fn<block_start><if_stmt>to_str<is>compat.string_type# backwards compatible <block_start>key_generator=function_key_generator(namespace fn)<block_end><else_stmt><block_start>key_generator=function_key_generator(namespace fn to_str=to_str)<block_end>@wraps(fn)<def_stmt>decorate *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>@wraps(fn)<def_stmt>creator <block_start><return>fn(*arg **kw)<block_end>timeout=expiration_time()<if>expiration_time_is_callable<else>expiration_time<line_sep><return>self.get_or_create(key creator timeout should_cache_fn)<block_end><def_stmt>invalidate *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>self.delete(key)<block_end><def_stmt>set_ value *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>self.set(key value)<block_end><def_stmt>get *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep><return>self.get(key)<block_end><def_stmt>refresh *arg **kw<block_start>key=key_generator(*arg **kw)<line_sep>value=fn(*arg **kw)<line_sep>self.set(key value)<line_sep><return>value<block_end>decorate.set=set_<line_sep>decorate.invalidate=invalidate<line_sep>decorate.refresh=refresh<line_sep>decorate.get=get<line_sep>decorate.original=fn<line_sep><return>decorate<block_end><return>decorator<block_end><def_stmt>cache_multi_on_arguments self namespace=<none> expiration_time=<none> should_cache_fn=<none> asdict=<false> to_str=compat.string_type function_multi_key_generator=<none><block_start>"""A function decorator that will cache multiple return values from the function using a sequence of keys derived from the function itself and the arguments passed to it. This method is the "multiple key" analogue to the :meth:`.CacheRegion.cache_on_arguments` method. Example:: @someregion.cache_multi_on_arguments() def generate_something(*keys): return [ somedatabase.query(key) for key in keys ] The decorated function can be called normally. The decorator will produce a list of cache keys using a mechanism similar to that of :meth:`.CacheRegion.cache_on_arguments`, combining the name of the function with the optional namespace and with the string form of each key. It will then consult the cache using the same mechanism as that of :meth:`.CacheRegion.get_multi` to retrieve all current values; the originally passed keys corresponding to those values which aren't generated or need regeneration will be assembled into a new argument list, and the decorated function is then called with that subset of arguments. The returned result is a list:: result = generate_something("key1", "key2", "key3") The decorator internally makes use of the :meth:`.CacheRegion.get_or_create_multi` method to access the cache and conditionally call the function. See that method for additional behavioral details. Unlike the :meth:`.CacheRegion.cache_on_arguments` method, :meth:`.CacheRegion.cache_multi_on_arguments` works only with a single function signature, one which takes a simple list of keys as arguments. 
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function is also provided with a ``set()`` method, which here accepts a mapping of keys and values to set in the cache:: generate_something.set({"k1": "value1", "k2": "value2", "k3": "value3"}) ...an ``invalidate()`` method, which has the effect of deleting the given sequence of keys using the same mechanism as that of :meth:`.CacheRegion.delete_multi`:: generate_something.invalidate("k1", "k2", "k3") ...a ``refresh()`` method, which will call the creation function, cache the new values, and return them:: values = generate_something.refresh("k1", "k2", "k3") ...and a ``get()`` method, which will return values based on the given arguments:: values = generate_something.get("k1", "k2", "k3") .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments` have the same meaning as those passed to :meth:`.CacheRegion.cache_on_arguments`. :param namespace: optional string argument which will be established as part of each cache key. :param expiration_time: if not None, will override the normal expiration time. May be passed as an integer or a callable. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create_multi`. This function is given a value as returned by the creator, and only if it returns True will that value be placed in the cache. :param asdict: if ``True``, the decorated function should return its result as a dictionary of keys->values, and the final result of calling the decorated function will also be a dictionary. If left at its default value of ``False``, the decorated function should return its result as a list of values, and the final result of calling the decorated function will also be a list. When ``asdict==True`` if the dictionary returned by the decorated function is missing keys, those keys will not be cached. :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_multi_key_generator: a function that will produce a list of keys. This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. 
seealso:: :meth:`.CacheRegion.cache_on_arguments` :meth:`.CacheRegion.get_or_create_multi` """<line_sep>expiration_time_is_callable=compat.callable(expiration_time)<if_stmt>function_multi_key_generator<is><none><block_start>function_multi_key_generator=self.function_multi_key_generator<block_end><def_stmt>decorator fn<block_start>key_generator=function_multi_key_generator(namespace fn to_str=to_str)<line_sep>@wraps(fn)<def_stmt>decorate *arg **kw<block_start>cache_keys=arg<line_sep>keys=key_generator(*arg **kw)<line_sep>key_lookup=dict(zip(keys cache_keys))<line_sep>@wraps(fn)<def_stmt>creator *keys_to_create<block_start><return>fn(*[key_lookup[k]<for>k keys_to_create])<block_end>timeout=expiration_time()<if>expiration_time_is_callable<else>expiration_time<if_stmt>asdict<block_start><def_stmt>dict_create *keys<block_start>d_values=creator(*keys)<line_sep><return>[d_values.get(key_lookup[k] NO_VALUE)<for>k keys]<block_end><def_stmt>wrap_cache_fn value<block_start><if_stmt>value<is>NO_VALUE<block_start><return><false><block_end><elif_stmt><not>should_cache_fn<block_start><return><true><block_end><else_stmt><block_start><return>should_cache_fn(value)<block_end><block_end>result=self.get_or_create_multi(keys dict_create timeout wrap_cache_fn)<line_sep>result=dict((k v)<for>k,v zip(cache_keys result)<if>v<is><not>NO_VALUE)<block_end><else_stmt><block_start>result=self.get_or_create_multi(keys creator timeout should_cache_fn)<block_end><return>result<block_end><def_stmt>invalidate *arg<block_start>keys=key_generator(*arg)<line_sep>self.delete_multi(keys)<block_end><def_stmt>set_ mapping<block_start>keys=list(mapping)<line_sep>gen_keys=key_generator(*keys)<line_sep>self.set_multi(dict((gen_key mapping[key])<for>gen_key,key zip(gen_keys keys)))<block_end><def_stmt>get *arg<block_start>keys=key_generator(*arg)<line_sep><return>self.get_multi(keys)<block_end><def_stmt>refresh *arg<block_start>keys=key_generator(*arg)<line_sep>values=fn(*arg)<if_stmt>asdict<block_start>self.set_multi(dict(zip(keys [values[a]<for>a arg])))<line_sep><return>values<block_end><else_stmt><block_start>self.set_multi(dict(zip(keys values)))<line_sep><return>values<block_end><block_end>decorate.set=set_<line_sep>decorate.invalidate=invalidate<line_sep>decorate.refresh=refresh<line_sep>decorate.get=get<line_sep><return>decorate<block_end><return>decorator<block_end><block_end><def_stmt>make_region *arg **kw<block_start>"""Instantiate a new :class:`.CacheRegion`. Currently, :func:`.make_region` is a passthrough to :class:`.CacheRegion`. See that class for constructor arguments. """<line_sep><return>CacheRegion(*arg **kw)<block_end>
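# --- Hedged usage sketch (illustration only, not part of the module above) ---
# The decorators documented above are easiest to read from the caller's side.
# This sketch assumes the built-in "dogpile.cache.memory" backend; the
# load_user function and its argument are hypothetical.
from dogpile.cache import make_region

region = make_region().configure(
    "dogpile.cache.memory",
    expiration_time=300,  # seconds before a cached value is considered stale
)

@region.cache_on_arguments(namespace="users")
def load_user(user_id):
    # Expensive lookup; only runs on a cache miss for this user_id.
    return {"id": user_id}

load_user(42)                   # computes and caches under a key like "...|users|42"
load_user(42)                   # second call is served from the cache
load_user.invalidate(42)        # drops the cached entry
load_user.set({"id": 42}, 42)   # stores a value without calling the function
fresh = load_user.refresh(42)   # recomputes, re-caches and returns the value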
<import_stmt>os<import_stmt>unittest<import_stmt>torch<import_stmt>torch.distributed<as>dist<import_from_stmt>torch.multiprocessing Process<import_stmt>torch.nn<as>nn<import_from_stmt>machina.optims DistributedAdamW<def_stmt>init_processes rank world_size function backend='tcp'<block_start>os.environ['MASTER_ADDR']='127.0.0.1'<line_sep>os.environ['MASTER_PORT']='29500'<line_sep>dist.init_process_group(backend rank=rank world_size=world_size)<line_sep>function(rank world_size)<block_end><class_stmt>TestDistributedAdamW(unittest.TestCase)<block_start><def_stmt>test_step self<block_start><def_stmt>_run rank world_size<block_start>model=nn.Linear(10 1)<line_sep>optimizer=DistributedAdamW(model.parameters())<line_sep>optimizer.zero_grad()<line_sep>loss=model(torch.ones(10).float())<line_sep>loss.backward()<line_sep>optimizer.step()<block_end>processes=[]<line_sep>world_size=4<for_stmt>rank range(world_size)<block_start>p=Process(target=init_processes args=(rank world_size _run))<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join()<block_end><block_end><block_end>
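# --- Hedged note on the rendezvous used in the test above ---
# init_processes defaults to backend='tcp', which current PyTorch releases no
# longer provide; the same single-machine rendezvous with the commonly
# available "gloo" backend would look like the sketch below (address and port
# are illustrative).
import os
import torch.distributed as dist

def init_gloo_process(rank, world_size, function):
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group('gloo', rank=rank, world_size=world_size)
    function(rank, world_size)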
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_stmt>torch<import_from_stmt>neuralcompression.functional soft_round soft_round_inverse<def_stmt>test_soft_round_inverse <block_start>x=torch.linspace(-2.0 2.0 50)<line_sep>torch.testing.assert_close(x soft_round_inverse(x alpha=1e-13) )<line_sep>x=torch.tensor([-1.25 -0.75 0.75 1.25])<line_sep>torch.testing.assert_close(x soft_round_inverse(soft_round(x alpha=2.0) alpha=2.0) )<for_stmt>offset range(-5 5)<block_start>x=torch.linspace(offset+0.001 offset+0.999 100)<line_sep>torch.testing.assert_close(torch.ceil(x)-0.5 soft_round_inverse(x alpha=5000.0) atol=0.001 rtol=0.002 )<block_end><block_end>
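# --- Hedged reference sketch for the behaviour exercised above ---
# The assertions rely on two limits of soft rounding: alpha -> 0 approaches
# the identity (hence the alpha=1e-13 check) and large alpha approaches hard
# rounding, with soft_round_inverse undoing soft_round at a matching alpha.
# One common formulation (an assumption here, not copied from the library) is
#     r = x - floor(x) - 0.5
#     soft_round(x, alpha) = floor(x) + 0.5 + 0.5 * tanh(alpha * r) / tanh(alpha / 2)
import math
import torch

def soft_round_sketch(x: torch.Tensor, alpha: float) -> torch.Tensor:
    m = torch.floor(x)
    r = x - m - 0.5
    return m + 0.5 + 0.5 * torch.tanh(alpha * r) / math.tanh(alpha / 2.0)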
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for car_layers."""<import_from_stmt>lingvo compat<as>tf<import_from_stmt>lingvo.core py_utils<import_from_stmt>lingvo.core test_utils<import_from_stmt>lingvo.tasks.car car_layers<class_stmt>CarLayersTest(test_utils.TestCase)<block_start><def_stmt>_testNestedOutShape self p input_shape expected_shape<block_start>batch_size,num_points,_=input_shape<line_sep>g=tf.Graph()<with_stmt>g.as_default()<block_start>net=p.Instantiate()<line_sep>input_data=py_utils.NestedMap(points=tf.random.uniform(input_shape[:-1]+(3 )) features=tf.random.uniform(input_shape) padding=tf.zeros((batch_size num_points) dtype=tf.float32) label=tf.random.uniform((batch_size ) minval=0 maxval=16 dtype=tf.int32))<line_sep>result=net.FPropDefaultTheta(input_data)<block_end><with_stmt>self.session(graph=g)<block_start>self.evaluate(tf.global_variables_initializer())<line_sep>np_result=self.evaluate(result)<block_end>grouped_points_result=np_result.grouped_points<line_sep>self.assertEqual(grouped_points_result.features.shape expected_shape.grouped_points.features)<line_sep>self.assertEqual(grouped_points_result.points.shape expected_shape.grouped_points.points)<line_sep>self.assertEqual(grouped_points_result.padding.shape expected_shape.grouped_points.padding)<line_sep>query_points_result=np_result.query_points<line_sep>self.assertEqual(query_points_result.points.shape expected_shape.query_points.points)<line_sep>self.assertEqual(query_points_result.padding.shape expected_shape.query_points.padding)<block_end><def_stmt>testSamplingAndGrouping self<block_start><for_stmt>num_points [1024 256]<block_start><for_stmt>input_dims [3 6 9]<block_start><for_stmt>group_size [32 64]<block_start>p=car_layers.SamplingAndGroupingLayer.Params().Set(name='SampleGroupTest' num_samples=256 ball_radius=0.2 group_size=group_size sample_neighbors_uniformly=<true>)<line_sep>grouped_points_shape=py_utils.NestedMap(features=(8 256 group_size input_dims) points=(8 256 group_size 3) padding=(8 256 group_size))<line_sep>query_points_shape=py_utils.NestedMap(points=(8 256 3) padding=(8 256))<line_sep>expected_shape=py_utils.NestedMap({'grouped_points':grouped_points_shape 'query_points':query_points_shape})<line_sep>self._testNestedOutShape(p (8 num_points input_dims) expected_shape)<block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
<import_stmt>oblate<import_stmt>numpy<as>np<import_stmt>pytest<line_sep># TODO!
<import_from_stmt>CommonServerPython *<line_sep>''' IMPORTS '''<import_stmt>re<import_stmt>requests<line_sep># Disable insecure warnings requests.packages.urllib3.disable_warnings()<line_sep>''' GLOBALS/PARAMS '''<line_sep>VENDOR='Have I Been Pwned? V2'<line_sep>MAX_RETRY_ALLOWED=demisto.params().get('max_retry_time' -1)<line_sep>API_KEY=demisto.params().get('api_key')<line_sep>USE_SSL=<not>demisto.params().get('insecure' <false>)<line_sep>BASE_URL='https://haveibeenpwned.com/api/v3'<line_sep>HEADERS={'hibp-api-key':API_KEY 'user-agent':'DBOT-API' 'Content-Type':'application/json' 'Accept':'application/json'}<line_sep>DEFAULT_DBOT_SCORE_EMAIL=2<if>demisto.params().get('default_dbot_score_email')<eq>'SUSPICIOUS'<else>3<line_sep>DEFAULT_DBOT_SCORE_DOMAIN=2<if>demisto.params().get('default_dbot_score_domain')<eq>'SUSPICIOUS'<else>3<line_sep>SUFFIXES={"email":'/breachedaccount/' "domain":'/breaches?domain=' "username":'/breachedaccount/' "paste":'/pasteaccount/' "email_truncate_verified":'?truncateResponse=false&includeUnverified=true' "domain_truncate_verified":'&truncateResponse=false&includeUnverified=true' "username_truncate_verified":'?truncateResponse=false&includeUnverified=true'}<line_sep>RETRIES_END_TIME=datetime.min<line_sep>''' HELPER FUNCTIONS '''<def_stmt>http_request method url_suffix params=<none> data=<none><block_start><while_stmt><true><block_start>res=requests.request(method BASE_URL+url_suffix verify=USE_SSL params=params data=data headers=HEADERS)<if_stmt>res.status_code<ne>429# Rate limit response code <block_start><break><block_end><if_stmt>datetime.now()<g>RETRIES_END_TIME<block_start>return_error('Max retry time has exceeded.')<block_end>wait_regex=re.search(r'\d+' res.json()['message'])<if_stmt>wait_regex<block_start>wait_amount=wait_regex.group()<block_end><else_stmt><block_start>demisto.error('failed extracting wait time will use default (5). Res body: {}'.format(res.text))<line_sep>wait_amount=5<block_end><if_stmt>datetime.now()+timedelta(seconds=int(wait_amount))<g>RETRIES_END_TIME<block_start>return_error('Max retry time has exceeded.')<block_end>time.sleep(int(wait_amount))<block_end><if_stmt>res.status_code<eq>404<block_start><return><none><block_end><if_stmt><not>res.status_code<eq>200<block_start><if_stmt><not>res.status_code<eq>401<block_start>demisto.error('Error in API call to Pwned Integration [%d]. 
Full text: %s'%(res.status_code res.text))<block_end>return_error('Error in API call to Pwned Integration [%d] - %s'%(res.status_code res.reason))<line_sep><return><none><block_end><return>res.json()<block_end><def_stmt>html_description_to_human_readable breach_description<block_start>""" Converting from html description to hr :param breach_description: Description of breach from API response :return: Description string that altered HTML urls to clickable urls for better readability in war-room """<line_sep>html_link_pattern=re.compile('<a href="(.+?)"(.+?)>(.+?)</a>')<line_sep>patterns_found=html_link_pattern.findall(breach_description)<for_stmt>link patterns_found<block_start>html_actual_address=link[0]<line_sep>html_readable_name=link[2]<line_sep>link_from_desc='['+html_readable_name+']'+'('+html_actual_address+')'<line_sep>breach_description=re.sub(html_link_pattern link_from_desc breach_description count=1)<block_end><return>breach_description<block_end><def_stmt>data_to_markdown query_type query_arg api_res api_paste_res=<none><block_start>records_found=<false><line_sep>md='### Have I Been Pwned query for '+query_type.lower()+': *'+query_arg+'*\n'<if_stmt>api_res<block_start>records_found=<true><for_stmt>breach api_res<block_start>verified_breach='Verified'<if>breach['IsVerified']<else>'Unverified'<line_sep>md<augadd>'#### '+breach['Title']+' ('+breach['Domain']+'): '+str(breach['PwnCount'])+' records breached ['+verified_breach+' breach]\n'<line_sep>md<augadd>'Date: **'+breach['BreachDate']+'**\n\n'<line_sep>md<augadd>html_description_to_human_readable(breach['Description'])+'\n'<line_sep>md<augadd>'Data breached: **'+','.join(breach['DataClasses'])+'**\n'<block_end><block_end><if_stmt>api_paste_res<block_start>records_found=<true><line_sep>pastes_list=[]<for_stmt>paste_breach api_paste_res<block_start>paste_entry={'Source':paste_breach['Source'] 'Title':paste_breach['Title'] 'ID':paste_breach['Id'] 'Date':'' 'Amount of emails in paste':str(paste_breach['EmailCount'])}<if_stmt>paste_breach['Date']<block_start>paste_entry['Date']=paste_breach['Date'].split('T')[0]<block_end>pastes_list.append(paste_entry)<block_end>md<augadd>tableToMarkdown('The email address was found in the following "Pastes":' pastes_list ['ID' 'Title' 'Date' 'Source' 'Amount of emails in paste'])<block_end><if_stmt><not>records_found<block_start>md<augadd>'No records found'<block_end><return>md<block_end><def_stmt>create_dbot_score_dictionary indicator_value indicator_type dbot_score<block_start><return>{'Indicator':indicator_value 'Type':indicator_type 'Vendor':VENDOR 'Score':dbot_score}<block_end><def_stmt>create_context_entry context_type context_main_value comp_sites comp_pastes malicious_score<block_start>context_dict=dict()# dict <if_stmt>context_type<eq>'email'<block_start>context_dict['Address']=context_main_value<block_end><else_stmt><block_start>context_dict['Name']=context_main_value<block_end>context_dict['Pwned-V2']={'Compromised':{'Vendor':VENDOR 'Reporters':', '.join(comp_sites+comp_pastes)}}<if_stmt>malicious_score<eq>3<block_start>context_dict['Malicious']=add_malicious_to_context(context_type)<block_end><return>context_dict<block_end><def_stmt>add_malicious_to_context malicious_type<block_start><return>{'Vendor':VENDOR 'Description':'The '+malicious_type+' has been compromised'}<block_end><def_stmt>email_to_entry_context email api_email_res api_paste_res<block_start>dbot_score=0<line_sep>comp_email=dict()# type: dict comp_sites=sorted([item['Title']<for>item 
api_email_res])<line_sep>comp_pastes=sorted(set(item['Source']<for>item api_paste_res))<if_stmt>len(comp_sites)<g>0<block_start>dbot_score=DEFAULT_DBOT_SCORE_EMAIL<line_sep>email_context=create_context_entry('email' email comp_sites comp_pastes DEFAULT_DBOT_SCORE_EMAIL)<line_sep>comp_email[outputPaths['email']]=email_context<block_end>comp_email['DBotScore']=create_dbot_score_dictionary(email 'email' dbot_score)<line_sep><return>comp_email<block_end><def_stmt>domain_to_entry_context domain api_res<block_start>comp_sites=[item['Title']<for>item api_res]<line_sep>comp_sites=sorted(comp_sites)<line_sep>comp_domain=dict()# type: dict dbot_score=0<if_stmt>len(comp_sites)<g>0<block_start>dbot_score=DEFAULT_DBOT_SCORE_DOMAIN<line_sep>domain_context=create_context_entry('domain' domain comp_sites [] DEFAULT_DBOT_SCORE_DOMAIN)<line_sep>comp_domain[outputPaths['domain']]=domain_context<block_end>comp_domain['DBotScore']=create_dbot_score_dictionary(domain 'domain' dbot_score)<line_sep><return>comp_domain<block_end><def_stmt>set_retry_end_time <block_start><global>RETRIES_END_TIME<if_stmt>MAX_RETRY_ALLOWED<ne>-1<block_start>RETRIES_END_TIME=datetime.now()+timedelta(seconds=int(MAX_RETRY_ALLOWED))<block_end><block_end>''' COMMANDS + REQUESTS FUNCTIONS '''<def_stmt>test_module args_dict<block_start>""" If the http request was successful the test will return OK :return: 3 arrays of outputs """<line_sep>http_request('GET' SUFFIXES.get("username" '')+'test')<line_sep><return>['ok'] [<none>] [<none>]<block_end><def_stmt>pwned_email_command args_dict<block_start>""" Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the email list is needed :return: 3 arrays of outputs """<line_sep>email_list=argToList(args_dict.get('email' ''))<line_sep>api_email_res_list,api_paste_res_list=pwned_email(email_list)<line_sep>md_list=[]<line_sep>ec_list=[]<for_stmt>email,api_email_res,api_paste_res zip(email_list api_email_res_list api_paste_res_list)<block_start>md_list.append(data_to_markdown('Email' email api_email_res api_paste_res))<line_sep>ec_list.append(email_to_entry_context(email api_email_res<or>[] api_paste_res<or>[]))<block_end><return>md_list ec_list api_email_res_list<block_end><def_stmt>pwned_email email_list<block_start>""" Executing the http requests :param email_list: the email list that needed for the http requests :return: 2 arrays of http requests outputs """<line_sep>api_email_res_list=[]<line_sep>api_paste_res_list=[]<for_stmt>email email_list<block_start>email_suffix=SUFFIXES.get("email")+email+SUFFIXES.get("email_truncate_verified")<line_sep>paste_suffix=SUFFIXES.get("paste")+email<line_sep>api_email_res_list.append(http_request('GET' url_suffix=email_suffix))<line_sep>api_paste_res_list.append(http_request('GET' url_suffix=paste_suffix))<block_end><return>api_email_res_list api_paste_res_list<block_end><def_stmt>pwned_domain_command args_dict<block_start>""" Executing the pwned request for domains list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the domain list is needed :return: 3 arrays of outputs """<line_sep>domain_list=argToList(args_dict.get('domain' ''))<line_sep>api_res_list=pwned_domain(domain_list)<line_sep>md_list=[]<line_sep>ec_list=[]<for_stmt>domain,api_res zip(domain_list api_res_list)<block_start>md_list.append(data_to_markdown('Domain' domain 
api_res))<line_sep>ec_list.append(domain_to_entry_context(domain api_res<or>[]))<block_end><return>md_list ec_list api_res_list<block_end><def_stmt>pwned_domain domain_list<block_start>""" Executing the http request :param domain_list: the domains list that needed for the http requests :return: an array of http requests outputs """<line_sep>api_res_list=[]<for_stmt>domain domain_list<block_start>suffix=SUFFIXES.get("domain")+domain+SUFFIXES.get("domain_truncate_verified")<line_sep>api_res_list.append(http_request('GET' url_suffix=suffix))<block_end><return>api_res_list<block_end><def_stmt>pwned_username_command args_dict<block_start>""" Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the username list is needed :return: 3 arrays of outputs """<line_sep>username_list=argToList(args_dict.get('username' ''))<line_sep>api_res_list=pwned_username(username_list)<line_sep>md_list=[]<line_sep>ec_list=[]<for_stmt>username,api_res zip(username_list api_res_list)<block_start>md_list.append(data_to_markdown('Username' username api_res))<line_sep>ec_list.append(domain_to_entry_context(username api_res<or>[]))<block_end><return>md_list ec_list api_res_list<block_end><def_stmt>pwned_username username_list<block_start>""" Executing the http request :param username_list: the username list that needed for the http requests :return: an array of http requests outputs """<line_sep>api_res_list=[]<for_stmt>username username_list<block_start>suffix=SUFFIXES.get("username")+username+SUFFIXES.get("username_truncate_verified")<line_sep>api_res_list.append(http_request('GET' url_suffix=suffix))<block_end><return>api_res_list<block_end>command=demisto.command()<line_sep>LOG('Command being called is: {}'.format(command))<try_stmt><block_start>handle_proxy()<line_sep>set_retry_end_time()<line_sep>commands={'test-module':test_module 'email':pwned_email_command 'pwned-email':pwned_email_command 'domain':pwned_domain_command 'pwned-domain':pwned_domain_command 'pwned-username':pwned_username_command}<if_stmt>command<in>commands<block_start>md_list,ec_list,api_email_res_list=commands[command](demisto.args())<for_stmt>md,ec,api_paste_res zip(md_list ec_list api_email_res_list)<block_start>return_outputs(md ec api_paste_res)<block_end><block_end><block_end># Log exceptions <except_stmt>Exception<as>e<block_start>return_error(str(e))<block_end>
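# --- Hedged standalone sketch of the breach lookup wrapped above ---
# Outside the Demisto plumbing, the email query reduces to one authenticated
# GET against the v3 API; endpoint, header names and query flags mirror the
# BASE_URL/HEADERS/SUFFIXES globals defined earlier, and the API key value is
# a placeholder.
import requests

def breaches_for_email(email, api_key):
    url = 'https://haveibeenpwned.com/api/v3/breachedaccount/' + email
    res = requests.get(
        url,
        params={'truncateResponse': 'false', 'includeUnverified': 'true'},
        headers={'hibp-api-key': api_key, 'user-agent': 'DBOT-API'},
    )
    if res.status_code == 404:  # the API reports "no breaches found" as 404
        return []
    res.raise_for_status()
    return res.json()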
<import_from_stmt>moshmosh.extension Extension<import_from_stmt>moshmosh.ast_compat ast<class_stmt>PipelineVisitor(ast.NodeTransformer)<block_start>""" `a | f -> f(a)`, recursively """<def_stmt>__init__ self activation<block_start>self.activation=activation<block_end><def_stmt>visit_BinOp self n:ast.BinOp<block_start><if_stmt>n.lineno<in>self.activation<and>isinstance(n.op ast.BitOr)<block_start><return>ast.Call(self.visit(n.right) [self.visit(n.left)] [] lineno=n.lineno col_offset=n.col_offset)<block_end><return>self.generic_visit(n)<block_end><block_end><class_stmt>Pipeline(Extension)<block_start>identifier="pipeline"<def_stmt>__init__ self<block_start>self.visitor=PipelineVisitor(self.activation)<block_end><def_stmt>rewrite_ast self node<block_start><return>self.visitor.visit(node)<block_end><block_end>
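# --- Hedged illustration of the rewrite performed by PipelineVisitor ---
# Stripped of moshmosh's per-line activation bookkeeping, the transform turns
# every `a | f` into `f(a)`. The simplified visitor below is for illustration
# only and rewrites all BitOr expressions unconditionally.
import ast

class _AllLinesPipeline(ast.NodeTransformer):
    def visit_BinOp(self, node):
        self.generic_visit(node)
        if isinstance(node.op, ast.BitOr):
            call = ast.Call(func=node.right, args=[node.left], keywords=[])
            return ast.copy_location(call, node)
        return node

tree = ast.parse("5 | str | print")
tree = ast.fix_missing_locations(_AllLinesPipeline().visit(tree))
exec(compile(tree, "<pipeline>", "exec"))  # equivalent to print(str(5))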
<import_from_stmt>itertools product<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>alibi_detect.utils.discretizer Discretizer<line_sep>x=np.random.rand(10 4)<line_sep>n_features=x.shape[1]<line_sep>feature_names=[str(_)<for>_ range(n_features)]<line_sep>categorical_features=[[] [1 3]]<line_sep>percentiles=[list(np.arange(25 100 25)) list(np.arange(10 100 10))]<line_sep>tests=list(product(categorical_features percentiles))<line_sep>n_tests=len(tests)<line_sep>@pytest.fixture<def_stmt>cats_and_percentiles request<block_start>cat,perc=tests[request.param]<line_sep><return>cat perc<block_end>@pytest.mark.parametrize('cats_and_percentiles' list(range(n_tests)) indirect=<true>)<def_stmt>test_discretizer cats_and_percentiles<block_start>cat,perc=cats_and_percentiles<line_sep>disc=Discretizer(x cat feature_names perc)<line_sep>to_disc=list(disc.names.keys())<assert_stmt>len(to_disc)<eq>(x.shape[1]-len(cat))<line_sep>x_disc=disc.discretize(x)<for_stmt>k,v disc.names.items()<block_start><assert_stmt>len(v)<le>len(perc)+1<assert_stmt>callable(disc.lambdas[k])<assert_stmt>(x_disc[: k].min()<eq>0).all()<assert_stmt>(x_disc[: k].max()<eq>len(perc)).all()<block_end><for_stmt>i range(x.shape[1])<block_start><if_stmt>i<not><in>to_disc<block_start><assert_stmt>(x_disc[: i]<eq>x[: i]).all()<block_end><block_end><block_end>
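# --- Hedged sketch of the discretisation the test verifies ---
# Conceptually each non-categorical column is cut at the requested percentiles
# and values become bin indices, which is what the min()/max() assertions
# check; np.digitize reproduces the idea, though alibi_detect's Discretizer
# may treat the bin edges slightly differently.
import numpy as np

def discretize_column(col, percentiles):
    bins = np.percentile(col, percentiles)
    return np.digitize(col, bins)  # indices in 0 .. len(percentiles)

col = np.random.rand(100)
binned = discretize_column(col, [25, 50, 75])
assert binned.min() == 0 and binned.max() == 3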
<import_stmt>logging<import_stmt>unittest<import_from_stmt>pyinstrument Profiler<import_from_stmt>nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils get_test_nuplan_scenario<import_from_stmt>nuplan.planning.simulation.history.simulation_history_buffer SimulationHistoryBuffer<import_from_stmt>nuplan.planning.simulation.observation.idm_agents IDMAgents<import_from_stmt>nuplan.planning.simulation.simulation_time_controller.simulation_iteration SimulationIteration<line_sep>logger=logging.getLogger(__name__)<line_sep>logging.basicConfig(level=logging.INFO)<class_stmt>TestProfileIDM(unittest.TestCase)<block_start>""" Profiling test for IDM agents. """<def_stmt>setUp self<arrow><none><block_start>""" Inherited, see super class. """<line_sep>self.n_repeat_trials=1<line_sep>self.display_results=<true><line_sep>self.scenario=get_test_nuplan_scenario()<block_end><def_stmt>test_profile_idm_agent_observation self<arrow><none><block_start>"""Profile IDMAgents."""<line_sep>profiler=Profiler(interval=0.0001)<line_sep>profiler.start()<line_sep># How many times to repeat runtime test <for_stmt>_ range(self.n_repeat_trials)<block_start>observation=IDMAgents(target_velocity=10 min_gap_to_lead_agent=0.5 headway_time=1.5 accel_max=1.0 decel_max=2.0 scenario=self.scenario )<for_stmt>step range(self.scenario.get_number_of_iterations()-1)<block_start>iteration=SimulationIteration(time_point=self.scenario.get_time_point(step) index=step)<line_sep>next_iteration=SimulationIteration(time_point=self.scenario.get_time_point(step+1) index=step+1)<line_sep>buffer=SimulationHistoryBuffer.initialize_from_list(1 [self.scenario.get_ego_state_at_iteration(step)] [self.scenario.get_tracked_objects_at_iteration(step)] next_iteration.time_point.time_s-iteration.time_point.time_s )<line_sep>observation.update_observation(iteration next_iteration buffer)<block_end><block_end>profiler.stop()<if_stmt>self.display_results<block_start>logger.info(profiler.output_text(unicode=<true> color=<true>))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_stmt>os sys<import_from_stmt>os.path dirname<as>d<import_from_stmt>os.path abspath join<line_sep>root_dir=d(d(d(abspath(__file__))))<line_sep>sys.path.append(root_dir)<import_from_stmt>openprompt.data_utils.conditional_generation_dataset PROCESSORS<line_sep>base_path=os.path.join(root_dir "datasets/CondGen")<def_stmt>test_WebNLGProcessor <block_start>dataset_name="webnlg_2017"<line_sep>dataset_path=os.path.join(base_path dataset_name)<line_sep>processor=PROCESSORS[dataset_name.lower()]()<line_sep>train_dataset=processor.get_train_examples(dataset_path)<line_sep>valid_dataset=processor.get_train_examples(dataset_path)<line_sep>test_dataset=processor.get_test_examples(dataset_path)<assert_stmt>len(train_dataset)<eq>18025<assert_stmt>len(valid_dataset)<eq>18025<assert_stmt>len(test_dataset)<eq>4928<assert_stmt>test_dataset[0].text_a<eq>" | Abilene_Regional_Airport : cityServed : Abilene,_Texas"<assert_stmt>test_dataset[0].text_b<eq>""<assert_stmt>test_dataset[0].tgt_text<eq>"Abilene, Texas is served by the Abilene regional airport."<block_end>
<import_from_stmt>PhysicsTools.Heppy.analyzers.core.Analyzer Analyzer<import_from_stmt>PhysicsTools.Heppy.analyzers.core.AutoHandle AutoHandle<import_from_stmt>PhysicsTools.Heppy.physicsobjects.Tau Tau<import_from_stmt>PhysicsTools.HeppyCore.utils.deltar deltaR matchObjectCollection3<import_stmt>PhysicsTools.HeppyCore.framework.config<as>cfg<class_stmt>TauAnalyzer(Analyzer)<block_start><def_stmt>__init__ self cfg_ana cfg_comp looperName<block_start>super(TauAnalyzer self).__init__(cfg_ana cfg_comp looperName)<block_end>#---------------------------------------- # DECLARATION OF HANDLES OF LEPTONS STUFF #---------------------------------------- <def_stmt>declareHandles self<block_start>super(TauAnalyzer self).declareHandles()<line_sep>self.handles['taus']=AutoHandle(('slimmedTaus' '') 'std::vector<pat::Tau>')<block_end><def_stmt>beginLoop self setup<block_start>super(TauAnalyzer self).beginLoop(setup)<line_sep>self.counters.addCounter('events')<line_sep>count=self.counters.counter('events')<line_sep>count.register('all events')<line_sep>count.register('has >=1 tau at preselection')<line_sep>count.register('has >=1 selected taus')<line_sep>count.register('has >=1 other taus')<block_end>#------------------ # MAKE LEPTON LISTS #------------------ <def_stmt>makeTaus self event<block_start>event.inclusiveTaus=[]<line_sep>event.selectedTaus=[]<line_sep>event.otherTaus=[]<line_sep>#get all alltaus=map(Tau self.handles['taus'].product())<line_sep>#make inclusive taus <for_stmt>tau alltaus<block_start>tau.associatedVertex=event.goodVertices[0]<if>len(event.goodVertices)<g>0<else>event.vertices[0]<line_sep>tau.lepVeto=<false><line_sep>tau.idDecayMode=tau.tauID("decayModeFinding")<line_sep>tau.idDecayModeNewDMs=tau.tauID("decayModeFindingNewDMs")<if_stmt>hasattr(self.cfg_ana 'inclusive_decayModeID')<and>self.cfg_ana.inclusive_decayModeID<and><not>tau.tauID(self.cfg_ana.inclusive_decayModeID)<block_start><continue><block_end>tau.inclusive_lepVeto=<false><if_stmt>self.cfg_ana.inclusive_vetoLeptons<block_start><for_stmt>lep event.selectedLeptons<block_start><if_stmt>deltaR(lep.eta() lep.phi() tau.eta() tau.phi())<l>self.cfg_ana.inclusive_leptonVetoDR<block_start>tau.inclusive_lepVeto=<true><block_end><block_end><if_stmt>tau.inclusive_lepVeto<block_start><continue><block_end><block_end><if_stmt>self.cfg_ana.inclusive_vetoLeptonsPOG<block_start><if_stmt><not>tau.tauID(self.cfg_ana.inclusive_tauAntiMuonID)<block_start>tau.inclusive_lepVeto=<true><block_end><if_stmt><not>tau.tauID(self.cfg_ana.inclusive_tauAntiElectronID)<block_start>tau.inclusive_lepVeto=<true><block_end><if_stmt>tau.inclusive_lepVeto<block_start><continue><block_end><block_end><if_stmt>tau.pt()<l>self.cfg_ana.inclusive_ptMin<block_start><continue><block_end><if_stmt>abs(tau.eta())<g>self.cfg_ana.inclusive_etaMax<block_start><continue><block_end><if_stmt>abs(tau.dxy())<g>self.cfg_ana.inclusive_dxyMax<or>abs(tau.dz())<g>self.cfg_ana.inclusive_dzMax<block_start><continue><block_end><def_stmt>id3 tau X<block_start>"""Create an integer equal to 1-2-3 for (loose,medium,tight)"""<line_sep><return>tau.tauID(X%"Loose")+tau.tauID(X%"Medium")+tau.tauID(X%"Tight")<block_end><def_stmt>id5 tau X<block_start>"""Create an integer equal to 1-2-3-4-5 for (very loose, loose, medium, tight, very tight)"""<line_sep><return>id3(tau X)+tau.tauID(X%"VLoose")+tau.tauID(X%"VTight")<block_end><def_stmt>id6 tau X<block_start>"""Create an integer equal to 1-2-3-4-5-6 for (very loose, loose, medium, tight, very tight, very very tight)"""<line_sep><return>id5(tau 
X)+tau.tauID(X%"VVTight")<block_end>tau.idMVA=id6(tau "by%sIsolationMVArun2v1DBoldDMwLT")<line_sep>tau.idMVANewDM=id6(tau "by%sIsolationMVArun2v1DBnewDMwLT")<line_sep>tau.idCI3hit=id3(tau "by%sCombinedIsolationDeltaBetaCorr3Hits")<line_sep>tau.idAntiMu=tau.tauID("againstMuonLoose3")+tau.tauID("againstMuonTight3")<line_sep>tau.idAntiE=id5(tau "againstElectron%sMVA6")<line_sep>#print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID)) <if_stmt>tau.tauID(self.cfg_ana.inclusive_tauID)<block_start>event.inclusiveTaus.append(tau)<block_end><block_end><for_stmt>tau event.inclusiveTaus<block_start>tau.loose_lepVeto=<false><if_stmt>self.cfg_ana.loose_vetoLeptons<block_start><for_stmt>lep event.selectedLeptons<block_start><if_stmt>deltaR(lep.eta() lep.phi() tau.eta() tau.phi())<l>self.cfg_ana.loose_leptonVetoDR<block_start>tau.loose_lepVeto=<true><block_end><block_end><block_end><if_stmt>self.cfg_ana.loose_vetoLeptonsPOG<block_start><if_stmt><not>tau.tauID(self.cfg_ana.loose_tauAntiMuonID)<block_start>tau.loose_lepVeto=<true><block_end><if_stmt><not>tau.tauID(self.cfg_ana.loose_tauAntiElectronID)<block_start>tau.loose_lepVeto=<true><block_end><block_end><if_stmt>tau.tauID(self.cfg_ana.loose_decayModeID)<and>tau.pt()<g>self.cfg_ana.loose_ptMin<and>abs(tau.eta())<l>self.cfg_ana.loose_etaMax<and>abs(tau.dxy())<l>self.cfg_ana.loose_dxyMax<and>abs(tau.dz())<l>self.cfg_ana.loose_dzMax<and>tau.tauID(self.cfg_ana.loose_tauID)<and><not>tau.loose_lepVeto<block_start>event.selectedTaus.append(tau)<block_end><else_stmt><block_start>event.otherTaus.append(tau)<block_end><block_end>event.inclusiveTaus.sort(key=<lambda>l:l.pt() reverse=<true>)<line_sep>event.selectedTaus.sort(key=<lambda>l:l.pt() reverse=<true>)<line_sep>event.otherTaus.sort(key=<lambda>l:l.pt() reverse=<true>)<line_sep>self.counters.counter('events').inc('all events')<if_stmt>len(event.inclusiveTaus)<block_start>self.counters.counter('events').inc('has >=1 tau at preselection')<block_end><if_stmt>len(event.selectedTaus)<block_start>self.counters.counter('events').inc('has >=1 selected taus')<block_end><if_stmt>len(event.otherTaus)<block_start>self.counters.counter('events').inc('has >=1 other taus')<block_end><block_end><def_stmt>matchTaus self event<block_start>match=matchObjectCollection3(event.inclusiveTaus event.gentaus deltaRMax=0.5)<for_stmt>lep event.inclusiveTaus<block_start>gen=match[lep]<line_sep>lep.mcMatchId=1<if>gen<else>0<line_sep>lep.genp=gen<block_end><block_end><def_stmt>process self event<block_start>self.readCollections(event.input)<line_sep>self.makeTaus(event)<if_stmt><not>self.cfg_comp.isMC<block_start><return><true><block_end><if_stmt>hasattr(event 'gentaus')<block_start>self.matchTaus(event)<block_end><return><true><block_end><block_end># Find the definitions of the tau ID strings here: # http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py setattr(TauAnalyzer "defaultConfig" cfg.Analyzer(class_object=TauAnalyzer # inclusive very loose hadronic tau selection inclusive_ptMin=18 inclusive_etaMax=9999 inclusive_dxyMax=1000. 
inclusive_dzMax=0.4 inclusive_vetoLeptons=<false> inclusive_leptonVetoDR=0.4 inclusive_decayModeID="decayModeFindingNewDMs" # ignored if not set or "" inclusive_tauID="decayModeFindingNewDMs" inclusive_vetoLeptonsPOG=<false> # If True, the following two IDs are required inclusive_tauAntiMuonID="" inclusive_tauAntiElectronID="" # loose hadronic tau selection loose_ptMin=18 loose_etaMax=9999 loose_dxyMax=1000. loose_dzMax=0.2 loose_vetoLeptons=<true> loose_leptonVetoDR=0.4 loose_decayModeID="decayModeFindingNewDMs" # ignored if not set or "" loose_tauID="byLooseCombinedIsolationDeltaBetaCorr3Hits" loose_vetoLeptonsPOG=<false> # If True, the following two IDs are required loose_tauAntiMuonID="againstMuonLoose3" loose_tauAntiElectronID="againstElectronLooseMVA5"))<line_sep>
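# --- Hedged standalone sketch of the working-point packing used above ---
# id3/id5/id6 add up the pass/fail flags of increasingly tight discriminator
# working points, so the result is a single small integer: 0 means all fail,
# N means the N-th (tightest passed) level. The generic pattern, independent
# of CMSSW, is simply:
def encode_working_points(flags_loosest_to_tightest):
    return sum(int(bool(f)) for f in flags_loosest_to_tightest)

assert encode_working_points([True, True, False]) == 2  # passes Loose and Medium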
# # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # <import_from_stmt>rlstructures logging<import_from_stmt>rlstructures.env_wrappers GymEnv GymEnvInf<import_from_stmt>rlstructures.tools weight_init<import_stmt>torch.nn<as>nn<import_stmt>copy<import_stmt>torch<import_stmt>time<import_stmt>numpy<as>np<import_stmt>torch.nn.functional<as>F<import_from_stmt>tutorial.tutorial_recurrent_policy.agent RecurrentAgent<import_from_stmt>tutorial.tutorial_recurrent_policy.a2c A2C<import_stmt>gym<import_from_stmt>gym.wrappers TimeLimit<line_sep># We write the 'create_env' and 'create_agent' function in the main file to allow these functions to be used with pickle when creating the batcher processes <def_stmt>create_gym_env env_name<block_start><return>gym.make(env_name)<block_end><def_stmt>create_env n_envs env_name=<none> max_episode_steps=<none> seed=<none><block_start>envs=[]<for_stmt>k range(n_envs)<block_start>e=create_gym_env(env_name)<line_sep>e=TimeLimit(e max_episode_steps=max_episode_steps)<line_sep>envs.append(e)<block_end><return>GymEnv(envs seed)<block_end><def_stmt>create_train_env n_envs env_name=<none> max_episode_steps=<none> seed=<none><block_start>envs=[]<for_stmt>k range(n_envs)<block_start>e=create_gym_env(env_name)<line_sep>e=TimeLimit(e max_episode_steps=max_episode_steps)<line_sep>envs.append(e)<block_end><return>GymEnvInf(envs seed)<block_end><def_stmt>create_agent model n_actions=1<block_start><return>RecurrentAgent(model=model n_actions=n_actions)<block_end><class_stmt>Experiment(A2C)<block_start><def_stmt>__init__ self config create_env create_train_env create_agent<block_start>super().__init__(config create_env create_train_env create_agent)<block_end><block_end><if_stmt>__name__<eq>"__main__"# We use spawn mode such that most of the environment will run in multiple processes <block_start><import_stmt>torch.multiprocessing<as>mp<line_sep>mp.set_start_method("spawn")<line_sep>config={"env_name":"CartPole-v0" "a2c_timesteps":3 "n_envs":4 "max_episode_steps":100 "env_seed":42 "n_threads":4 "n_evaluation_threads":2 "n_evaluation_episodes":256 "time_limit":3600 "lr":0.001 "discount_factor":0.95 "critic_coef":1.0 "entropy_coef":0.01 "a2c_coef":1.0 "logdir":"./results" }<line_sep>exp=Experiment(config create_env create_train_env create_agent)<line_sep>exp.run()<block_end>
<import_from_stmt>openid.consumer.discover OpenIDServiceEndpoint<import_stmt>datadriven<class_stmt>BadLinksTestCase(datadriven.DataDrivenTestCase)<block_start>cases=['' "http://not.in.a.link.tag/" '<link rel="openid.server" href="not.in.html.or.head" />' ]<def_stmt>__init__ self data<block_start>datadriven.DataDrivenTestCase.__init__(self data)<line_sep>self.data=data<block_end><def_stmt>runOneTest self<block_start>actual=OpenIDServiceEndpoint.fromHTML('http://unused.url/' self.data)<line_sep>expected=[]<line_sep>self.failUnlessEqual(expected actual)<block_end><block_end><def_stmt>pyUnitTests <block_start><return>datadriven.loadTests(__name__)<block_end>
<import_from_stmt>sklearn.linear_model LogisticRegression<import_from_stmt>fightchurn.listings.chap8.listing_8_2_logistic_regression prepare_data save_regression_model<import_from_stmt>fightchurn.listings.chap8.listing_8_2_logistic_regression save_regression_summary save_dataset_predictions<def_stmt>regression_cparam data_set_path C_param<block_start>X,y=prepare_data(data_set_path)<line_sep>retain_reg=LogisticRegression(C=C_param penalty='l1' solver='liblinear' fit_intercept=<true>)<line_sep>retain_reg.fit(X y)<line_sep>c_ext='_c{:.3f}'.format(C_param)<line_sep>save_regression_summary(data_set_path retain_reg ext=c_ext)<line_sep>save_regression_model(data_set_path retain_reg ext=c_ext)<line_sep>save_dataset_predictions(data_set_path retain_reg X ext=c_ext)<block_end>
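# --- Hedged usage sketch for the helper above ---
# Smaller C means stronger L1 regularisation, so a descending sweep shows how
# coefficients are progressively driven to zero; one summary/model/prediction
# set is written per C value via the '_c{C}' file extensions. The dataset
# path is a placeholder.
if __name__ == '__main__':
    for c_value in (0.64, 0.32, 0.16, 0.08, 0.04, 0.02):
        regression_cparam('/path/to/churn_dataset.csv', c_value)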
"""Sensor for data from Austrian Zentralanstalt für Meteorologie."""<import_from_future_stmt> annotations<import_stmt>logging<import_stmt>voluptuous<as>vol<import_from_stmt>homeassistant.components.weather ATTR_WEATHER_HUMIDITY ATTR_WEATHER_PRESSURE ATTR_WEATHER_TEMPERATURE ATTR_WEATHER_WIND_BEARING ATTR_WEATHER_WIND_SPEED PLATFORM_SCHEMA WeatherEntity <import_from_stmt>homeassistant.const CONF_LATITUDE CONF_LONGITUDE CONF_NAME TEMP_CELSIUS<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.helpers config_validation<as>cv<import_from_stmt>homeassistant.helpers.entity_platform AddEntitiesCallback<import_from_stmt>homeassistant.helpers.typing ConfigType DiscoveryInfoType<line_sep># Reuse data and API logic from the sensor implementation <import_from_stmt>.sensor ATTRIBUTION CONF_STATION_ID ZamgData closest_station zamg_stations <line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>PLATFORM_SCHEMA=PLATFORM_SCHEMA.extend({vol.Optional(CONF_NAME):cv.string vol.Optional(CONF_STATION_ID):cv.string vol.Inclusive(CONF_LATITUDE "coordinates" "Latitude and longitude must exist together"):cv.latitude vol.Inclusive(CONF_LONGITUDE "coordinates" "Latitude and longitude must exist together"):cv.longitude })<def_stmt>setup_platform hass:HomeAssistant config:ConfigType add_entities:AddEntitiesCallback discovery_info:DiscoveryInfoType|<none>=<none> <arrow><none><block_start>"""Set up the ZAMG weather platform."""<line_sep>name=config.get(CONF_NAME)<line_sep>latitude=config.get(CONF_LATITUDE hass.config.latitude)<line_sep>longitude=config.get(CONF_LONGITUDE hass.config.longitude)<line_sep>station_id=config.get(CONF_STATION_ID)<or>closest_station(latitude longitude hass.config.config_dir)<if_stmt>station_id<not><in>zamg_stations(hass.config.config_dir)<block_start>_LOGGER.error("Configured ZAMG %s (%s) is not a known station" CONF_STATION_ID station_id )<line_sep><return><block_end>probe=ZamgData(station_id=station_id)<try_stmt><block_start>probe.update()<block_end><except_stmt>(ValueError TypeError)<as>err<block_start>_LOGGER.error("Received error from ZAMG: %s" err)<line_sep><return><block_end>add_entities([ZamgWeather(probe name)] <true>)<block_end><class_stmt>ZamgWeather(WeatherEntity)<block_start>"""Representation of a weather condition."""<def_stmt>__init__ self zamg_data stationname=<none><block_start>"""Initialise the platform with a data instance and station name."""<line_sep>self.zamg_data=zamg_data<line_sep>self.stationname=stationname<block_end>@property<def_stmt>name self<block_start>"""Return the name of the sensor."""<line_sep><return>(self.stationname<or>f"ZAMG {self.zamg_data.data.get('Name')<or>'(unknown station)'}")<block_end>@property<def_stmt>condition self<block_start>"""Return the current condition."""<line_sep><return><none><block_end>@property<def_stmt>attribution self<block_start>"""Return the attribution."""<line_sep><return>ATTRIBUTION<block_end>@property<def_stmt>temperature self<block_start>"""Return the platform temperature."""<line_sep><return>self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)<block_end>@property<def_stmt>temperature_unit self<block_start>"""Return the unit of measurement."""<line_sep><return>TEMP_CELSIUS<block_end>@property<def_stmt>pressure self<block_start>"""Return the pressure."""<line_sep><return>self.zamg_data.get_data(ATTR_WEATHER_PRESSURE)<block_end>@property<def_stmt>humidity self<block_start>"""Return the humidity."""<line_sep><return>self.zamg_data.get_data(ATTR_WEATHER_HUMIDITY)<block_end>@property<def_stmt>wind_speed 
self<block_start>"""Return the wind speed."""<line_sep><return>self.zamg_data.get_data(ATTR_WEATHER_WIND_SPEED)<block_end>@property<def_stmt>wind_bearing self<block_start>"""Return the wind bearing."""<line_sep><return>self.zamg_data.get_data(ATTR_WEATHER_WIND_BEARING)<block_end><def_stmt>update self<block_start>"""Update current conditions."""<line_sep>self.zamg_data.update()<block_end><block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<def_stmt>get_train_data train_dir batch_size<block_start>train_images=np.load(os.path.join(train_dir 'train_images.npy'))<line_sep>train_labels=np.load(os.path.join(train_dir 'train_labels.npy'))<line_sep>print('train_images' train_images.shape 'train_labels' train_labels.shape)<line_sep>dataset_train=tf.data.Dataset.from_tensor_slices((train_images train_labels))<line_sep>dataset_train=dataset_train.repeat().shuffle(10000).batch(batch_size)<line_sep><return>dataset_train<block_end><def_stmt>get_val_data val_dir<block_start>test_images=np.load(os.path.join(val_dir 'validation_images.npy'))<line_sep>test_labels=np.load(os.path.join(val_dir 'validation_labels.npy'))<line_sep>print('validation_images' test_images.shape 'validation_labels' test_labels.shape)<line_sep>dataset_test=tf.data.Dataset.from_tensor_slices((test_images test_labels))<line_sep><return>dataset_test<block_end>
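# --- Hedged usage sketch for the loaders above ---
# get_train_data returns an infinitely repeating, shuffled, batched dataset,
# so the training loop must bound iteration itself (e.g. with take() or a
# fixed number of steps); get_val_data returns unbatched slices for the
# caller to batch. Directory names below are placeholders.
def _example_usage():
    train_ds = get_train_data('/data/train', batch_size=32)
    for images, labels in train_ds.take(100):  # 100 training steps
        pass
    val_ds = get_val_data('/data/val').batch(32)
    return val_ds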
""" The Tornado Framework By <NAME> University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """<import_stmt>re<import_from_stmt>data_structures.attribute Attribute<import_from_stmt>dictionary.tornado_dictionary TornadoDic<class_stmt>ARFFReader<block_start>"""This class is used to read a .arff file."""<line_sep>@staticmethod<def_stmt>read file_path<block_start>labels=[]<line_sep>attributes=[]<line_sep>attributes_min_max=[]<line_sep>records=[]<line_sep>data_flag=<false><line_sep>reader=open(file_path "r")<for_stmt>line reader<block_start><if_stmt>line.strip()<eq>''<block_start><continue><block_end><if_stmt>line.startswith("@attribute")<or>line.startswith("@ATTRIBUTE")<block_start>line=line.strip('\n\r\t')<line_sep>line=line.split(' ')<line_sep>attribute_name=line[1]<line_sep>attribute_value_range=line[2]<line_sep>attribute=Attribute()<line_sep>attribute.set_name(attribute_name)<if_stmt>attribute_value_range.lower()<in>['numeric' 'real' 'integer']<block_start>attribute_type=TornadoDic.NUMERIC_ATTRIBUTE<line_sep>attribute_value_range=[]<line_sep>attributes_min_max.append([0 0])<block_end><else_stmt><block_start>attribute_type=TornadoDic.NOMINAL_ATTRIBUTE<line_sep>attribute_value_range=attribute_value_range.strip('{}').replace("'" "")<line_sep>attribute_value_range=attribute_value_range.split(',')<line_sep>attributes_min_max.append([<none> <none>])<block_end>attribute.set_type(attribute_type)<line_sep>attribute.set_possible_values(attribute_value_range)<line_sep>attributes.append(attribute)<block_end><elif_stmt>line.startswith("@data")<or>line.startswith("@DATA")<block_start>data_flag=<true><line_sep>labels=attributes[len(attributes)-1].POSSIBLE_VALUES<line_sep>attributes.pop(len(attributes)-1)<line_sep><continue><block_end><elif_stmt>data_flag<is><true><block_start>line=re.sub('\s+' '' line)<line_sep>elements=line.split(',')<for_stmt>i range(0 len(elements)-1)<block_start><if_stmt>attributes[i].TYPE<eq>TornadoDic.NUMERIC_ATTRIBUTE<block_start>elements[i]=float(elements[i])<line_sep>min_value=attributes_min_max[i][0]<line_sep>max_value=attributes_min_max[i][1]<if_stmt>elements[i]<l>min_value<block_start>min_value=elements[i]<block_end><elif_stmt>elements[i]<g>max_value<block_start>max_value=elements[i]<block_end>attributes_min_max[i]=[min_value max_value]<block_end><block_end>records.append(elements)<block_end><block_end><for_stmt>i range(0 len(attributes))<block_start><if_stmt>attributes[i].TYPE<eq>TornadoDic.NUMERIC_ATTRIBUTE<block_start>attributes[i].set_bounds_values(attributes_min_max[i][0] attributes_min_max[i][1])<block_end><block_end><return>labels attributes records<block_end><block_end>
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Run an agent in its own (independent) process. What Agent code does is out of our direct control, so we want to avoid any interactions with global state that might be present in the SMARTS process. To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process. This script is called from within SMARTS to instantiate a remote agent. The protocol is as follows: 1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent 2. worker.py will begin listening on port 5467. 3. SMARTS connects to (ip, 5467) as a client. 4. SMARTS calls `build()` rpc with `AgentSpec` as input. 5. worker.py receives the `AgentSpec` instances and builds the Agent. 6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py. 
"""<import_stmt>argparse<import_stmt>importlib<import_stmt>logging<import_stmt>os<import_stmt>signal<import_stmt>sys<import_from_stmt>concurrent futures<import_stmt>grpc<import_from_stmt>smarts.zoo worker_pb2_grpc worker_servicer<line_sep># Front-load some expensive imports as to not block the simulation modules=["smarts.core.utils.pybullet" "smarts.core.utils.sumo" "smarts.core.sumo_road_network" "numpy" "sklearn" "shapely" "scipy" "trimesh" "panda3d" "gym" "ray" ]<for_stmt>mod modules<block_start><try_stmt><block_start>importlib.import_module(mod)<block_end><except_stmt>ImportError<block_start><if_stmt>mod<eq>"ray"<block_start>print("You need to install the ray dependency using pip install -e .[train] first")<block_end><if_stmt>mod<eq>"panda3d"<block_start>print("You need to install the panda3d dependency using pip install -e .[camera-obs] first")<block_end><pass><block_end><block_end># End front-loaded imports logging.basicConfig(level=logging.INFO)<line_sep>log=logging.getLogger(f"worker.py - pid({os.getpid()})")<def_stmt>serve port<block_start>ip="[::]"<line_sep>server=grpc.server(futures.ThreadPoolExecutor(max_workers=1))<line_sep>worker_pb2_grpc.add_WorkerServicer_to_server(worker_servicer.WorkerServicer() server)<line_sep>server.add_insecure_port(f"{ip}:{port}")<line_sep>server.start()<line_sep>log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")<def_stmt>stop_server unused_signum unused_frame<block_start>server.stop(0)<line_sep>log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Received interrupt signal.")<block_end># Catch keyboard interrupt and terminate signal signal.signal(signal.SIGINT stop_server)<line_sep>signal.signal(signal.SIGTERM stop_server)<line_sep># Wait to receive server termination signal server.wait_for_termination()<line_sep>log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Server exited")<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser("Run an agent in an independent process.")<line_sep>parser.add_argument("--port" type=int required=<true> help="Port to listen for remote client connections." )<line_sep>args=parser.parse_args()<line_sep>serve(args.port)<block_end>
_base_=['../_base_/base_tensorrt_static-300x300.py']<line_sep>
# -*- coding: utf8 -*- # Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>warnings<import_from_stmt>tencentcloud.common.abstract_model AbstractModel<class_stmt>AddUserContactRequest(AbstractModel)<block_start>"""AddUserContact请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Name: 联系人姓名,由中英文、数字、空格、!@#$%^&*()_+-=()组成,不能以下划线开头,长度在20以内。 :type Name: str :param ContactInfo: 邮箱地址,支持大小写字母、数字、下划线及@字符, 不能以下划线开头,邮箱地址不可重复。 :type ContactInfo: str :param Product: 服务产品类型,固定值:"mysql"。 :type Product: str """<line_sep>self.Name=<none><line_sep>self.ContactInfo=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.Name=params.get("Name")<line_sep>self.ContactInfo=params.get("ContactInfo")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>AddUserContactResponse(AbstractModel)<block_start>"""AddUserContact返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Id: 添加成功的联系人id。 :type Id: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Id=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Id=params.get("Id")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>ContactItem(AbstractModel)<block_start>"""联系人contact描述。 """<def_stmt>__init__ self<block_start>r""" :param Id: 联系人id。 :type Id: int :param Name: 联系人姓名。 :type Name: str :param Mail: 联系人绑定的邮箱。 :type Mail: str """<line_sep>self.Id=<none><line_sep>self.Name=<none><line_sep>self.Mail=<none><block_end><def_stmt>_deserialize self params<block_start>self.Id=params.get("Id")<line_sep>self.Name=params.get("Name")<line_sep>self.Mail=params.get("Mail")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateDBDiagReportTaskRequest(AbstractModel)<block_start>"""CreateDBDiagReportTask请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。 :type StartTime: str :param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。 :type EndTime: str :param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。 :type SendMailFlag: int :param ContactPerson: 接收邮件的联系人ID数组。 :type ContactPerson: list of int :param ContactGroup: 接收邮件的联系组ID数组。 :type ContactGroup: list of int :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。 :type 
Product: str """<line_sep>self.InstanceId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.SendMailFlag=<none><line_sep>self.ContactPerson=<none><line_sep>self.ContactGroup=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.SendMailFlag=params.get("SendMailFlag")<line_sep>self.ContactPerson=params.get("ContactPerson")<line_sep>self.ContactGroup=params.get("ContactGroup")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateDBDiagReportTaskResponse(AbstractModel)<block_start>"""CreateDBDiagReportTask返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。 注意:此字段可能返回 null,表示取不到有效值。 :type AsyncRequestId: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.AsyncRequestId=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>CreateDBDiagReportUrlRequest(AbstractModel)<block_start>"""CreateDBDiagReportUrl请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。 :type AsyncRequestId: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.AsyncRequestId=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateDBDiagReportUrlResponse(AbstractModel)<block_start>"""CreateDBDiagReportUrl返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param ReportUrl: 健康报告浏览地址。 :type ReportUrl: str :param ExpireTime: 健康报告浏览地址到期时间戳(秒)。 :type ExpireTime: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.ReportUrl=<none><line_sep>self.ExpireTime=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.ReportUrl=params.get("ReportUrl")<line_sep>self.ExpireTime=params.get("ExpireTime")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>CreateMailProfileRequest(AbstractModel)<block_start>"""CreateMailProfile请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param ProfileInfo: 邮件配置内容。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` :param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。 :type ProfileLevel: 
str :param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。 :type ProfileName: str :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 :type ProfileType: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。 :type Product: str :param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。 :type BindInstanceIds: list of str """<line_sep>self.ProfileInfo=<none><line_sep>self.ProfileLevel=<none><line_sep>self.ProfileName=<none><line_sep>self.ProfileType=<none><line_sep>self.Product=<none><line_sep>self.BindInstanceIds=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("ProfileInfo")<is><not><none><block_start>self.ProfileInfo=ProfileInfo()<line_sep>self.ProfileInfo._deserialize(params.get("ProfileInfo"))<block_end>self.ProfileLevel=params.get("ProfileLevel")<line_sep>self.ProfileName=params.get("ProfileName")<line_sep>self.ProfileType=params.get("ProfileType")<line_sep>self.Product=params.get("Product")<line_sep>self.BindInstanceIds=params.get("BindInstanceIds")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateMailProfileResponse(AbstractModel)<block_start>"""CreateMailProfile返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>CreateSchedulerMailProfileRequest(AbstractModel)<block_start>"""CreateSchedulerMailProfile请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param WeekConfiguration: 取值范围1-7,分别代表周一至周日。 :type WeekConfiguration: list of int :param ProfileInfo: 邮件配置内容。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` :param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。 :type ProfileName: str :param BindInstanceId: 配置订阅的实例ID。 :type BindInstanceId: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.WeekConfiguration=<none><line_sep>self.ProfileInfo=<none><line_sep>self.ProfileName=<none><line_sep>self.BindInstanceId=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.WeekConfiguration=params.get("WeekConfiguration")<if_stmt>params.get("ProfileInfo")<is><not><none><block_start>self.ProfileInfo=ProfileInfo()<line_sep>self.ProfileInfo._deserialize(params.get("ProfileInfo"))<block_end>self.ProfileName=params.get("ProfileName")<line_sep>self.BindInstanceId=params.get("BindInstanceId")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateSchedulerMailProfileResponse(AbstractModel)<block_start>"""CreateSchedulerMailProfile返回参数结构体 """<def_stmt>__init__ 
self<block_start>r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>CreateSecurityAuditLogExportTaskRequest(AbstractModel)<block_start>"""CreateSecurityAuditLogExportTask请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。 :type StartTime: str :param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。 :type EndTime: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str :param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。 :type DangerLevels: list of int """<line_sep>self.SecAuditGroupId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.Product=<none><line_sep>self.DangerLevels=<none><block_end><def_stmt>_deserialize self params<block_start>self.SecAuditGroupId=params.get("SecAuditGroupId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.Product=params.get("Product")<line_sep>self.DangerLevels=params.get("DangerLevels")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>CreateSecurityAuditLogExportTaskResponse(AbstractModel)<block_start>"""CreateSecurityAuditLogExportTask返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param AsyncRequestId: 日志导出任务Id。 :type AsyncRequestId: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.AsyncRequestId=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DeleteSecurityAuditLogExportTasksRequest(AbstractModel)<block_start>"""DeleteSecurityAuditLogExportTasks请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。 :type AsyncRequestIds: list of int non-negative :param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。 :type Product: str """<line_sep>self.SecAuditGroupId=<none><line_sep>self.AsyncRequestIds=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.SecAuditGroupId=params.get("SecAuditGroupId")<line_sep>self.AsyncRequestIds=params.get("AsyncRequestIds")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DeleteSecurityAuditLogExportTasksResponse(AbstractModel)<block_start>"""DeleteSecurityAuditLogExportTasks返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self 
params<block_start>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeAllUserContactRequest(AbstractModel)<block_start>"""DescribeAllUserContact请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Product: 服务产品类型,固定值:mysql。 :type Product: str :param Names: 联系人名数组,支持模糊搜索。 :type Names: list of str """<line_sep>self.Product=<none><line_sep>self.Names=<none><block_end><def_stmt>_deserialize self params<block_start>self.Product=params.get("Product")<line_sep>self.Names=params.get("Names")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeAllUserContactResponse(AbstractModel)<block_start>"""DescribeAllUserContact返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 联系人的总数量。 :type TotalCount: int :param Contacts: 联系人的信息。 注意:此字段可能返回 null,表示取不到有效值。 :type Contacts: list of ContactItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.Contacts=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<if_stmt>params.get("Contacts")<is><not><none><block_start>self.Contacts=[]<for_stmt>item params.get("Contacts")<block_start>obj=ContactItem()<line_sep>obj._deserialize(item)<line_sep>self.Contacts.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeAllUserGroupRequest(AbstractModel)<block_start>"""DescribeAllUserGroup请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Product: 服务产品类型,固定值:mysql。 :type Product: str :param Names: 联系组名称数组,支持模糊搜索。 :type Names: list of str """<line_sep>self.Product=<none><line_sep>self.Names=<none><block_end><def_stmt>_deserialize self params<block_start>self.Product=params.get("Product")<line_sep>self.Names=params.get("Names")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeAllUserGroupResponse(AbstractModel)<block_start>"""DescribeAllUserGroup返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 组总数。 :type TotalCount: int :param Groups: 组信息。 注意:此字段可能返回 null,表示取不到有效值。 :type Groups: list of GroupItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.Groups=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<if_stmt>params.get("Groups")<is><not><none><block_start>self.Groups=[]<for_stmt>item params.get("Groups")<block_start>obj=GroupItem()<line_sep>obj._deserialize(item)<line_sep>self.Groups.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeDBDiagEventRequest(AbstractModel)<block_start>"""DescribeDBDiagEvent请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。 :type EventId: int :param Product: 
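# --- Illustrative sketch: deserialising a DescribeAllUserContact reply ---------
# The nested "Contacts" items become ContactItem objects; the payload below is a
# hand-written example of the documented shape, not real API output.
from tencentcloud.dbbrain.v20210527 import models

payload = {
    "TotalCount": 1,
    "Contacts": [{"Id": 10, "Name": "dba_oncall", "Mail": "dba@example.com"}],
    "RequestId": "11111111-2222-3333-4444-555555555555",
}
resp = models.DescribeAllUserContactResponse()
resp._deserialize(payload)
for contact in resp.Contacts:
    print(contact.Id, contact.Name, contact.Mail)
# ------------------------------------------------------------------------------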
服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.EventId=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.EventId=params.get("EventId")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeDBDiagEventResponse(AbstractModel)<block_start>"""DescribeDBDiagEvent返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param DiagItem: 诊断项。 :type DiagItem: str :param DiagType: 诊断类型。 :type DiagType: str :param EventId: 事件 ID 。 :type EventId: int :param Explanation: 诊断事件详情,若无附加解释信息则输出为空。 :type Explanation: str :param Outline: 诊断概要。 :type Outline: str :param Problem: 诊断出的问题。 :type Problem: str :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param StartTime: 开始时间 :type StartTime: str :param Suggestions: 诊断建议,若无建议则输出为空。 :type Suggestions: str :param Metric: 保留字段。 注意:此字段可能返回 null,表示取不到有效值。 :type Metric: str :param EndTime: 结束时间。 :type EndTime: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.DiagItem=<none><line_sep>self.DiagType=<none><line_sep>self.EventId=<none><line_sep>self.Explanation=<none><line_sep>self.Outline=<none><line_sep>self.Problem=<none><line_sep>self.Severity=<none><line_sep>self.StartTime=<none><line_sep>self.Suggestions=<none><line_sep>self.Metric=<none><line_sep>self.EndTime=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.DiagItem=params.get("DiagItem")<line_sep>self.DiagType=params.get("DiagType")<line_sep>self.EventId=params.get("EventId")<line_sep>self.Explanation=params.get("Explanation")<line_sep>self.Outline=params.get("Outline")<line_sep>self.Problem=params.get("Problem")<line_sep>self.Severity=params.get("Severity")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.Suggestions=params.get("Suggestions")<line_sep>self.Metric=params.get("Metric")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeDBDiagHistoryRequest(AbstractModel)<block_start>"""DescribeDBDiagHistory请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。 :type EndTime: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds 
are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeDBDiagHistoryResponse(AbstractModel)<block_start>"""DescribeDBDiagHistory返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Events: 事件描述。 :type Events: list of DiagHistoryEventItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Events=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("Events")<is><not><none><block_start>self.Events=[]<for_stmt>item params.get("Events")<block_start>obj=DiagHistoryEventItem()<line_sep>obj._deserialize(item)<line_sep>self.Events.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeDBDiagReportTasksRequest(AbstractModel)<block_start>"""DescribeDBDiagReportTasks请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。 :type StartTime: str :param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。 :type EndTime: str :param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。 :type InstanceIds: list of str :param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。 :type Sources: list of str :param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。 :type HealthLevels: str :param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。 :type TaskStatuses: str :param Offset: 偏移量,默认0。 :type Offset: int :param Limit: 返回数量,默认20,最大值为100。 :type Limit: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.InstanceIds=<none><line_sep>self.Sources=<none><line_sep>self.HealthLevels=<none><line_sep>self.TaskStatuses=<none><line_sep>self.Offset=<none><line_sep>self.Limit=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.InstanceIds=params.get("InstanceIds")<line_sep>self.Sources=params.get("Sources")<line_sep>self.HealthLevels=params.get("HealthLevels")<line_sep>self.TaskStatuses=params.get("TaskStatuses")<line_sep>self.Offset=params.get("Offset")<line_sep>self.Limit=params.get("Limit")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeDBDiagReportTasksResponse(AbstractModel)<block_start>"""DescribeDBDiagReportTasks返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 任务总数目。 :type TotalCount: int :param Tasks: 任务列表。 :type Tasks: list of HealthReportTask :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.Tasks=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<if_stmt>params.get("Tasks")<is><not><none><block_start>self.Tasks=[]<for_stmt>item 
params.get("Tasks")<block_start>obj=HealthReportTask()<line_sep>obj._deserialize(item)<line_sep>self.Tasks.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeDBSpaceStatusRequest(AbstractModel)<block_start>"""DescribeDBSpaceStatus请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param RangeDays: 时间段天数,截止日期为当日,默认为7天。 :type RangeDays: int :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.RangeDays=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.RangeDays=params.get("RangeDays")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeDBSpaceStatusResponse(AbstractModel)<block_start>"""DescribeDBSpaceStatus返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Growth: 磁盘增长量(MB)。 :type Growth: int :param Remain: 磁盘剩余(MB)。 :type Remain: int :param Total: 磁盘总量(MB)。 :type Total: int :param AvailableDays: 预计可用天数。 :type AvailableDays: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Growth=<none><line_sep>self.Remain=<none><line_sep>self.Total=<none><line_sep>self.AvailableDays=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Growth=params.get("Growth")<line_sep>self.Remain=params.get("Remain")<line_sep>self.Total=params.get("Total")<line_sep>self.AvailableDays=params.get("AvailableDays")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeDiagDBInstancesRequest(AbstractModel)<block_start>"""DescribeDiagDBInstances请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param IsSupported: 是否是DBbrain支持的实例,固定传 true。 :type IsSupported: bool :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Offset: 分页参数,偏移量。 :type Offset: int :param Limit: 分页参数,分页值,最大值为100。 :type Limit: int :param InstanceNames: 根据实例名称条件查询。 :type InstanceNames: list of str :param InstanceIds: 根据实例ID条件查询。 :type InstanceIds: list of str :param Regions: 根据地域条件查询。 :type Regions: list of str """<line_sep>self.IsSupported=<none><line_sep>self.Product=<none><line_sep>self.Offset=<none><line_sep>self.Limit=<none><line_sep>self.InstanceNames=<none><line_sep>self.InstanceIds=<none><line_sep>self.Regions=<none><block_end><def_stmt>_deserialize self params<block_start>self.IsSupported=params.get("IsSupported")<line_sep>self.Product=params.get("Product")<line_sep>self.Offset=params.get("Offset")<line_sep>self.Limit=params.get("Limit")<line_sep>self.InstanceNames=params.get("InstanceNames")<line_sep>self.InstanceIds=params.get("InstanceIds")<line_sep>self.Regions=params.get("Regions")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are 
useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeDiagDBInstancesResponse(AbstractModel)<block_start>"""DescribeDiagDBInstances返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 实例总数。 :type TotalCount: int :param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。 :type DbScanStatus: int :param Items: 实例相关信息。 :type Items: list of InstanceInfo :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.DbScanStatus=<none><line_sep>self.Items=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<line_sep>self.DbScanStatus=params.get("DbScanStatus")<if_stmt>params.get("Items")<is><not><none><block_start>self.Items=[]<for_stmt>item params.get("Items")<block_start>obj=InstanceInfo()<line_sep>obj._deserialize(item)<line_sep>self.Items.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeHealthScoreRequest(AbstractModel)<block_start>"""DescribeHealthScore请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 需要获取健康得分的实例ID。 :type InstanceId: str :param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。 :type Time: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Time=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Time=params.get("Time")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeHealthScoreResponse(AbstractModel)<block_start>"""DescribeHealthScore返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Data: 健康得分以及异常扣分项。 :type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Data=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("Data")<is><not><none><block_start>self.Data=HealthScoreInfo()<line_sep>self.Data._deserialize(params.get("Data"))<block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeMailProfileRequest(AbstractModel)<block_start>"""DescribeMailProfile请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 :type ProfileType: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Offset: 分页偏移量。 :type Offset: int :param Limit: 分页单位,最大支持50。 :type Limit: int :param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。 :type ProfileName: str """<line_sep>self.ProfileType=<none><line_sep>self.Product=<none><line_sep>self.Offset=<none><line_sep>self.Limit=<none><line_sep>self.ProfileName=<none><block_end><def_stmt>_deserialize self 
params<block_start>self.ProfileType=params.get("ProfileType")<line_sep>self.Product=params.get("Product")<line_sep>self.Offset=params.get("Offset")<line_sep>self.Limit=params.get("Limit")<line_sep>self.ProfileName=params.get("ProfileName")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeMailProfileResponse(AbstractModel)<block_start>"""DescribeMailProfile返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param ProfileList: 邮件配置详情。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileList: list of UserProfile :param TotalCount: 邮件模版总数。 注意:此字段可能返回 null,表示取不到有效值。 :type TotalCount: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.ProfileList=<none><line_sep>self.TotalCount=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("ProfileList")<is><not><none><block_start>self.ProfileList=[]<for_stmt>item params.get("ProfileList")<block_start>obj=UserProfile()<line_sep>obj._deserialize(item)<line_sep>self.ProfileList.append(obj)<block_end><block_end>self.TotalCount=params.get("TotalCount")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeMySqlProcessListRequest(AbstractModel)<block_start>"""DescribeMySqlProcessList请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param ID: 线程的ID,用于筛选线程列表。 :type ID: int :param User: 线程的操作账号名,用于筛选线程列表。 :type User: str :param Host: 线程的操作主机地址,用于筛选线程列表。 :type Host: str :param DB: 线程的操作数据库,用于筛选线程列表。 :type DB: str :param State: 线程的操作状态,用于筛选线程列表。 :type State: str :param Command: 线程的执行类型,用于筛选线程列表。 :type Command: str :param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。 :type Time: int :param Info: 线程的操作语句,用于筛选线程列表。 :type Info: str :param Limit: 返回数量,默认20。 :type Limit: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.ID=<none><line_sep>self.User=<none><line_sep>self.Host=<none><line_sep>self.DB=<none><line_sep>self.State=<none><line_sep>self.Command=<none><line_sep>self.Time=<none><line_sep>self.Info=<none><line_sep>self.Limit=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.ID=params.get("ID")<line_sep>self.User=params.get("User")<line_sep>self.Host=params.get("Host")<line_sep>self.DB=params.get("DB")<line_sep>self.State=params.get("State")<line_sep>self.Command=params.get("Command")<line_sep>self.Time=params.get("Time")<line_sep>self.Info=params.get("Info")<line_sep>self.Limit=params.get("Limit")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeMySqlProcessListResponse(AbstractModel)<block_start>"""DescribeMySqlProcessList返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param ProcessList: 实时线程列表。 :type ProcessList: list of MySqlProcess :param RequestId: 唯一请求 
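# --- Illustrative sketch: filtering the real-time MySQL process list -----------
# Only threads running longer than Time (seconds) are returned; the client call
# follows the SDK's usual pattern (see earlier sketch) and the instance ID is a
# placeholder.
from tencentcloud.dbbrain.v20210527 import models

req = models.DescribeMySqlProcessListRequest()
req.InstanceId = "cdb-xxxxxxxx"      # hypothetical instance
req.Time = 10                        # threads running for more than 10 seconds
req.Command = "Query"
req.Limit = 50
req.Product = "mysql"
# resp = client.DescribeMySqlProcessList(req)   # client built as in the earlier sketch
# resp.ProcessList holds the matching MySqlProcess entries
# ------------------------------------------------------------------------------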
ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.ProcessList=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("ProcessList")<is><not><none><block_start>self.ProcessList=[]<for_stmt>item params.get("ProcessList")<block_start>obj=MySqlProcess()<line_sep>obj._deserialize(item)<line_sep>self.ProcessList.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel)<block_start>"""DescribeSecurityAuditLogDownloadUrls请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param AsyncRequestId: 异步任务Id。 :type AsyncRequestId: int :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str """<line_sep>self.SecAuditGroupId=<none><line_sep>self.AsyncRequestId=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.SecAuditGroupId=params.get("SecAuditGroupId")<line_sep>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel)<block_start>"""DescribeSecurityAuditLogDownloadUrls返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Urls: 导出结果的COS链接列表。当结果集很大时,可能会切分为多个url下载。 :type Urls: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Urls=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Urls=params.get("Urls")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeSecurityAuditLogExportTasksRequest(AbstractModel)<block_start>"""DescribeSecurityAuditLogExportTasks请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param SecAuditGroupId: 安全审计组Id。 :type SecAuditGroupId: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。 :type Product: str :param AsyncRequestIds: 日志导出任务Id列表。 :type AsyncRequestIds: list of int non-negative :param Offset: 偏移量,默认0。 :type Offset: int :param Limit: 返回数量,默认20,最大值为100。 :type Limit: int """<line_sep>self.SecAuditGroupId=<none><line_sep>self.Product=<none><line_sep>self.AsyncRequestIds=<none><line_sep>self.Offset=<none><line_sep>self.Limit=<none><block_end><def_stmt>_deserialize self params<block_start>self.SecAuditGroupId=params.get("SecAuditGroupId")<line_sep>self.Product=params.get("Product")<line_sep>self.AsyncRequestIds=params.get("AsyncRequestIds")<line_sep>self.Offset=params.get("Offset")<line_sep>self.Limit=params.get("Limit")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeSecurityAuditLogExportTasksResponse(AbstractModel)<block_start>"""DescribeSecurityAuditLogExportTasks返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Tasks: 安全审计日志导出任务列表。 :type Tasks: list of SecLogExportTaskInfo :param TotalCount: 安全审计日志导出任务总数。 :type 
TotalCount: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Tasks=<none><line_sep>self.TotalCount=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("Tasks")<is><not><none><block_start>self.Tasks=[]<for_stmt>item params.get("Tasks")<block_start>obj=SecLogExportTaskInfo()<line_sep>obj._deserialize(item)<line_sep>self.Tasks.append(obj)<block_end><block_end>self.TotalCount=params.get("TotalCount")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeSlowLogTimeSeriesStatsRequest(AbstractModel)<block_start>"""DescribeSlowLogTimeSeriesStats请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。 :type EndTime: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeSlowLogTimeSeriesStatsResponse(AbstractModel)<block_start>"""DescribeSlowLogTimeSeriesStats返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Period: 柱间单位时间间隔,单位为秒。 :type Period: int :param TimeSeries: 单位时间间隔内慢日志数量统计。 :type TimeSeries: list of TimeSlice :param SeriesData: 单位时间间隔内的实例 cpu 利用率监控数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData` :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Period=<none><line_sep>self.TimeSeries=<none><line_sep>self.SeriesData=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Period=params.get("Period")<if_stmt>params.get("TimeSeries")<is><not><none><block_start>self.TimeSeries=[]<for_stmt>item params.get("TimeSeries")<block_start>obj=TimeSlice()<line_sep>obj._deserialize(item)<line_sep>self.TimeSeries.append(obj)<block_end><block_end><if_stmt>params.get("SeriesData")<is><not><none><block_start>self.SeriesData=MonitorMetricSeriesData()<line_sep>self.SeriesData._deserialize(params.get("SeriesData"))<block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeSlowLogTopSqlsRequest(AbstractModel)<block_start>"""DescribeSlowLogTopSqls请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param StartTime: 开始时间,如“2019-09-10 12:13:14”。 :type StartTime: str :param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。 :type EndTime: str :param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。 :type SortBy: str :param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。 :type OrderBy: str :param Limit: 返回数量,默认为20,最大值为100。 :type Limit: int :param Offset: 偏移量,默认为0。 :type Offset: int :param 
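# --- Illustrative sketch: slow-log time series for one instance ----------------
# StartTime/EndTime may span at most 7 days per the docstring; the values below
# are placeholders.
from tencentcloud.dbbrain.v20210527 import models

req = models.DescribeSlowLogTimeSeriesStatsRequest()
req.InstanceId = "cdb-xxxxxxxx"              # hypothetical instance
req.StartTime = "2019-09-10 12:13:14"
req.EndTime = "2019-09-10 18:13:14"
req.Product = "mysql"
# resp = client.DescribeSlowLogTimeSeriesStats(req)
# resp.Period, resp.TimeSeries and resp.SeriesData hold the aggregated statistics
# ------------------------------------------------------------------------------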
SchemaList: 数据库名称数组。 :type SchemaList: list of SchemaItem :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.SortBy=<none><line_sep>self.OrderBy=<none><line_sep>self.Limit=<none><line_sep>self.Offset=<none><line_sep>self.SchemaList=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.SortBy=params.get("SortBy")<line_sep>self.OrderBy=params.get("OrderBy")<line_sep>self.Limit=params.get("Limit")<line_sep>self.Offset=params.get("Offset")<if_stmt>params.get("SchemaList")<is><not><none><block_start>self.SchemaList=[]<for_stmt>item params.get("SchemaList")<block_start>obj=SchemaItem()<line_sep>obj._deserialize(item)<line_sep>self.SchemaList.append(obj)<block_end><block_end>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeSlowLogTopSqlsResponse(AbstractModel)<block_start>"""DescribeSlowLogTopSqls返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 符合条件的记录总数。 :type TotalCount: int :param Rows: 慢日志 top sql 列表 :type Rows: list of SlowLogTopSqlItem :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.Rows=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<if_stmt>params.get("Rows")<is><not><none><block_start>self.Rows=[]<for_stmt>item params.get("Rows")<block_start>obj=SlowLogTopSqlItem()<line_sep>obj._deserialize(item)<line_sep>self.Rows.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeSlowLogUserHostStatsRequest(AbstractModel)<block_start>"""DescribeSlowLogUserHostStats请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。 :type StartTime: str :param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。 :type EndTime: str :param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。 :type Product: str :param Md5: SOL模板的MD5值 :type Md5: str """<line_sep>self.InstanceId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.Product=<none><line_sep>self.Md5=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.Product=params.get("Product")<line_sep>self.Md5=params.get("Md5")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are 
useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeSlowLogUserHostStatsResponse(AbstractModel)<block_start>"""DescribeSlowLogUserHostStats返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TotalCount: 来源地址数目。 :type TotalCount: int :param Items: 各来源地址的慢日志占比详情列表。 :type Items: list of SlowLogHost :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TotalCount=<none><line_sep>self.Items=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.TotalCount=params.get("TotalCount")<if_stmt>params.get("Items")<is><not><none><block_start>self.Items=[]<for_stmt>item params.get("Items")<block_start>obj=SlowLogHost()<line_sep>obj._deserialize(item)<line_sep>self.Items.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel)<block_start>"""DescribeTopSpaceSchemaTimeSeries请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param Limit: 返回的Top库数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。 :type StartDate: str :param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。 :type EndDate: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Limit=<none><line_sep>self.SortBy=<none><line_sep>self.StartDate=<none><line_sep>self.EndDate=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Limit=params.get("Limit")<line_sep>self.SortBy=params.get("SortBy")<line_sep>self.StartDate=params.get("StartDate")<line_sep>self.EndDate=params.get("EndDate")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel)<block_start>"""DescribeTopSpaceSchemaTimeSeries返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TopSpaceSchemaTimeSeries: 返回的Top库空间统计信息的时序数据列表。 :type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TopSpaceSchemaTimeSeries=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("TopSpaceSchemaTimeSeries")<is><not><none><block_start>self.TopSpaceSchemaTimeSeries=[]<for_stmt>item params.get("TopSpaceSchemaTimeSeries")<block_start>obj=SchemaSpaceTimeSeries()<line_sep>obj._deserialize(item)<line_sep>self.TopSpaceSchemaTimeSeries.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeTopSpaceSchemasRequest(AbstractModel)<block_start>"""DescribeTopSpaceSchemas请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top库数量,最大值为100,默认为20。 
:type Limit: int :param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Limit=<none><line_sep>self.SortBy=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Limit=params.get("Limit")<line_sep>self.SortBy=params.get("SortBy")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeTopSpaceSchemasResponse(AbstractModel)<block_start>"""DescribeTopSpaceSchemas返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TopSpaceSchemas: 返回的Top库空间统计信息列表。 :type TopSpaceSchemas: list of SchemaSpaceData :param Timestamp: 采集库空间数据的时间戳(秒)。 :type Timestamp: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TopSpaceSchemas=<none><line_sep>self.Timestamp=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("TopSpaceSchemas")<is><not><none><block_start>self.TopSpaceSchemas=[]<for_stmt>item params.get("TopSpaceSchemas")<block_start>obj=SchemaSpaceData()<line_sep>obj._deserialize(item)<line_sep>self.TopSpaceSchemas.append(obj)<block_end><block_end>self.Timestamp=params.get("Timestamp")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeTopSpaceTableTimeSeriesRequest(AbstractModel)<block_start>"""DescribeTopSpaceTableTimeSeries请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top表数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。 :type SortBy: str :param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。 :type StartDate: str :param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。 :type EndDate: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Limit=<none><line_sep>self.SortBy=<none><line_sep>self.StartDate=<none><line_sep>self.EndDate=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Limit=params.get("Limit")<line_sep>self.SortBy=params.get("SortBy")<line_sep>self.StartDate=params.get("StartDate")<line_sep>self.EndDate=params.get("EndDate")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeTopSpaceTableTimeSeriesResponse(AbstractModel)<block_start>"""DescribeTopSpaceTableTimeSeries返回参数结构体 
"""<def_stmt>__init__ self<block_start>r""" :param TopSpaceTableTimeSeries: 返回的Top表空间统计信息的时序数据列表。 :type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TopSpaceTableTimeSeries=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("TopSpaceTableTimeSeries")<is><not><none><block_start>self.TopSpaceTableTimeSeries=[]<for_stmt>item params.get("TopSpaceTableTimeSeries")<block_start>obj=TableSpaceTimeSeries()<line_sep>obj._deserialize(item)<line_sep>self.TopSpaceTableTimeSeries.append(obj)<block_end><block_end>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeTopSpaceTablesRequest(AbstractModel)<block_start>"""DescribeTopSpaceTables请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例 ID 。 :type InstanceId: str :param Limit: 返回的Top表数量,最大值为100,默认为20。 :type Limit: int :param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。 :type SortBy: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Limit=<none><line_sep>self.SortBy=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Limit=params.get("Limit")<line_sep>self.SortBy=params.get("SortBy")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeTopSpaceTablesResponse(AbstractModel)<block_start>"""DescribeTopSpaceTables返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param TopSpaceTables: 返回的Top表空间统计信息列表。 :type TopSpaceTables: list of TableSpaceData :param Timestamp: 采集表空间数据的时间戳(秒)。 :type Timestamp: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.TopSpaceTables=<none><line_sep>self.Timestamp=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("TopSpaceTables")<is><not><none><block_start>self.TopSpaceTables=[]<for_stmt>item params.get("TopSpaceTables")<block_start>obj=TableSpaceData()<line_sep>obj._deserialize(item)<line_sep>self.TopSpaceTables.append(obj)<block_end><block_end>self.Timestamp=params.get("Timestamp")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DescribeUserSqlAdviceRequest(AbstractModel)<block_start>"""DescribeUserSqlAdvice请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param SqlText: SQL语句。 :type SqlText: str :param Schema: 库名。 :type Schema: str """<line_sep>self.InstanceId=<none><line_sep>self.SqlText=<none><line_sep>self.Schema=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.SqlText=params.get("SqlText")<line_sep>self.Schema=params.get("Schema")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value 
vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>DescribeUserSqlAdviceResponse(AbstractModel)<block_start>"""DescribeUserSqlAdvice返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Advices: SQL优化建议,可解析为JSON数组,无需优化时输出为空。 :type Advices: str :param Comments: SQL优化建议备注,可解析为String数组,无需优化时输出为空。 :type Comments: str :param SqlText: SQL语句。 :type SqlText: str :param Schema: 库名。 :type Schema: str :param Tables: 相关表的DDL信息,可解析为JSON数组。 :type Tables: str :param SqlPlan: SQL执行计划,可解析为JSON,无需优化时输出为空。 :type SqlPlan: str :param Cost: SQL优化后的成本节约详情,可解析为JSON,无需优化时输出为空。 :type Cost: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Advices=<none><line_sep>self.Comments=<none><line_sep>self.SqlText=<none><line_sep>self.Schema=<none><line_sep>self.Tables=<none><line_sep>self.SqlPlan=<none><line_sep>self.Cost=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Advices=params.get("Advices")<line_sep>self.Comments=params.get("Comments")<line_sep>self.SqlText=params.get("SqlText")<line_sep>self.Schema=params.get("Schema")<line_sep>self.Tables=params.get("Tables")<line_sep>self.SqlPlan=params.get("SqlPlan")<line_sep>self.Cost=params.get("Cost")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>DiagHistoryEventItem(AbstractModel)<block_start>"""实例诊断历史事件 """<def_stmt>__init__ self<block_start>r""" :param DiagType: 诊断类型。 :type DiagType: str :param EndTime: 结束时间。 :type EndTime: str :param StartTime: 开始时间。 :type StartTime: str :param EventId: 事件唯一ID 。 :type EventId: int :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param Outline: 诊断概要。 :type Outline: str :param DiagItem: 诊断项说明。 :type DiagItem: str :param InstanceId: 实例 ID 。 :type InstanceId: str :param Metric: 保留字段。 注意:此字段可能返回 null,表示取不到有效值。 :type Metric: str :param Region: 地域。 :type Region: str """<line_sep>self.DiagType=<none><line_sep>self.EndTime=<none><line_sep>self.StartTime=<none><line_sep>self.EventId=<none><line_sep>self.Severity=<none><line_sep>self.Outline=<none><line_sep>self.DiagItem=<none><line_sep>self.InstanceId=<none><line_sep>self.Metric=<none><line_sep>self.Region=<none><block_end><def_stmt>_deserialize self params<block_start>self.DiagType=params.get("DiagType")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EventId=params.get("EventId")<line_sep>self.Severity=params.get("Severity")<line_sep>self.Outline=params.get("Outline")<line_sep>self.DiagItem=params.get("DiagItem")<line_sep>self.InstanceId=params.get("InstanceId")<line_sep>self.Metric=params.get("Metric")<line_sep>self.Region=params.get("Region")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>EventInfo(AbstractModel)<block_start>"""异常事件信息。 """<def_stmt>__init__ self<block_start>r""" :param EventId: 事件 ID 。 :type EventId: int :param DiagType: 诊断类型。 :type DiagType: str :param StartTime: 开始时间。 :type StartTime: str :param EndTime: 结束时间。 :type EndTime: str :param Outline: 概要。 
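# --- Illustrative sketch: requesting SQL advice and decoding the JSON fields ----
# Advices/Tables/SqlPlan/Cost are documented as JSON-encoded strings (empty when
# no optimisation is needed), so json.loads() can be used to inspect them. The
# SQL text and schema below are placeholders.
import json

from tencentcloud.dbbrain.v20210527 import models

req = models.DescribeUserSqlAdviceRequest()
req.InstanceId = "cdb-xxxxxxxx"                        # hypothetical instance
req.SqlText = "SELECT * FROM orders WHERE user_id = 1"
req.Schema = "test_db"
# resp = client.DescribeUserSqlAdvice(req)
# advices = json.loads(resp.Advices) if resp.Advices else []
# for advice in advices: print(advice)
# ------------------------------------------------------------------------------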
:type Outline: str :param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。 :type Severity: int :param ScoreLost: 扣分。 :type ScoreLost: int :param Metric: 保留字段。 :type Metric: str :param Count: 告警数目。 :type Count: int """<line_sep>self.EventId=<none><line_sep>self.DiagType=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.Outline=<none><line_sep>self.Severity=<none><line_sep>self.ScoreLost=<none><line_sep>self.Metric=<none><line_sep>self.Count=<none><block_end><def_stmt>_deserialize self params<block_start>self.EventId=params.get("EventId")<line_sep>self.DiagType=params.get("DiagType")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.Outline=params.get("Outline")<line_sep>self.Severity=params.get("Severity")<line_sep>self.ScoreLost=params.get("ScoreLost")<line_sep>self.Metric=params.get("Metric")<line_sep>self.Count=params.get("Count")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>GroupItem(AbstractModel)<block_start>"""描述组信息。 """<def_stmt>__init__ self<block_start>r""" :param Id: 组id。 :type Id: int :param Name: 组名称。 :type Name: str :param MemberCount: 组成员数量。 :type MemberCount: int """<line_sep>self.Id=<none><line_sep>self.Name=<none><line_sep>self.MemberCount=<none><block_end><def_stmt>_deserialize self params<block_start>self.Id=params.get("Id")<line_sep>self.Name=params.get("Name")<line_sep>self.MemberCount=params.get("MemberCount")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>HealthReportTask(AbstractModel)<block_start>"""健康报告任务详情。 """<def_stmt>__init__ self<block_start>r""" :param AsyncRequestId: 异步任务请求 ID。 :type AsyncRequestId: int :param Source: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。 :type Source: str :param Progress: 任务完成进度,单位%。 :type Progress: int :param CreateTime: 任务创建时间。 :type CreateTime: str :param StartTime: 任务开始执行时间。 :type StartTime: str :param EndTime: 任务完成执行时间。 :type EndTime: str :param InstanceInfo: 任务所属实例的基础信息。 :type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo` :param HealthStatus: 健康报告中的健康信息。 :type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus` """<line_sep>self.AsyncRequestId=<none><line_sep>self.Source=<none><line_sep>self.Progress=<none><line_sep>self.CreateTime=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.InstanceInfo=<none><line_sep>self.HealthStatus=<none><block_end><def_stmt>_deserialize self 
params<block_start>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.Source=params.get("Source")<line_sep>self.Progress=params.get("Progress")<line_sep>self.CreateTime=params.get("CreateTime")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<if_stmt>params.get("InstanceInfo")<is><not><none><block_start>self.InstanceInfo=InstanceBasicInfo()<line_sep>self.InstanceInfo._deserialize(params.get("InstanceInfo"))<block_end><if_stmt>params.get("HealthStatus")<is><not><none><block_start>self.HealthStatus=HealthStatus()<line_sep>self.HealthStatus._deserialize(params.get("HealthStatus"))<block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>HealthScoreInfo(AbstractModel)<block_start>"""获取健康得分返回的详情。 """<def_stmt>__init__ self<block_start>r""" :param IssueTypes: 异常详情。 :type IssueTypes: list of IssueTypeInfo :param EventsTotalCount: 异常事件总数。 :type EventsTotalCount: int :param HealthScore: 健康得分。 :type HealthScore: int :param HealthLevel: 健康等级, 如:"HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"。 :type HealthLevel: str """<line_sep>self.IssueTypes=<none><line_sep>self.EventsTotalCount=<none><line_sep>self.HealthScore=<none><line_sep>self.HealthLevel=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("IssueTypes")<is><not><none><block_start>self.IssueTypes=[]<for_stmt>item params.get("IssueTypes")<block_start>obj=IssueTypeInfo()<line_sep>obj._deserialize(item)<line_sep>self.IssueTypes.append(obj)<block_end><block_end>self.EventsTotalCount=params.get("EventsTotalCount")<line_sep>self.HealthScore=params.get("HealthScore")<line_sep>self.HealthLevel=params.get("HealthLevel")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>HealthStatus(AbstractModel)<block_start>"""实例健康详情。 """<def_stmt>__init__ self<block_start>r""" :param HealthScore: 健康分数,满分100。 :type HealthScore: int :param HealthLevel: 健康等级,取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK"- 危险;"HIGH_RISK" - 高危。 :type HealthLevel: str :param ScoreLost: 总扣分分数。 :type ScoreLost: int :param ScoreDetails: 扣分详情。 注意:此字段可能返回 null,表示取不到有效值。 :type ScoreDetails: list of ScoreDetail """<line_sep>self.HealthScore=<none><line_sep>self.HealthLevel=<none><line_sep>self.ScoreLost=<none><line_sep>self.ScoreDetails=<none><block_end><def_stmt>_deserialize self params<block_start>self.HealthScore=params.get("HealthScore")<line_sep>self.HealthLevel=params.get("HealthLevel")<line_sep>self.ScoreLost=params.get("ScoreLost")<if_stmt>params.get("ScoreDetails")<is><not><none><block_start>self.ScoreDetails=[]<for_stmt>item params.get("ScoreDetails")<block_start>obj=ScoreDetail()<line_sep>obj._deserialize(item)<line_sep>self.ScoreDetails.append(obj)<block_end><block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are 
useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>InstanceBasicInfo(AbstractModel)<block_start>"""实例基础信息。 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param InstanceName: 实例名称。 :type InstanceName: str :param Vip: 实例内网IP。 :type Vip: str :param Vport: 实例内网Port。 :type Vport: int :param Product: 实例产品。 :type Product: str :param EngineVersion: 实例引擎版本。 :type EngineVersion: str """<line_sep>self.InstanceId=<none><line_sep>self.InstanceName=<none><line_sep>self.Vip=<none><line_sep>self.Vport=<none><line_sep>self.Product=<none><line_sep>self.EngineVersion=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.InstanceName=params.get("InstanceName")<line_sep>self.Vip=params.get("Vip")<line_sep>self.Vport=params.get("Vport")<line_sep>self.Product=params.get("Product")<line_sep>self.EngineVersion=params.get("EngineVersion")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>InstanceConfs(AbstractModel)<block_start>"""实例配置。 """<def_stmt>__init__ self<block_start>r""" :param DailyInspection: 数据库巡检开关, Yes/No。 :type DailyInspection: str :param OverviewDisplay: 实例概览开关,Yes/No。 :type OverviewDisplay: str """<line_sep>self.DailyInspection=<none><line_sep>self.OverviewDisplay=<none><block_end><def_stmt>_deserialize self params<block_start>self.DailyInspection=params.get("DailyInspection")<line_sep>self.OverviewDisplay=params.get("OverviewDisplay")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>InstanceInfo(AbstractModel)<block_start>"""查询实例列表,返回实例的相关信息的对象。 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param InstanceName: 实例名称。 :type InstanceName: str :param Region: 实例所属地域。 :type Region: str :param HealthScore: 健康得分。 :type HealthScore: int :param Product: 所属产品。 :type Product: str :param EventCount: 异常事件数量。 :type EventCount: int :param InstanceType: 实例类型:1:MASTER;2:DR,3:RO,4:SDR。 :type InstanceType: int :param Cpu: 核心数。 :type Cpu: int :param Memory: 内存,单位MB。 :type Memory: int :param Volume: 硬盘存储,单位GB。 :type Volume: int :param EngineVersion: 数据库版本。 :type EngineVersion: str :param Vip: 内网地址。 :type Vip: str :param Vport: 内网端口。 :type Vport: int :param Source: 接入来源。 :type Source: str :param GroupId: 分组ID。 :type GroupId: str :param GroupName: 分组组名。 :type GroupName: str :param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。 :type Status: int :param UniqSubnetId: 子网统一ID。 :type UniqSubnetId: str :param DeployMode: cdb类型。 :type DeployMode: str :param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。 :type InitFlag: int :param TaskStatus: 任务状态。 :type TaskStatus: int :param UniqVpcId: 私有网络统一ID。 :type UniqVpcId: str :param InstanceConf: 实例巡检/概览的状态。 :type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param DeadlineTime: 资源到期时间。 :type DeadlineTime: str :param IsSupported: 是否是DBbrain支持的实例。 :type IsSupported: bool :param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。 :type SecAuditStatus: str 
:param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。 :type AuditPolicyStatus: str :param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。 :type AuditRunningStatus: str """<line_sep>self.InstanceId=<none><line_sep>self.InstanceName=<none><line_sep>self.Region=<none><line_sep>self.HealthScore=<none><line_sep>self.Product=<none><line_sep>self.EventCount=<none><line_sep>self.InstanceType=<none><line_sep>self.Cpu=<none><line_sep>self.Memory=<none><line_sep>self.Volume=<none><line_sep>self.EngineVersion=<none><line_sep>self.Vip=<none><line_sep>self.Vport=<none><line_sep>self.Source=<none><line_sep>self.GroupId=<none><line_sep>self.GroupName=<none><line_sep>self.Status=<none><line_sep>self.UniqSubnetId=<none><line_sep>self.DeployMode=<none><line_sep>self.InitFlag=<none><line_sep>self.TaskStatus=<none><line_sep>self.UniqVpcId=<none><line_sep>self.InstanceConf=<none><line_sep>self.DeadlineTime=<none><line_sep>self.IsSupported=<none><line_sep>self.SecAuditStatus=<none><line_sep>self.AuditPolicyStatus=<none><line_sep>self.AuditRunningStatus=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.InstanceName=params.get("InstanceName")<line_sep>self.Region=params.get("Region")<line_sep>self.HealthScore=params.get("HealthScore")<line_sep>self.Product=params.get("Product")<line_sep>self.EventCount=params.get("EventCount")<line_sep>self.InstanceType=params.get("InstanceType")<line_sep>self.Cpu=params.get("Cpu")<line_sep>self.Memory=params.get("Memory")<line_sep>self.Volume=params.get("Volume")<line_sep>self.EngineVersion=params.get("EngineVersion")<line_sep>self.Vip=params.get("Vip")<line_sep>self.Vport=params.get("Vport")<line_sep>self.Source=params.get("Source")<line_sep>self.GroupId=params.get("GroupId")<line_sep>self.GroupName=params.get("GroupName")<line_sep>self.Status=params.get("Status")<line_sep>self.UniqSubnetId=params.get("UniqSubnetId")<line_sep>self.DeployMode=params.get("DeployMode")<line_sep>self.InitFlag=params.get("InitFlag")<line_sep>self.TaskStatus=params.get("TaskStatus")<line_sep>self.UniqVpcId=params.get("UniqVpcId")<if_stmt>params.get("InstanceConf")<is><not><none><block_start>self.InstanceConf=InstanceConfs()<line_sep>self.InstanceConf._deserialize(params.get("InstanceConf"))<block_end>self.DeadlineTime=params.get("DeadlineTime")<line_sep>self.IsSupported=params.get("IsSupported")<line_sep>self.SecAuditStatus=params.get("SecAuditStatus")<line_sep>self.AuditPolicyStatus=params.get("AuditPolicyStatus")<line_sep>self.AuditRunningStatus=params.get("AuditRunningStatus")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>IssueTypeInfo(AbstractModel)<block_start>"""指标信息。 """<def_stmt>__init__ self<block_start>r""" :param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE,性能,RELIABILITY可靠性。 :type IssueType: str :param Events: 异常事件。 :type Events: list of EventInfo :param TotalCount: 异常事件总数。 :type TotalCount: int """<line_sep>self.IssueType=<none><line_sep>self.Events=<none><line_sep>self.TotalCount=<none><block_end><def_stmt>_deserialize self params<block_start>self.IssueType=params.get("IssueType")<if_stmt>params.get("Events")<is><not><none><block_start>self.Events=[]<for_stmt>item 
params.get("Events")<block_start>obj=EventInfo()<line_sep>obj._deserialize(item)<line_sep>self.Events.append(obj)<block_end><block_end>self.TotalCount=params.get("TotalCount")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>KillMySqlThreadsRequest(AbstractModel)<block_start>"""KillMySqlThreads请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceId: 实例ID。 :type InstanceId: str :param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。 :type Stage: str :param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。 :type Threads: list of int :param SqlExecId: 执行ID,此参数用于Commit阶段。 :type SqlExecId: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。 :type Product: str """<line_sep>self.InstanceId=<none><line_sep>self.Stage=<none><line_sep>self.Threads=<none><line_sep>self.SqlExecId=<none><line_sep>self.Product=<none><block_end><def_stmt>_deserialize self params<block_start>self.InstanceId=params.get("InstanceId")<line_sep>self.Stage=params.get("Stage")<line_sep>self.Threads=params.get("Threads")<line_sep>self.SqlExecId=params.get("SqlExecId")<line_sep>self.Product=params.get("Product")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>KillMySqlThreadsResponse(AbstractModel)<block_start>"""KillMySqlThreads返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param Threads: kill完成的sql会话ID列表。 :type Threads: list of int :param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。 注意:此字段可能返回 null,表示取不到有效值。 :type SqlExecId: str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.Threads=<none><line_sep>self.SqlExecId=<none><line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.Threads=params.get("Threads")<line_sep>self.SqlExecId=params.get("SqlExecId")<line_sep>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>MailConfiguration(AbstractModel)<block_start>"""邮件发送配置 """<def_stmt>__init__ self<block_start>r""" :param SendMail: 是否开启邮件发送: 0, 否; 1, 是。 :type SendMail: int :param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。 :type Region: list of str :param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。 :type HealthStatus: list of str :param ContactPerson: 联系人id, 联系人/联系组不能都为空。 :type ContactPerson: list of int :param ContactGroup: 联系组id, 联系人/联系组不能都为空。 :type ContactGroup: list of int """<line_sep>self.SendMail=<none><line_sep>self.Region=<none><line_sep>self.HealthStatus=<none><line_sep>self.ContactPerson=<none><line_sep>self.ContactGroup=<none><block_end><def_stmt>_deserialize self params<block_start>self.SendMail=params.get("SendMail")<line_sep>self.Region=params.get("Region")<line_sep>self.HealthStatus=params.get("HealthStatus")<line_sep>self.ContactPerson=params.get("ContactPerson")<line_sep>self.ContactGroup=params.get("ContactGroup")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value 
vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>ModifyDiagDBInstanceConfRequest(AbstractModel)<block_start>"""ModifyDiagDBInstanceConf请求参数结构体 """<def_stmt>__init__ self<block_start>r""" :param InstanceConfs: 实例配置,包括巡检、概览开关等。 :type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs` :param Regions: 生效实例地域,取值为"All",代表全地域。 :type Regions: str :param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。 :type Product: str :param InstanceIds: 指定更改巡检状态的实例ID。 :type InstanceIds: list of str """<line_sep>self.InstanceConfs=<none><line_sep>self.Regions=<none><line_sep>self.Product=<none><line_sep>self.InstanceIds=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("InstanceConfs")<is><not><none><block_start>self.InstanceConfs=InstanceConfs()<line_sep>self.InstanceConfs._deserialize(params.get("InstanceConfs"))<block_end>self.Regions=params.get("Regions")<line_sep>self.Product=params.get("Product")<line_sep>self.InstanceIds=params.get("InstanceIds")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>ModifyDiagDBInstanceConfResponse(AbstractModel)<block_start>"""ModifyDiagDBInstanceConf返回参数结构体 """<def_stmt>__init__ self<block_start>r""" :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str """<line_sep>self.RequestId=<none><block_end><def_stmt>_deserialize self params<block_start>self.RequestId=params.get("RequestId")<block_end><block_end><class_stmt>MonitorFloatMetric(AbstractModel)<block_start>"""监控数据(浮点型) """<def_stmt>__init__ self<block_start>r""" :param Metric: 指标名称。 :type Metric: str :param Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float """<line_sep>self.Metric=<none><line_sep>self.Unit=<none><line_sep>self.Values=<none><block_end><def_stmt>_deserialize self params<block_start>self.Metric=params.get("Metric")<line_sep>self.Unit=params.get("Unit")<line_sep>self.Values=params.get("Values")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>MonitorFloatMetricSeriesData(AbstractModel)<block_start>"""单位时间间隔内的监控指标数据(浮点型) """<def_stmt>__init__ self<block_start>r""" :param Series: 监控指标。 :type Series: list of MonitorFloatMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int """<line_sep>self.Series=<none><line_sep>self.Timestamp=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("Series")<is><not><none><block_start>self.Series=[]<for_stmt>item params.get("Series")<block_start>obj=MonitorFloatMetric()<line_sep>obj._deserialize(item)<line_sep>self.Series.append(obj)<block_end><block_end>self.Timestamp=params.get("Timestamp")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value 
vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>MonitorMetric(AbstractModel)<block_start>"""监控数据 """<def_stmt>__init__ self<block_start>r""" :param Metric: 指标名称。 :type Metric: str :param Unit: 指标单位。 :type Unit: str :param Values: 指标值。 注意:此字段可能返回 null,表示取不到有效值。 :type Values: list of float """<line_sep>self.Metric=<none><line_sep>self.Unit=<none><line_sep>self.Values=<none><block_end><def_stmt>_deserialize self params<block_start>self.Metric=params.get("Metric")<line_sep>self.Unit=params.get("Unit")<line_sep>self.Values=params.get("Values")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>MonitorMetricSeriesData(AbstractModel)<block_start>"""单位时间间隔内的监控指标数据 """<def_stmt>__init__ self<block_start>r""" :param Series: 监控指标。 :type Series: list of MonitorMetric :param Timestamp: 监控指标对应的时间戳。 :type Timestamp: list of int """<line_sep>self.Series=<none><line_sep>self.Timestamp=<none><block_end><def_stmt>_deserialize self params<block_start><if_stmt>params.get("Series")<is><not><none><block_start>self.Series=[]<for_stmt>item params.get("Series")<block_start>obj=MonitorMetric()<line_sep>obj._deserialize(item)<line_sep>self.Series.append(obj)<block_end><block_end>self.Timestamp=params.get("Timestamp")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>MySqlProcess(AbstractModel)<block_start>"""关系型数据库线程 """<def_stmt>__init__ self<block_start>r""" :param ID: 线程ID。 :type ID: str :param User: 线程的操作账号名。 :type User: str :param Host: 线程的操作主机地址。 :type Host: str :param DB: 线程的操作数据库。 :type DB: str :param State: 线程的操作状态。 :type State: str :param Command: 线程的执行类型。 :type Command: str :param Time: 线程的操作时长,单位秒。 :type Time: str :param Info: 线程的操作语句。 :type Info: str """<line_sep>self.ID=<none><line_sep>self.User=<none><line_sep>self.Host=<none><line_sep>self.DB=<none><line_sep>self.State=<none><line_sep>self.Command=<none><line_sep>self.Time=<none><line_sep>self.Info=<none><block_end><def_stmt>_deserialize self params<block_start>self.ID=params.get("ID")<line_sep>self.User=params.get("User")<line_sep>self.Host=params.get("Host")<line_sep>self.DB=params.get("DB")<line_sep>self.State=params.get("State")<line_sep>self.Command=params.get("Command")<line_sep>self.Time=params.get("Time")<line_sep>self.Info=params.get("Info")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>ProfileInfo(AbstractModel)<block_start>"""用户配置的信息 """<def_stmt>__init__ self<block_start>r""" :param Language: 语言, 如"zh"。 :type Language: str :param MailConfiguration: 邮件模板的内容。 :type MailConfiguration: 
:class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration` """<line_sep>self.Language=<none><line_sep>self.MailConfiguration=<none><block_end><def_stmt>_deserialize self params<block_start>self.Language=params.get("Language")<if_stmt>params.get("MailConfiguration")<is><not><none><block_start>self.MailConfiguration=MailConfiguration()<line_sep>self.MailConfiguration._deserialize(params.get("MailConfiguration"))<block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SchemaItem(AbstractModel)<block_start>"""SchemaItem数组 """<def_stmt>__init__ self<block_start>r""" :param Schema: 数据库名称 :type Schema: str """<line_sep>self.Schema=<none><block_end><def_stmt>_deserialize self params<block_start>self.Schema=params.get("Schema")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SchemaSpaceData(AbstractModel)<block_start>"""库空间统计数据。 """<def_stmt>__init__ self<block_start>r""" :param TableSchema: 库名。 :type TableSchema: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。 注意:此字段可能返回 null,表示取不到有效值。 :type PhysicalFileSize: float """<line_sep>self.TableSchema=<none><line_sep>self.DataLength=<none><line_sep>self.IndexLength=<none><line_sep>self.DataFree=<none><line_sep>self.TotalLength=<none><line_sep>self.FragRatio=<none><line_sep>self.TableRows=<none><line_sep>self.PhysicalFileSize=<none><block_end><def_stmt>_deserialize self params<block_start>self.TableSchema=params.get("TableSchema")<line_sep>self.DataLength=params.get("DataLength")<line_sep>self.IndexLength=params.get("IndexLength")<line_sep>self.DataFree=params.get("DataFree")<line_sep>self.TotalLength=params.get("TotalLength")<line_sep>self.FragRatio=params.get("FragRatio")<line_sep>self.TableRows=params.get("TableRows")<line_sep>self.PhysicalFileSize=params.get("PhysicalFileSize")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SchemaSpaceTimeSeries(AbstractModel)<block_start>"""库空间时序数据 """<def_stmt>__init__ self<block_start>r""" :param TableSchema: 库名 :type TableSchema: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData` """<line_sep>self.TableSchema=<none><line_sep>self.SeriesData=<none><block_end><def_stmt>_deserialize self 
params<block_start>self.TableSchema=params.get("TableSchema")<if_stmt>params.get("SeriesData")<is><not><none><block_start>self.SeriesData=MonitorMetricSeriesData()<line_sep>self.SeriesData._deserialize(params.get("SeriesData"))<block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>ScoreDetail(AbstractModel)<block_start>"""扣分详情。 """<def_stmt>__init__ self<block_start>r""" :param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param ScoreLost: 扣分总分。 :type ScoreLost: int :param ScoreLostMax: 扣分总分上限。 :type ScoreLostMax: int :param Items: 扣分项列表。 注意:此字段可能返回 null,表示取不到有效值。 :type Items: list of ScoreItem """<line_sep>self.IssueType=<none><line_sep>self.ScoreLost=<none><line_sep>self.ScoreLostMax=<none><line_sep>self.Items=<none><block_end><def_stmt>_deserialize self params<block_start>self.IssueType=params.get("IssueType")<line_sep>self.ScoreLost=params.get("ScoreLost")<line_sep>self.ScoreLostMax=params.get("ScoreLostMax")<if_stmt>params.get("Items")<is><not><none><block_start>self.Items=[]<for_stmt>item params.get("Items")<block_start>obj=ScoreItem()<line_sep>obj._deserialize(item)<line_sep>self.Items.append(obj)<block_end><block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>ScoreItem(AbstractModel)<block_start>"""诊断扣分项。 """<def_stmt>__init__ self<block_start>r""" :param DiagItem: 异常诊断项名称。 :type DiagItem: str :param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。 :type IssueType: str :param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。 :type TopSeverity: str :param Count: 该异常诊断项出现次数。 :type Count: int :param ScoreLost: 扣分分数。 :type ScoreLost: int """<line_sep>self.DiagItem=<none><line_sep>self.IssueType=<none><line_sep>self.TopSeverity=<none><line_sep>self.Count=<none><line_sep>self.ScoreLost=<none><block_end><def_stmt>_deserialize self params<block_start>self.DiagItem=params.get("DiagItem")<line_sep>self.IssueType=params.get("IssueType")<line_sep>self.TopSeverity=params.get("TopSeverity")<line_sep>self.Count=params.get("Count")<line_sep>self.ScoreLost=params.get("ScoreLost")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SecLogExportTaskInfo(AbstractModel)<block_start>"""安全审计日志导出任务信息 """<def_stmt>__init__ self<block_start>r""" :param AsyncRequestId: 异步任务Id。 :type AsyncRequestId: int :param StartTime: 任务开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type StartTime: str :param EndTime: 任务结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type EndTime: str :param CreateTime: 任务创建时间。 :type CreateTime: str :param Status: 任务状态。 :type Status: str :param Progress: 任务执行进度。 :type Progress: int :param LogStartTime: 导出日志开始时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogStartTime: str :param LogEndTime: 导出日志结束时间。 注意:此字段可能返回 null,表示取不到有效值。 :type LogEndTime: str :param TotalSize: 日志文件总大小,单位KB。 注意:此字段可能返回 null,表示取不到有效值。 :type TotalSize: int 
:param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。 注意:此字段可能返回 null,表示取不到有效值。 :type DangerLevels: list of int non-negative """<line_sep>self.AsyncRequestId=<none><line_sep>self.StartTime=<none><line_sep>self.EndTime=<none><line_sep>self.CreateTime=<none><line_sep>self.Status=<none><line_sep>self.Progress=<none><line_sep>self.LogStartTime=<none><line_sep>self.LogEndTime=<none><line_sep>self.TotalSize=<none><line_sep>self.DangerLevels=<none><block_end><def_stmt>_deserialize self params<block_start>self.AsyncRequestId=params.get("AsyncRequestId")<line_sep>self.StartTime=params.get("StartTime")<line_sep>self.EndTime=params.get("EndTime")<line_sep>self.CreateTime=params.get("CreateTime")<line_sep>self.Status=params.get("Status")<line_sep>self.Progress=params.get("Progress")<line_sep>self.LogStartTime=params.get("LogStartTime")<line_sep>self.LogEndTime=params.get("LogEndTime")<line_sep>self.TotalSize=params.get("TotalSize")<line_sep>self.DangerLevels=params.get("DangerLevels")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SlowLogHost(AbstractModel)<block_start>"""慢日志来源地址详情。 """<def_stmt>__init__ self<block_start>r""" :param UserHost: 来源地址。 :type UserHost: str :param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。 :type Ratio: float :param Count: 该来源地址的慢日志数目。 :type Count: int """<line_sep>self.UserHost=<none><line_sep>self.Ratio=<none><line_sep>self.Count=<none><block_end><def_stmt>_deserialize self params<block_start>self.UserHost=params.get("UserHost")<line_sep>self.Ratio=params.get("Ratio")<line_sep>self.Count=params.get("Count")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>SlowLogTopSqlItem(AbstractModel)<block_start>"""慢日志TopSql """<def_stmt>__init__ self<block_start>r""" :param LockTime: sql总锁等待时间,单位秒 :type LockTime: float :param LockTimeMax: 最大锁等待时间,单位秒 :type LockTimeMax: float :param LockTimeMin: 最小锁等待时间,单位秒 :type LockTimeMin: float :param RowsExamined: 总扫描行数 :type RowsExamined: int :param RowsExaminedMax: 最大扫描行数 :type RowsExaminedMax: int :param RowsExaminedMin: 最小扫描行数 :type RowsExaminedMin: int :param QueryTime: 总耗时,单位秒 :type QueryTime: float :param QueryTimeMax: 最大执行时间,单位秒 :type QueryTimeMax: float :param QueryTimeMin: 最小执行时间,单位秒 :type QueryTimeMin: float :param RowsSent: 总返回行数 :type RowsSent: int :param RowsSentMax: 最大返回行数 :type RowsSentMax: int :param RowsSentMin: 最小返回行数 :type RowsSentMin: int :param ExecTimes: 执行次数 :type ExecTimes: int :param SqlTemplate: sql模板 :type SqlTemplate: str :param SqlText: 带参数SQL(随机) :type SqlText: str :param Schema: 数据库名 :type Schema: str :param QueryTimeRatio: 总耗时占比,单位% :type QueryTimeRatio: float :param LockTimeRatio: sql总锁等待时间占比,单位% :type LockTimeRatio: float :param RowsExaminedRatio: 总扫描行数占比,单位% :type RowsExaminedRatio: float :param RowsSentRatio: 总返回行数占比,单位% :type RowsSentRatio: float :param QueryTimeAvg: 平均执行时间,单位秒 :type QueryTimeAvg: float :param RowsSentAvg: 平均返回行数 :type RowsSentAvg: float :param LockTimeAvg: 平均锁等待时间,单位秒 :type LockTimeAvg: float :param RowsExaminedAvg: 平均扫描行数 :type RowsExaminedAvg: float :param 
Md5: SOL模板的MD5值 :type Md5: str """<line_sep>self.LockTime=<none><line_sep>self.LockTimeMax=<none><line_sep>self.LockTimeMin=<none><line_sep>self.RowsExamined=<none><line_sep>self.RowsExaminedMax=<none><line_sep>self.RowsExaminedMin=<none><line_sep>self.QueryTime=<none><line_sep>self.QueryTimeMax=<none><line_sep>self.QueryTimeMin=<none><line_sep>self.RowsSent=<none><line_sep>self.RowsSentMax=<none><line_sep>self.RowsSentMin=<none><line_sep>self.ExecTimes=<none><line_sep>self.SqlTemplate=<none><line_sep>self.SqlText=<none><line_sep>self.Schema=<none><line_sep>self.QueryTimeRatio=<none><line_sep>self.LockTimeRatio=<none><line_sep>self.RowsExaminedRatio=<none><line_sep>self.RowsSentRatio=<none><line_sep>self.QueryTimeAvg=<none><line_sep>self.RowsSentAvg=<none><line_sep>self.LockTimeAvg=<none><line_sep>self.RowsExaminedAvg=<none><line_sep>self.Md5=<none><block_end><def_stmt>_deserialize self params<block_start>self.LockTime=params.get("LockTime")<line_sep>self.LockTimeMax=params.get("LockTimeMax")<line_sep>self.LockTimeMin=params.get("LockTimeMin")<line_sep>self.RowsExamined=params.get("RowsExamined")<line_sep>self.RowsExaminedMax=params.get("RowsExaminedMax")<line_sep>self.RowsExaminedMin=params.get("RowsExaminedMin")<line_sep>self.QueryTime=params.get("QueryTime")<line_sep>self.QueryTimeMax=params.get("QueryTimeMax")<line_sep>self.QueryTimeMin=params.get("QueryTimeMin")<line_sep>self.RowsSent=params.get("RowsSent")<line_sep>self.RowsSentMax=params.get("RowsSentMax")<line_sep>self.RowsSentMin=params.get("RowsSentMin")<line_sep>self.ExecTimes=params.get("ExecTimes")<line_sep>self.SqlTemplate=params.get("SqlTemplate")<line_sep>self.SqlText=params.get("SqlText")<line_sep>self.Schema=params.get("Schema")<line_sep>self.QueryTimeRatio=params.get("QueryTimeRatio")<line_sep>self.LockTimeRatio=params.get("LockTimeRatio")<line_sep>self.RowsExaminedRatio=params.get("RowsExaminedRatio")<line_sep>self.RowsSentRatio=params.get("RowsSentRatio")<line_sep>self.QueryTimeAvg=params.get("QueryTimeAvg")<line_sep>self.RowsSentAvg=params.get("RowsSentAvg")<line_sep>self.LockTimeAvg=params.get("LockTimeAvg")<line_sep>self.RowsExaminedAvg=params.get("RowsExaminedAvg")<line_sep>self.Md5=params.get("Md5")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>TableSpaceData(AbstractModel)<block_start>"""库表空间统计数据。 """<def_stmt>__init__ self<block_start>r""" :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param DataLength: 数据空间(MB)。 :type DataLength: float :param IndexLength: 索引空间(MB)。 :type IndexLength: float :param DataFree: 碎片空间(MB)。 :type DataFree: float :param TotalLength: 总使用空间(MB)。 :type TotalLength: float :param FragRatio: 碎片率(%)。 :type FragRatio: float :param TableRows: 行数。 :type TableRows: int :param PhysicalFileSize: 表对应的独立物理文件大小(MB)。 :type PhysicalFileSize: float """<line_sep>self.TableName=<none><line_sep>self.TableSchema=<none><line_sep>self.Engine=<none><line_sep>self.DataLength=<none><line_sep>self.IndexLength=<none><line_sep>self.DataFree=<none><line_sep>self.TotalLength=<none><line_sep>self.FragRatio=<none><line_sep>self.TableRows=<none><line_sep>self.PhysicalFileSize=<none><block_end><def_stmt>_deserialize self 
params<block_start>self.TableName=params.get("TableName")<line_sep>self.TableSchema=params.get("TableSchema")<line_sep>self.Engine=params.get("Engine")<line_sep>self.DataLength=params.get("DataLength")<line_sep>self.IndexLength=params.get("IndexLength")<line_sep>self.DataFree=params.get("DataFree")<line_sep>self.TotalLength=params.get("TotalLength")<line_sep>self.FragRatio=params.get("FragRatio")<line_sep>self.TableRows=params.get("TableRows")<line_sep>self.PhysicalFileSize=params.get("PhysicalFileSize")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>TableSpaceTimeSeries(AbstractModel)<block_start>"""库表空间时序数据 """<def_stmt>__init__ self<block_start>r""" :param TableName: 表名。 :type TableName: str :param TableSchema: 库名。 :type TableSchema: str :param Engine: 库表的存储引擎。 :type Engine: str :param SeriesData: 单位时间间隔内的空间指标数据。 :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData` """<line_sep>self.TableName=<none><line_sep>self.TableSchema=<none><line_sep>self.Engine=<none><line_sep>self.SeriesData=<none><block_end><def_stmt>_deserialize self params<block_start>self.TableName=params.get("TableName")<line_sep>self.TableSchema=params.get("TableSchema")<line_sep>self.Engine=params.get("Engine")<if_stmt>params.get("SeriesData")<is><not><none><block_start>self.SeriesData=MonitorFloatMetricSeriesData()<line_sep>self.SeriesData._deserialize(params.get("SeriesData"))<block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>TimeSlice(AbstractModel)<block_start>"""单位时间间隔内的慢日志统计 """<def_stmt>__init__ self<block_start>r""" :param Count: 总数 :type Count: int :param Timestamp: 统计开始时间 :type Timestamp: int """<line_sep>self.Count=<none><line_sep>self.Timestamp=<none><block_end><def_stmt>_deserialize self params<block_start>self.Count=params.get("Count")<line_sep>self.Timestamp=params.get("Timestamp")<line_sep>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end><class_stmt>UserProfile(AbstractModel)<block_start>"""用户配置的相关信息,包括邮件配置。 """<def_stmt>__init__ self<block_start>r""" :param ProfileId: 配置的id。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileId: str :param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileType: str :param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileLevel: str :param ProfileName: 配置名称。 注意:此字段可能返回 null,表示取不到有效值。 :type ProfileName: str :param ProfileInfo: 配置详情。 :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo` 
"""<line_sep>self.ProfileId=<none><line_sep>self.ProfileType=<none><line_sep>self.ProfileLevel=<none><line_sep>self.ProfileName=<none><line_sep>self.ProfileInfo=<none><block_end><def_stmt>_deserialize self params<block_start>self.ProfileId=params.get("ProfileId")<line_sep>self.ProfileType=params.get("ProfileType")<line_sep>self.ProfileLevel=params.get("ProfileLevel")<line_sep>self.ProfileName=params.get("ProfileName")<if_stmt>params.get("ProfileInfo")<is><not><none><block_start>self.ProfileInfo=ProfileInfo()<line_sep>self.ProfileInfo._deserialize(params.get("ProfileInfo"))<block_end>memeber_set=set(params.keys())<for_stmt>name,value vars(self).items()<block_start><if_stmt>name<in>memeber_set<block_start>memeber_set.remove(name)<block_end><block_end><if_stmt>len(memeber_set)<g>0<block_start>warnings.warn("%s fileds are useless."%",".join(memeber_set))<block_end><block_end><block_end>
# Ported to Python 3 # Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py <import_stmt>json<import_stmt>logging<import_from_stmt>requests Response<import_from_stmt>io StringIO<try_stmt><block_start><import_from_stmt>werkzeug.exceptions Unauthorized<block_end><except_stmt>ImportError<block_start>Unauthorized=Exception<block_end><import_from_stmt>oauth utils<class_stmt>Provider(object)<block_start>"""Base provider class for different types of OAuth 2.0 providers."""<def_stmt>_handle_exception self exc<block_start>"""Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception """<line_sep>logger=logging.getLogger(__name__)<line_sep>logger.exception(exc)<block_end><def_stmt>_make_response self body="" headers=<none> status_code=200<block_start>"""Return a response object from the given parameters. :param body: Buffer/string containing the response body. :type body: str :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """<line_sep>res=Response()<line_sep>res.status_code=status_code<if_stmt>headers<is><not><none><block_start>res.headers.update(headers)<block_end>res.raw=StringIO(body)<line_sep><return>res<block_end><def_stmt>_make_redirect_error_response self redirect_uri err<block_start>"""Return a HTTP 302 redirect response object containing the error. :param redirect_uri: Client redirect URI. :type redirect_uri: str :param err: OAuth error message. :type err: str :rtype: requests.Response """<line_sep>params={"error":err "response_type":<none> "client_id":<none> "redirect_uri":<none>}<line_sep>redirect=utils.build_url(redirect_uri params)<line_sep><return>self._make_response(headers={"Location":redirect} status_code=302)<block_end><def_stmt>_make_json_response self data headers=<none> status_code=200<block_start>"""Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """<line_sep>response_headers={}<if_stmt>headers<is><not><none><block_start>response_headers.update(headers)<block_end>response_headers["Content-Type"]="application/json;charset=UTF-8"<line_sep>response_headers["Cache-Control"]="no-store"<line_sep>response_headers["Pragma"]="no-cache"<line_sep><return>self._make_response(json.dumps(data) response_headers status_code)<block_end><def_stmt>_make_json_error_response self err<block_start>"""Return a JSON-encoded response object representing the error. :param err: OAuth error message. :type err: str :rtype: requests.Response """<line_sep><return>self._make_json_response({"error":err} status_code=400)<block_end><def_stmt>_invalid_redirect_uri_response self<block_start>"""What to return when the redirect_uri parameter is missing. :rtype: requests.Response """<line_sep><return>self._make_json_error_response("invalid_request")<block_end><block_end><class_stmt>AuthorizationProvider(Provider)<block_start>"""OAuth 2.0 authorization provider. This class manages authorization codes and access tokens. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a provider. 
These are the methods that must be implemented in a subclass: validate_client_id(self, client_id) # Return True or False validate_client_secret(self, client_id, client_secret) # Return True or False validate_scope(self, client_id, scope) # Return True or False validate_redirect_uri(self, client_id, redirect_uri) # Return True or False validate_access(self) # Use this to validate your app session user # Return True or False from_authorization_code(self, client_id, code, scope) # Return mixed data or None on invalid from_refresh_token(self, client_id, refresh_token, scope) # Return mixed data or None on invalid persist_authorization_code(self, client_id, code, scope) # Return value ignored persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data) # Return value ignored discard_authorization_code(self, client_id, code) # Return value ignored discard_refresh_token(self, client_id, refresh_token) # Return value ignored Optionally, the following may be overridden to acheive desired behavior: @property token_length(self) @property token_type(self) @property token_expires_in(self) generate_authorization_code(self) generate_access_token(self) generate_refresh_token(self) """<line_sep>@property<def_stmt>token_length self<block_start>"""Property method to get the length used to generate tokens. :rtype: int """<line_sep><return>40<block_end>@property<def_stmt>token_type self<block_start>"""Property method to get the access token type. :rtype: str """<line_sep><return>"Bearer"<block_end>@property<def_stmt>token_expires_in self<block_start>"""Property method to get the token expiration time in seconds. :rtype: int """<line_sep><return>3600<block_end><def_stmt>generate_authorization_code self<block_start>"""Generate a random authorization code. :rtype: str """<line_sep><return>utils.random_ascii_string(self.token_length)<block_end><def_stmt>generate_access_token self<block_start>"""Generate a random access token. :rtype: str """<line_sep><return>utils.random_ascii_string(self.token_length)<block_end><def_stmt>generate_refresh_token self<block_start>"""Generate a random refresh token. :rtype: str """<line_sep><return>utils.random_ascii_string(self.token_length)<block_end><def_stmt>get_authorization_code self response_type client_id redirect_uri **params<block_start>"""Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. 
:type redirect_uri: str :rtype: requests.Response """<line_sep># Ensure proper response_type <if_stmt>response_type<ne>"code"<block_start>err="unsupported_response_type"<line_sep><return>self._make_redirect_error_response(redirect_uri err)<block_end># Check redirect URI is_valid_redirect_uri=self.validate_redirect_uri(client_id redirect_uri)<if_stmt><not>is_valid_redirect_uri<block_start><return>self._invalid_redirect_uri_response()<block_end># Check conditions is_valid_client_id=self.validate_client_id(client_id)<line_sep>is_valid_access=self.validate_access()<line_sep>scope=params.get("scope" "")<line_sep>is_valid_scope=self.validate_scope(client_id scope)<line_sep># Return proper error responses on invalid conditions <if_stmt><not>is_valid_client_id<block_start>err="unauthorized_client"<line_sep><return>self._make_redirect_error_response(redirect_uri err)<block_end><if_stmt><not>is_valid_access<block_start>err="access_denied"<line_sep><return>self._make_redirect_error_response(redirect_uri err)<block_end><if_stmt><not>is_valid_scope<block_start>err="invalid_scope"<line_sep><return>self._make_redirect_error_response(redirect_uri err)<block_end># Generate authorization code code=self.generate_authorization_code()<line_sep># Save information to be used to validate later requests self.persist_authorization_code(client_id=client_id code=code scope=scope)<line_sep># Return redirection response params.update({"code":code "response_type":<none> "client_id":<none> "redirect_uri":<none>})<line_sep>redirect=utils.build_url(redirect_uri params)<line_sep><return>self._make_response(headers={"Location":redirect} status_code=302)<block_end><def_stmt>refresh_token self grant_type client_id client_secret refresh_token **params<block_start>"""Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. 
:type refresh_token: str :rtype: requests.Response """<line_sep># Ensure proper grant_type <if_stmt>grant_type<ne>"refresh_token"<block_start><return>self._make_json_error_response("unsupported_grant_type")<block_end># Check conditions is_valid_client_id=self.validate_client_id(client_id)<line_sep>is_valid_client_secret=self.validate_client_secret(client_id client_secret)<line_sep>scope=params.get("scope" "")<line_sep>is_valid_scope=self.validate_scope(client_id scope)<line_sep>data=self.from_refresh_token(client_id refresh_token scope)<line_sep>is_valid_refresh_token=data<is><not><none><line_sep># Return proper error responses on invalid conditions <if_stmt><not>(is_valid_client_id<and>is_valid_client_secret)<block_start><return>self._make_json_error_response("invalid_client")<block_end><if_stmt><not>is_valid_scope<block_start><return>self._make_json_error_response("invalid_scope")<block_end><if_stmt><not>is_valid_refresh_token<block_start><return>self._make_json_error_response("invalid_grant")<block_end># Discard original refresh token self.discard_refresh_token(client_id refresh_token)<line_sep># Generate access tokens once all conditions have been met access_token=self.generate_access_token()<line_sep>token_type=self.token_type<line_sep>expires_in=self.token_expires_in<line_sep>refresh_token=self.generate_refresh_token()<line_sep># Save information to be used to validate later requests self.persist_token_information(client_id=client_id scope=scope access_token=access_token token_type=token_type expires_in=expires_in refresh_token=refresh_token data=data )<line_sep># Return json response <return>self._make_json_response({"access_token":access_token "token_type":token_type "expires_in":expires_in "refresh_token":refresh_token })<block_end><def_stmt>get_token self grant_type client_id client_secret redirect_uri code **params<block_start>"""Generate access token HTTP response. :param grant_type: Desired grant type. Must be "authorization_code". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :param code: Authorization code. 
:type code: str :rtype: requests.Response """<line_sep># Ensure proper grant_type <if_stmt>grant_type<ne>"authorization_code"<block_start><return>self._make_json_error_response("unsupported_grant_type")<block_end># Check conditions is_valid_client_id=self.validate_client_id(client_id)<line_sep>is_valid_client_secret=self.validate_client_secret(client_id client_secret)<line_sep>is_valid_redirect_uri=self.validate_redirect_uri(client_id redirect_uri)<line_sep>scope=params.get("scope" "")<line_sep>is_valid_scope=self.validate_scope(client_id scope)<line_sep>data=self.from_authorization_code(client_id code scope)<line_sep>is_valid_grant=data<is><not><none><line_sep># Return proper error responses on invalid conditions <if_stmt><not>(is_valid_client_id<and>is_valid_client_secret)<block_start><return>self._make_json_error_response("invalid_client")<block_end><if_stmt><not>is_valid_grant<or><not>is_valid_redirect_uri<block_start><return>self._make_json_error_response("invalid_grant")<block_end><if_stmt><not>is_valid_scope<block_start><return>self._make_json_error_response("invalid_scope")<block_end># Discard original authorization code self.discard_authorization_code(client_id code)<line_sep># Generate access tokens once all conditions have been met access_token=self.generate_access_token()<line_sep>token_type=self.token_type<line_sep>expires_in=self.token_expires_in<line_sep>refresh_token=self.generate_refresh_token()<line_sep># Save information to be used to validate later requests self.persist_token_information(client_id=client_id scope=scope access_token=access_token token_type=token_type expires_in=expires_in refresh_token=refresh_token data=data )<line_sep># Return json response <return>self._make_json_response({"access_token":access_token "token_type":token_type "expires_in":expires_in "refresh_token":refresh_token })<block_end><def_stmt>get_authorization_code_from_uri self uri<block_start>"""Get authorization code response from a URI. This method will ignore the domain and path of the request, instead automatically parsing the query string parameters. :param uri: URI to parse for authorization information. :type uri: str :rtype: requests.Response """<line_sep>params=utils.url_query_params(uri)<try_stmt><block_start><if_stmt>"response_type"<not><in>params<block_start><raise>TypeError("Missing parameter response_type in URL query")<block_end><if_stmt>"client_id"<not><in>params<block_start><raise>TypeError("Missing parameter client_id in URL query")<block_end><if_stmt>"redirect_uri"<not><in>params<block_start><raise>TypeError("Missing parameter redirect_uri in URL query")<block_end><return>self.get_authorization_code(**params)<block_end><except_stmt>TypeError<as>exc<block_start>self._handle_exception(exc)<line_sep># Catch missing parameters in request err="invalid_request"<if_stmt>"redirect_uri"<in>params<block_start>u=params["redirect_uri"]<line_sep><return>self._make_redirect_error_response(u err)<block_end><else_stmt><block_start><return>self._invalid_redirect_uri_response()<block_end><block_end><except_stmt>Exception<as>exc<block_start>self._handle_exception(exc)<line_sep># Catch all other server errors err="server_error"<line_sep>u=params["redirect_uri"]<line_sep><return>self._make_redirect_error_response(u err)<block_end><block_end><def_stmt>get_token_from_post_data self data<block_start>"""Get a token response from POST data. :param data: POST data containing authorization information.
:type data: dict :rtype: requests.Response """<try_stmt># Verify OAuth 2.0 Parameters <block_start><for_stmt>x ["grant_type" "client_id" "client_secret"]<block_start><if_stmt><not>data.get(x)<block_start><raise>TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))<block_end><block_end># Handle get token from refresh_token <if_stmt>"refresh_token"<in>data<block_start><return>self.refresh_token(**data)<block_end># Handle get token from authorization code <for_stmt>x ["redirect_uri" "code"]<block_start><if_stmt><not>data.get(x)<block_start><raise>TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))<block_end><block_end><return>self.get_token(**data)<block_end><except_stmt>TypeError<as>exc<block_start>self._handle_exception(exc)<line_sep># Catch missing parameters in request <return>self._make_json_error_response("invalid_request")<block_end><except_stmt>Exception<as>exc<block_start>self._handle_exception(exc)<line_sep># Catch all other server errors <return>self._make_json_error_response("server_error")<block_end><block_end><def_stmt>validate_client_id self client_id<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_client_id.")<block_end><def_stmt>validate_client_secret self client_id client_secret<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_client_secret.")<block_end><def_stmt>validate_redirect_uri self client_id redirect_uri<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_redirect_uri.")<block_end><def_stmt>validate_scope self client_id scope<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_scope.")<block_end><def_stmt>validate_access self<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_access.")<block_end><def_stmt>from_authorization_code self client_id code scope<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"from_authorization_code.")<block_end><def_stmt>from_refresh_token self client_id refresh_token scope<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"from_refresh_token.")<block_end><def_stmt>persist_authorization_code self client_id code scope<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"persist_authorization_code.")<block_end><def_stmt>persist_token_information self client_id scope access_token token_type expires_in refresh_token data<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"persist_token_information.")<block_end><def_stmt>discard_authorization_code self client_id code<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"discard_authorization_code.")<block_end><def_stmt>discard_refresh_token self client_id refresh_token<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"discard_refresh_token.")<block_end><block_end><class_stmt>OAuthError(Unauthorized)<block_start>"""OAuth error, including the OAuth error reason."""<def_stmt>__init__ self reason *args **kwargs<block_start>self.reason=reason<line_sep>super(OAuthError self).__init__(*args **kwargs)<block_end><block_end><class_stmt>ResourceAuthorization(object)<block_start>"""A class containing an OAuth 2.0 authorization."""<line_sep>is_oauth=<false><line_sep>is_valid=<none><line_sep>token=<none><line_sep>client_id=<none><line_sep>expires_in=<none><line_sep>error=<none><def_stmt>raise_error_if_invalid
self<block_start><if_stmt><not>self.is_valid<block_start><raise>OAuthError(self.error "OAuth authorization error")<block_end><block_end><block_end><class_stmt>ResourceProvider(Provider)<block_start>"""OAuth 2.0 resource provider. This class provides an interface to validate an incoming request and authenticate resource access. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a resource provider. These are the methods that must be implemented in a subclass: get_authorization_header(self) # Return header string for key "Authorization" or None validate_access_token(self, access_token, authorization) # Set is_valid=True, client_id, and expires_in attributes # on authorization if authorization was successful. # Return value is ignored """<line_sep>@property<def_stmt>authorization_class self<block_start><return>ResourceAuthorization<block_end><def_stmt>get_authorization self<block_start>"""Get authorization object representing status of authentication."""<line_sep>auth=self.authorization_class()<line_sep>header=self.get_authorization_header()<if_stmt><not>header<or><not>header.split<block_start><return>auth<block_end>header=header.split()<if_stmt>len(header)<g>1<and>header[0]<eq>"Bearer"<block_start>auth.is_oauth=<true><line_sep>access_token=header[1]<line_sep>self.validate_access_token(access_token auth)<if_stmt><not>auth.is_valid<block_start>auth.error="access_denied"<block_end><block_end><return>auth<block_end><def_stmt>get_authorization_header self<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"get_authorization_header.")<block_end><def_stmt>validate_access_token self access_token authorization<block_start><raise>NotImplementedError("Subclasses must implement "<concat>"validate_token.")<block_end><block_end>
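# Hedged sketch, not from the original module: a minimal in-memory ResourceProvider subclass
# showing how the two required hooks feed get_authorization(). The header source and token
# store used here are invented for the example.
<class_stmt>_ExampleResourceProvider(ResourceProvider)<block_start><def_stmt>__init__ self headers tokens<block_start>self._headers=headers<line_sep>self._tokens=tokens<block_end><def_stmt>get_authorization_header self<block_start># Return the raw "Authorization" header value, or None when absent <return>self._headers.get("Authorization")<block_end><def_stmt>validate_access_token self access_token authorization<block_start>info=self._tokens.get(access_token)<if_stmt>info<is><not><none><block_start>authorization.is_valid=<true><line_sep>authorization.client_id=info["client_id"]<line_sep>authorization.expires_in=info["expires_in"]<block_end><block_end><block_end>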
# Copyright (c) 2021 GradsFlow. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>torch<import_from_stmt>gradsflow.models Model<class_stmt>DummyModel(Model)<block_start><def_stmt>__init__ self<block_start>learner=torch.nn.Linear(1 4)<line_sep>super().__init__(learner)<block_end><def_stmt>backward self loss:torch.Tensor<block_start><return><none><block_end><def_stmt>train_step self batch<block_start><return>{"loss":torch.as_tensor(1) "metrics":{"accuracy":1}}<block_end><def_stmt>val_step self batch<block_start><return>{"loss":torch.as_tensor(1) "metrics":{"accuracy":1}}<block_end><block_end>
# -*- coding: utf-8 -*- """Utilities common to CIFAR10 and CIFAR100 datasets. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>sys<import_from_stmt>six.moves cPickle<def_stmt>load_batch fpath label_key='labels'<block_start>"""Internal utility for parsing CIFAR data. # Arguments fpath: path the file to parse. label_key: key for label data in the retrieve dictionary. # Returns A tuple `(data, labels)`. """<with_stmt>open(fpath 'rb')<as>f<block_start><if_stmt>sys.version_info<l>(3 )<block_start>d=cPickle.load(f)<block_end><else_stmt><block_start>d=cPickle.load(f encoding='bytes')<line_sep># decode utf8 d_decoded={}<for_stmt>k,v d.items()<block_start>d_decoded[k.decode('utf8')]=v<block_end>d=d_decoded<block_end><block_end>data=d['data']<line_sep>labels=d[label_key]<line_sep>data=data.reshape(data.shape[0] 3 32 32)<line_sep><return>data labels<block_end>
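load_batch above parses a single pickle file, so assembling the full dataset is left to the caller. A minimal sketch under the usual CIFAR-10 archive layout (five data_batch_N files plus test_batch); the directory name and the load_cifar10 helper are assumptions made for this example.

import os
import numpy as np

def load_cifar10(dirname='cifar-10-batches-py'):
    xs, ys = [], []
    for i in range(1, 6):
        data, labels = load_batch(os.path.join(dirname, 'data_batch_%d' % i))
        xs.append(data)
        ys.append(labels)
    x_train = np.concatenate(xs)   # shape (50000, 3, 32, 32), channels first
    y_train = np.concatenate(ys)
    x_test, y_test = load_batch(os.path.join(dirname, 'test_batch'))
    return (x_train, y_train), (np.asarray(x_test), np.asarray(y_test))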
<import_from_stmt>freight.api.serializer serialize<import_from_stmt>freight.testutils TestCase<class_stmt>UserSerializerTest(TestCase)<block_start><def_stmt>test_simple self<block_start>user=self.create_user()<line_sep>result=serialize(user)<assert_stmt>result["id"]<eq>str(user.id)<assert_stmt>result["name"]<eq>user.name<block_end><block_end>
<import_stmt>os<import_stmt>pytest<import_stmt>torch<import_from_stmt>hivemind RemoteExpert<import_from_stmt>hivemind.moe.server background_server<line_sep>CUSTOM_EXPERTS_PATH=os.path.join(os.path.dirname(__file__) "test_utils" "custom_networks.py")<line_sep>@pytest.mark.forked<def_stmt>test_custom_expert hid_dim=16<block_start><with_stmt>background_server(expert_cls="perceptron" num_experts=2 device="cpu" hidden_dim=hid_dim num_handlers=2 no_dht=<true> custom_module_path=CUSTOM_EXPERTS_PATH )<as>(server_endpoint _)<block_start>expert0=RemoteExpert("expert.0" server_endpoint)<line_sep>expert1=RemoteExpert("expert.1" server_endpoint)<for_stmt>batch_size (1 4)<block_start>batch=torch.randn(batch_size hid_dim)<line_sep>output0=expert0(batch)<line_sep>output1=expert1(batch)<line_sep>loss=output0.sum()<line_sep>loss.backward()<line_sep>loss=output1.sum()<line_sep>loss.backward()<block_end><block_end><block_end>@pytest.mark.forked<def_stmt>test_multihead_expert hid_dim=16<block_start><with_stmt>background_server(expert_cls="multihead" num_experts=2 device="cpu" hidden_dim=hid_dim num_handlers=2 no_dht=<true> custom_module_path=CUSTOM_EXPERTS_PATH )<as>(server_endpoint _)<block_start>expert0=RemoteExpert("expert.0" server_endpoint)<line_sep>expert1=RemoteExpert("expert.1" server_endpoint)<for_stmt>batch_size (1 4)<block_start>batch=(torch.randn(batch_size hid_dim) torch.randn(batch_size 2<times>hid_dim) torch.randn(batch_size 3<times>hid_dim) )<line_sep>output0=expert0(*batch)<line_sep>output1=expert1(*batch)<line_sep>loss=output0.sum()<line_sep>loss.backward()<line_sep>loss=output1.sum()<line_sep>loss.backward()<block_end><block_end><block_end>
''' Largest rectangle area in a histogram:: Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars. For simplicity, assume that all bars have same width and the width is 1 unit. '''<def_stmt>max_area_histogram histogram<block_start>stack=list()<line_sep>max_area=0# Initialize max area index=0<while_stmt>index<l>len(histogram)<block_start><if_stmt>(<not>stack)<or>(histogram[stack[-1]]<le>histogram[index])<block_start>stack.append(index)<line_sep>index<augadd>1<block_end><else_stmt><block_start>top_of_stack=stack.pop()<line_sep>area=(histogram[top_of_stack]<times>((index-stack[-1]-1)<if>stack<else>index))<line_sep>max_area=max(max_area area)<block_end><block_end><while_stmt>stack<block_start>top_of_stack=stack.pop()<line_sep>area=(histogram[top_of_stack]<times>((index-stack[-1]-1)<if>stack<else>index))<line_sep>max_area=max(max_area area)<block_end><return>max_area<block_end>hist=[4 7 1 8 4 9 5]<line_sep>print("Maximum area is" max_area_histogram(hist))<line_sep>
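The stack-based routine above runs in linear time but is easy to get subtly wrong, so a quadratic brute force makes a convenient cross-check: for every starting bar, widen the rectangle while tracking the minimum height seen so far. This checker is illustrative only and is not part of the original snippet.

def max_area_bruteforce(histogram):
    best = 0
    for i in range(len(histogram)):
        height = histogram[i]
        for j in range(i, len(histogram)):
            # the rectangle spanning bars i..j is limited by the shortest bar in it
            height = min(height, histogram[j])
            best = max(best, height * (j - i + 1))
    return best

assert max_area_bruteforce(hist) == max_area_histogram(hist)  # both give 16 for [4, 7, 1, 8, 4, 9, 5]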
# Copyright <NAME> 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt) <import_from_stmt>b2.build type<def_stmt>register <block_start>type.register_type('OBJ' ['obj'] <none> ['NT' 'CYGWIN'])<line_sep>type.register_type('OBJ' ['o'])<block_end>register()<line_sep>
""" Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 <NAME> """<import_stmt>glob<import_stmt>operator<import_stmt>os<import_stmt>logging<import_stmt>torch<import_from_stmt>.model unwrap_model get_state_dict<line_sep>_logger=logging.getLogger(__name__)<class_stmt>CheckpointSaver<block_start><def_stmt>__init__ self model optimizer args=<none> model_ema=<none> amp_scaler=<none> checkpoint_prefix='checkpoint' recovery_prefix='recovery' checkpoint_dir='' recovery_dir='' decreasing=<false> max_history=10 unwrap_fn=unwrap_model# objects to save state_dicts of <block_start>self.model=model<line_sep>self.optimizer=optimizer<line_sep>self.args=args<line_sep>self.model_ema=model_ema<line_sep>self.amp_scaler=amp_scaler<line_sep># state self.checkpoint_files=[]# (filename, metric) tuples in order of decreasing betterness self.best_epoch=<none><line_sep>self.best_metric=<none><line_sep>self.curr_recovery_file=''<line_sep>self.last_recovery_file=''<line_sep># config self.checkpoint_dir=checkpoint_dir<line_sep>self.recovery_dir=recovery_dir<line_sep>self.save_prefix=checkpoint_prefix<line_sep>self.recovery_prefix=recovery_prefix<line_sep>self.extension='.pth.tar'<line_sep>self.decreasing=decreasing# a lower metric is better if True self.cmp=operator.lt<if>decreasing<else>operator.gt# True if lhs better than rhs self.max_history=max_history<line_sep>self.unwrap_fn=unwrap_fn<assert_stmt>self.max_history<ge>1<block_end><def_stmt>save_checkpoint self epoch metric=<none><block_start><assert_stmt>epoch<ge>0<line_sep>tmp_save_path=os.path.join(self.checkpoint_dir 'tmp'+self.extension)<line_sep>last_save_path=os.path.join(self.checkpoint_dir 'last'+self.extension)<line_sep>self._save(tmp_save_path epoch metric)<if_stmt>os.path.exists(last_save_path)<block_start>os.unlink(last_save_path)# required for Windows support. 
<block_end>os.rename(tmp_save_path last_save_path)<line_sep>worst_file=self.checkpoint_files[-1]<if>self.checkpoint_files<else><none><if_stmt>(len(self.checkpoint_files)<l>self.max_history<or>metric<is><none><or>self.cmp(metric worst_file[1]))<block_start><if_stmt>len(self.checkpoint_files)<ge>self.max_history<block_start>self._cleanup_checkpoints(1)<block_end>filename='-'.join([self.save_prefix str(epoch)])+self.extension<line_sep>save_path=os.path.join(self.checkpoint_dir filename)<line_sep>os.link(last_save_path save_path)<line_sep>self.checkpoint_files.append((save_path metric))<line_sep>self.checkpoint_files=sorted(self.checkpoint_files key=<lambda>x:x[1] reverse=<not>self.decreasing)<line_sep># sort in descending order if a lower metric is not better checkpoints_str="Current checkpoints:\n"<for_stmt>c self.checkpoint_files<block_start>checkpoints_str<augadd>' {}\n'.format(c)<block_end>_logger.info(checkpoints_str)<if_stmt>metric<is><not><none><and>(self.best_metric<is><none><or>self.cmp(metric self.best_metric))<block_start>self.best_epoch=epoch<line_sep>self.best_metric=metric<line_sep>best_save_path=os.path.join(self.checkpoint_dir 'model_best'+self.extension)<if_stmt>os.path.exists(best_save_path)<block_start>os.unlink(best_save_path)<block_end>os.link(last_save_path best_save_path)<block_end><block_end><return>(<none> <none>)<if>self.best_metric<is><none><else>(self.best_metric self.best_epoch)<block_end><def_stmt>_save self save_path epoch metric=<none><block_start>save_state={'epoch':epoch 'arch':type(self.model).__name__.lower() 'state_dict':get_state_dict(self.model self.unwrap_fn) 'optimizer':self.optimizer.state_dict() 'version':2 # version < 2 increments epoch before save }<if_stmt>self.args<is><not><none><block_start>save_state['arch']=self.args.model<line_sep>save_state['args']=self.args<block_end><if_stmt>self.amp_scaler<is><not><none><block_start>save_state[self.amp_scaler.state_dict_key]=self.amp_scaler.state_dict()<block_end><if_stmt>self.model_ema<is><not><none><block_start>save_state['state_dict_ema']=get_state_dict(self.model_ema self.unwrap_fn)<block_end><if_stmt>metric<is><not><none><block_start>save_state['metric']=metric<block_end>torch.save(save_state save_path)<block_end><def_stmt>_cleanup_checkpoints self trim=0<block_start>trim=min(len(self.checkpoint_files) trim)<line_sep>delete_index=self.max_history-trim<if_stmt>delete_index<l>0<or>len(self.checkpoint_files)<le>delete_index<block_start><return><block_end>to_delete=self.checkpoint_files[delete_index:]<for_stmt>d to_delete<block_start><try_stmt><block_start>_logger.debug("Cleaning checkpoint: {}".format(d))<line_sep>os.remove(d[0])<block_end><except_stmt>Exception<as>e<block_start>_logger.error("Exception '{}' while deleting checkpoint".format(e))<block_end><block_end>self.checkpoint_files=self.checkpoint_files[:delete_index]<block_end><def_stmt>save_recovery self epoch batch_idx=0<block_start><assert_stmt>epoch<ge>0<line_sep>filename='-'.join([self.recovery_prefix str(epoch) str(batch_idx)])+self.extension<line_sep>save_path=os.path.join(self.recovery_dir filename)<line_sep>self._save(save_path epoch)<if_stmt>os.path.exists(self.last_recovery_file)<block_start><try_stmt><block_start>_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))<line_sep>os.remove(self.last_recovery_file)<block_end><except_stmt>Exception<as>e<block_start>_logger.error("Exception '{}' while removing {}".format(e 
self.last_recovery_file))<block_end><block_end>self.last_recovery_file=self.curr_recovery_file<line_sep>self.curr_recovery_file=save_path<block_end><def_stmt>find_recovery self<block_start>recovery_path=os.path.join(self.recovery_dir self.recovery_prefix)<line_sep>files=glob.glob(recovery_path+'*'+self.extension)<line_sep>files=sorted(files)<line_sep><return>files[0]<if>len(files)<else>''<block_end><block_end>
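A minimal sketch of driving CheckpointSaver from a training loop. The toy model, optimizer, and eval_metric values are placeholders; decreasing=True would be passed instead when the tracked metric is a loss, where smaller is better.

import os
import torch

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
os.makedirs('./checkpoints', exist_ok=True)
saver = CheckpointSaver(model, optimizer, checkpoint_dir='./checkpoints', max_history=3)

for epoch in range(5):
    # ... one epoch of training and validation would go here ...
    eval_metric = 0.5 + 0.1 * epoch          # stand-in for a real validation accuracy
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=eval_metric)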
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). <import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>pathlib Path<import_from_stmt>textwrap dedent<import_from_stmt>typing ClassVar Iterable List Optional Tuple Type<import_from_stmt>pants.core.goals.check Check CheckRequest CheckResult CheckResults check<import_from_stmt>pants.core.util_rules.distdir DistDir<import_from_stmt>pants.engine.addresses Address<import_from_stmt>pants.engine.fs Workspace<import_from_stmt>pants.engine.target FieldSet MultipleSourcesField Target Targets<import_from_stmt>pants.engine.unions UnionMembership<import_from_stmt>pants.testutil.option_util create_options_bootstrapper<import_from_stmt>pants.testutil.rule_runner MockGet RuleRunner mock_console run_rule_with_mocks<import_from_stmt>pants.util.logging LogLevel<class_stmt>MockTarget(Target)<block_start>alias="mock_target"<line_sep>core_fields=(MultipleSourcesField )<block_end><class_stmt>MockCheckFieldSet(FieldSet)<block_start>required_fields=(MultipleSourcesField )<block_end><class_stmt>MockCheckRequest(CheckRequest metaclass=ABCMeta)<block_start>field_set_type=MockCheckFieldSet<line_sep>checker_name:ClassVar[str]<line_sep>@staticmethod@abstractmethod<def_stmt>exit_code _:Iterable[Address]<arrow>int<block_start><pass><block_end>@property<def_stmt>check_results self<arrow>CheckResults<block_start>addresses=[config.address<for>config self.field_sets]<line_sep><return>CheckResults([CheckResult(self.exit_code(addresses) "" "" )] checker_name=self.checker_name )<block_end><block_end><class_stmt>SuccessfulRequest(MockCheckRequest)<block_start>checker_name="SuccessfulChecker"<line_sep>@staticmethod<def_stmt>exit_code _:Iterable[Address]<arrow>int<block_start><return>0<block_end><block_end><class_stmt>FailingRequest(MockCheckRequest)<block_start>checker_name="FailingChecker"<line_sep>@staticmethod<def_stmt>exit_code _:Iterable[Address]<arrow>int<block_start><return>1<block_end><block_end><class_stmt>ConditionallySucceedsRequest(MockCheckRequest)<block_start>checker_name="ConditionallySucceedsChecker"<line_sep>@staticmethod<def_stmt>exit_code addresses:Iterable[Address]<arrow>int<block_start><if_stmt>any(address.target_name<eq>"bad"<for>address addresses)<block_start><return>127<block_end><return>0<block_end><block_end><class_stmt>SkippedRequest(MockCheckRequest)<block_start>@staticmethod<def_stmt>exit_code _<arrow>int<block_start><return>0<block_end>@property<def_stmt>check_results self<arrow>CheckResults<block_start><return>CheckResults([] checker_name="SkippedChecker")<block_end><block_end><class_stmt>InvalidField(MultipleSourcesField)<block_start><pass><block_end><class_stmt>InvalidFieldSet(MockCheckFieldSet)<block_start>required_fields=(InvalidField )<block_end><class_stmt>InvalidRequest(MockCheckRequest)<block_start>field_set_type=InvalidFieldSet<line_sep>checker_name="InvalidChecker"<line_sep>@staticmethod<def_stmt>exit_code _:Iterable[Address]<arrow>int<block_start><return>-1<block_end><block_end><def_stmt>make_target address:Optional[Address]=<none><arrow>Target<block_start><if_stmt>address<is><none><block_start>address=Address("" target_name="tests")<block_end><return>MockTarget({} address)<block_end><def_stmt>run_typecheck_rule * request_types:List[Type[CheckRequest]] targets:List[Target]<arrow>Tuple[int str]<block_start>union_membership=UnionMembership({CheckRequest:request_types})<with_stmt>mock_console(create_options_bootstrapper())<as>(console 
stdio_reader)<block_start>rule_runner=RuleRunner()<line_sep>result:Check=run_rule_with_mocks(check rule_args=[console Workspace(rule_runner.scheduler _enforce_effects=<false>) Targets(targets) DistDir(relpath=Path("dist")) union_membership ] mock_gets=[MockGet(output_type=CheckResults input_type=CheckRequest mock=<lambda>field_set_collection:field_set_collection.check_results ) ] union_membership=union_membership )<assert_stmt><not>stdio_reader.get_stdout()<line_sep><return>result.exit_code stdio_reader.get_stderr()<block_end><block_end><def_stmt>test_invalid_target_noops <arrow><none><block_start>exit_code,stderr=run_typecheck_rule(request_types=[InvalidRequest] targets=[make_target()])<assert_stmt>exit_code<eq>0<assert_stmt>stderr<eq>""<block_end><def_stmt>test_summary <arrow><none><block_start>good_address=Address("" target_name="good")<line_sep>bad_address=Address("" target_name="bad")<line_sep>exit_code,stderr=run_typecheck_rule(request_types=[ConditionallySucceedsRequest FailingRequest SkippedRequest SuccessfulRequest ] targets=[make_target(good_address) make_target(bad_address)] )<assert_stmt>exit_code<eq>FailingRequest.exit_code([bad_address])<assert_stmt>stderr<eq>dedent("""\ 𐄂 ConditionallySucceedsChecker failed. 𐄂 FailingChecker failed. - SkippedChecker skipped. ✓ SuccessfulChecker succeeded. """)<block_end><def_stmt>test_streaming_output_skip <arrow><none><block_start>results=CheckResults([] checker_name="typechecker")<assert_stmt>results.level()<eq>LogLevel.DEBUG<assert_stmt>results.message()<eq>"typechecker skipped."<block_end><def_stmt>test_streaming_output_success <arrow><none><block_start>results=CheckResults([CheckResult(0 "stdout" "stderr")] checker_name="typechecker")<assert_stmt>results.level()<eq>LogLevel.INFO<assert_stmt>results.message()<eq>dedent("""\ typechecker succeeded. stdout stderr """)<block_end><def_stmt>test_streaming_output_failure <arrow><none><block_start>results=CheckResults([CheckResult(18 "stdout" "stderr")] checker_name="typechecker")<assert_stmt>results.level()<eq>LogLevel.ERROR<assert_stmt>results.message()<eq>dedent("""\ typechecker failed (exit code 18). stdout stderr """)<block_end><def_stmt>test_streaming_output_partitions <arrow><none><block_start>results=CheckResults([CheckResult(21 "" "" partition_description="ghc8.1") CheckResult(0 "stdout" "stderr" partition_description="ghc9.2") ] checker_name="typechecker" )<assert_stmt>results.level()<eq>LogLevel.ERROR<assert_stmt>results.message()<eq>dedent("""\ typechecker failed (exit code 21). Partition #1 - ghc8.1: Partition #2 - ghc9.2: stdout stderr """)<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension """<import_stmt>torch.nn<as>nn<import_from_stmt>fairseq utils<import_from_stmt>fairseq.models register_model register_model_architecture <import_from_stmt>fairseq.models.transformer TransformerModel<import_from_stmt>fairseq.modules.transformer_sentence_encoder init_bert_params<import_from_stmt>.hub_interface BARTHubInterface<line_sep>@register_model('bart')<class_stmt>BARTModel(TransformerModel)<block_start>@classmethod<def_stmt>hub_models cls<block_start><return>{'bart.large':'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz' 'bart.large.mnli':'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz' }<block_end><def_stmt>__init__ self args encoder decoder<block_start>super().__init__(args encoder decoder)<line_sep># We follow BERT's random weight initialization self.apply(init_bert_params)<line_sep>self.classification_heads=nn.ModuleDict()<block_end>@staticmethod<def_stmt>add_args parser<block_start>super(BARTModel BARTModel).add_args(parser)<line_sep>parser.add_argument('--max-source-positions' default=1024 type=int metavar='N' help='max number of tokens in the source sequence')<line_sep>parser.add_argument('--max-target-positions' default=1024 type=int metavar='N' help='max number of tokens in the target sequence')<line_sep>parser.add_argument('--pooler-dropout' type=float metavar='D' help='dropout probability in the masked_lm pooler layers')<line_sep>parser.add_argument('--pooler-activation-fn' choices=utils.get_available_activation_fns() help='activation function to use for pooler layer')<block_end>@property<def_stmt>supported_targets self<block_start><return>{'self'}<block_end><def_stmt>forward self src_tokens src_lengths prev_output_tokens features_only=<false> classification_head_name=<none> **kwargs<block_start><if_stmt>classification_head_name<is><not><none><block_start>features_only=<true><block_end>encoder_out=self.encoder(src_tokens src_lengths=src_lengths **kwargs )<line_sep>x,extra=self.decoder(prev_output_tokens encoder_out=encoder_out features_only=features_only **kwargs )<if_stmt>classification_head_name<is><not><none><block_start>sentence_representation=x[src_tokens.eq(self.encoder.dictionary.eos()) :].view(x.size(0) -1 x.size(-1))[: -1 :]<line_sep>x=self.classification_heads[classification_head_name](sentence_representation)<block_end><return>x extra<block_end>@classmethod<def_stmt>from_pretrained cls model_name_or_path checkpoint_file='model.pt' data_name_or_path='.' 
bpe='gpt2' **kwargs <block_start><import_from_stmt>fairseq hub_utils<line_sep>x=hub_utils.from_pretrained(model_name_or_path checkpoint_file data_name_or_path archive_map=cls.hub_models() bpe=bpe load_checkpoint_heads=<true> **kwargs )<line_sep><return>BARTHubInterface(x['args'] x['task'] x['models'][0])<block_end><def_stmt>register_classification_head self name num_classes=<none> inner_dim=<none> **kwargs<block_start>"""Register a classification head."""<line_sep>print("Registering classification head: {0}".format(name))<if_stmt>name<in>self.classification_heads<block_start>prev_num_classes=self.classification_heads[name].out_proj.out_features<line_sep>prev_inner_dim=self.classification_heads[name].dense.out_features<if_stmt>num_classes<ne>prev_num_classes<or>inner_dim<ne>prev_inner_dim<block_start>print('WARNING: re-registering head "{}" with num_classes {} (prev: {}) '<concat>'and inner_dim {} (prev: {})'.format(name num_classes prev_num_classes inner_dim prev_inner_dim))<block_end><block_end>self.classification_heads[name]=BARTClassificationHead(self.args.encoder_embed_dim inner_dim<or>self.args.encoder_embed_dim num_classes self.args.pooler_activation_fn self.args.pooler_dropout )<block_end><def_stmt>upgrade_state_dict_named self state_dict name<block_start>super().upgrade_state_dict_named(state_dict name)<line_sep>prefix=name+'.'<if>name<ne>''<else>''<line_sep>current_head_names=[]<if><not>hasattr(self 'classification_heads')<else>self.classification_heads.keys()<line_sep># Handle new classification heads present in the state dict. keys_to_delete=[]<for_stmt>k state_dict.keys()<block_start><if_stmt><not>k.startswith(prefix+'classification_heads.')<block_start><continue><block_end>head_name=k[len(prefix+'classification_heads.'):].split('.')[0]<line_sep>num_classes=state_dict[prefix+'classification_heads.'+head_name+'.out_proj.weight'].size(0)<line_sep>inner_dim=state_dict[prefix+'classification_heads.'+head_name+'.dense.weight'].size(0)<if_stmt>getattr(self.args 'load_checkpoint_heads' <false>)<block_start><if_stmt>head_name<not><in>current_head_names<block_start>self.register_classification_head(head_name num_classes inner_dim)<block_end><block_end><else_stmt><block_start><if_stmt>head_name<not><in>current_head_names<block_start>print('WARNING: deleting classification head ({}) from checkpoint '<concat>'not present in current model: {}'.format(head_name k))<line_sep>keys_to_delete.append(k)<block_end><elif_stmt>(num_classes<ne>self.classification_heads[head_name].out_proj.out_features<or>inner_dim<ne>self.classification_heads[head_name].dense.out_features)<block_start>print('WARNING: deleting classification head ({}) from checkpoint '<concat>'with different dimensions than current model: {}'.format(head_name k))<line_sep>keys_to_delete.append(k)<block_end><block_end><block_end><for_stmt>k keys_to_delete<block_start><del_stmt>state_dict[k]<block_end># Copy any newly-added classification heads into the state dict # with their current weights. 
<if_stmt>hasattr(self 'classification_heads')<block_start>cur_state=self.classification_heads.state_dict()<for_stmt>k,v cur_state.items()<block_start><if_stmt>prefix+'classification_heads.'+k<not><in>state_dict<block_start>print('Overwriting' prefix+'classification_heads.'+k)<line_sep>state_dict[prefix+'classification_heads.'+k]=v<block_end><block_end><block_end><block_end><block_end><class_stmt>BARTClassificationHead(nn.Module)<block_start>"""Head for sentence-level classification tasks."""<def_stmt>__init__ self input_dim inner_dim num_classes activation_fn pooler_dropout <block_start>super().__init__()<line_sep>self.dense=nn.Linear(input_dim inner_dim)<line_sep>self.activation_fn=utils.get_activation_fn(activation_fn)<line_sep>self.dropout=nn.Dropout(p=pooler_dropout)<line_sep>self.out_proj=nn.Linear(inner_dim num_classes)<block_end><def_stmt>forward self features **kwargs<block_start>x=features<line_sep>x=self.dropout(x)<line_sep>x=self.dense(x)<line_sep>x=self.activation_fn(x)<line_sep>x=self.dropout(x)<line_sep>x=self.out_proj(x)<line_sep><return>x<block_end><block_end>@register_model_architecture('bart' 'bart_large')<def_stmt>bart_large_architecture args<block_start>args.encoder_embed_path=getattr(args 'encoder_embed_path' <none>)<line_sep>args.encoder_embed_dim=getattr(args 'encoder_embed_dim' 1024)<line_sep>args.encoder_ffn_embed_dim=getattr(args 'encoder_ffn_embed_dim' 4<times>1024)<line_sep>args.encoder_layers=getattr(args 'encoder_layers' 12)<line_sep>args.encoder_attention_heads=getattr(args 'encoder_attention_heads' 16)<line_sep>args.encoder_normalize_before=getattr(args 'encoder_normalize_before' <false>)<line_sep>args.encoder_learned_pos=getattr(args 'encoder_learned_pos' <true>)<line_sep>args.decoder_embed_path=getattr(args 'decoder_embed_path' <none>)<line_sep>args.decoder_embed_dim=getattr(args 'decoder_embed_dim' args.encoder_embed_dim)<line_sep>args.decoder_ffn_embed_dim=getattr(args 'decoder_ffn_embed_dim' args.encoder_ffn_embed_dim)<line_sep>args.decoder_layers=getattr(args 'decoder_layers' 12)<line_sep>args.decoder_attention_heads=getattr(args 'decoder_attention_heads' 16)<line_sep>args.decoder_normalize_before=getattr(args 'decoder_normalize_before' <false>)<line_sep>args.decoder_learned_pos=getattr(args 'decoder_learned_pos' <true>)<line_sep>args.attention_dropout=getattr(args 'attention_dropout' 0.)<line_sep>args.relu_dropout=getattr(args 'relu_dropout' 0.)<line_sep>args.dropout=getattr(args 'dropout' 0.1)<line_sep>args.max_target_positions=getattr(args 'max_target_positions' 1024)<line_sep>args.max_source_positions=getattr(args 'max_source_positions' 1024)<line_sep>args.adaptive_softmax_cutoff=getattr(args 'adaptive_softmax_cutoff' <none>)<line_sep>args.adaptive_softmax_dropout=getattr(args 'adaptive_softmax_dropout' 0)<line_sep>args.share_decoder_input_output_embed=getattr(args 'share_decoder_input_output_embed' <true>)<line_sep>args.share_all_embeddings=getattr(args 'share_all_embeddings' <true>)<line_sep>args.decoder_output_dim=getattr(args 'decoder_output_dim' args.decoder_embed_dim)<line_sep>args.decoder_input_dim=getattr(args 'decoder_input_dim' args.decoder_embed_dim)<line_sep>args.no_scale_embedding=getattr(args 'no_scale_embedding' <true>)<line_sep>args.layernorm_embedding=getattr(args 'layernorm_embedding' <true>)<line_sep>args.activation_fn=getattr(args 'activation_fn' 'gelu')<line_sep>args.pooler_activation_fn=getattr(args 'pooler_activation_fn' 'tanh')<line_sep>args.pooler_dropout=getattr(args 'pooler_dropout' 0.0)<block_end>
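The registered 'bart' architecture is normally instantiated through the from_pretrained classmethod above rather than built by hand. A minimal sketch: 'bart.large' is one of the hub_models() keys and model.pt is the default checkpoint name from the signature; calling eval() assumes, as with fairseq's other hub interfaces, that the returned BARTHubInterface behaves like an nn.Module.

bart = BARTModel.from_pretrained(
    'bart.large',                # resolved through the archive_map defined above
    checkpoint_file='model.pt',
)
bart.eval()                      # disable dropout before scoring or generation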
<import_from_stmt>typing Dict Optional List Any<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>allennlp.data Vocabulary<import_from_stmt>allennlp.models.model Model<import_from_stmt>allennlp.modules FeedForward TextFieldEmbedder Seq2SeqEncoder<import_from_stmt>allennlp.nn InitializerApplicator RegularizerApplicator<import_from_stmt>allennlp.nn util<import_from_stmt>allennlp.training.metrics CategoricalAccuracy F1Measure<import_from_stmt>overrides overrides<line_sep>@Model.register("text_classifier")<class_stmt>TextClassifier(Model)<block_start>""" Implements a basic text classifier: 1) Embed tokens using `text_field_embedder` 2) Seq2SeqEncoder, e.g. BiLSTM 3) Append the first and last encoder states 4) Final feedforward layer Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1. """<def_stmt>__init__ self vocab:Vocabulary text_field_embedder:TextFieldEmbedder text_encoder:Seq2SeqEncoder classifier_feedforward:FeedForward verbose_metrics:<false> initializer:InitializerApplicator=InitializerApplicator() regularizer:Optional[RegularizerApplicator]=<none> <arrow><none><block_start>super(TextClassifier self).__init__(vocab regularizer)<line_sep>self.text_field_embedder=text_field_embedder<line_sep>self.num_classes=self.vocab.get_vocab_size("labels")<line_sep>self.text_encoder=text_encoder<line_sep>self.classifier_feedforward=classifier_feedforward<line_sep>self.prediction_layer=torch.nn.Linear(self.classifier_feedforward.get_output_dim() self.num_classes)<line_sep>self.label_accuracy=CategoricalAccuracy()<line_sep>self.label_f1_metrics={}<line_sep>self.verbose_metrics=verbose_metrics<for_stmt>i range(self.num_classes)<block_start>self.label_f1_metrics[vocab.get_token_from_index(index=i namespace="labels")]=F1Measure(positive_label=i)<block_end>self.loss=torch.nn.CrossEntropyLoss()<line_sep>self.pool=<lambda>text mask:util.get_final_encoder_states(text mask bidirectional=<true>)<line_sep>initializer(self)<block_end>@overrides<def_stmt>forward self text:Dict[str torch.LongTensor] label:torch.IntTensor=<none> metadata:List[Dict[str Any]]=<none><arrow>Dict[str torch.Tensor]<block_start>""" Parameters ---------- text : Dict[str, torch.LongTensor] From a ``TextField`` label : torch.IntTensor, optional (default = None) From a ``LabelField`` metadata : ``List[Dict[str, Any]]``, optional, (default = None) Metadata containing the original tokenization of the premise and hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively. Returns ------- An output dictionary consisting of: label_logits : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label. label_probs : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
"""<line_sep>embedded_text=self.text_field_embedder(text)<line_sep>mask=util.get_text_field_mask(text)<line_sep>encoded_text=self.text_encoder(embedded_text mask)<line_sep>pooled=self.pool(encoded_text mask)<line_sep>ff_hidden=self.classifier_feedforward(pooled)<line_sep>logits=self.prediction_layer(ff_hidden)<line_sep>class_probs=F.softmax(logits dim=1)<line_sep>output_dict={"logits":logits}<if_stmt>label<is><not><none><block_start>loss=self.loss(logits label)<line_sep>output_dict["loss"]=loss<line_sep># compute F1 per label <for_stmt>i range(self.num_classes)<block_start>metric=self.label_f1_metrics[self.vocab.get_token_from_index(index=i namespace="labels")]<line_sep>metric(class_probs label)<block_end>self.label_accuracy(logits label)<block_end><return>output_dict<block_end>@overrides<def_stmt>decode self output_dict:Dict[str torch.Tensor]<arrow>Dict[str torch.Tensor]<block_start>class_probabilities=F.softmax(output_dict['logits'] dim=-1)<line_sep>output_dict['class_probs']=class_probabilities<line_sep><return>output_dict<block_end><def_stmt>get_metrics self reset:bool=<false><arrow>Dict[str float]<block_start>metric_dict={}<line_sep>sum_f1=0.0<for_stmt>name,metric self.label_f1_metrics.items()<block_start>metric_val=metric.get_metric(reset)<if_stmt>self.verbose_metrics<block_start>metric_dict[name+'_P']=metric_val[0]<line_sep>metric_dict[name+'_R']=metric_val[1]<line_sep>metric_dict[name+'_F1']=metric_val[2]<block_end>sum_f1<augadd>metric_val[2]<block_end>names=list(self.label_f1_metrics.keys())<line_sep>total_len=len(names)<line_sep>average_f1=sum_f1/total_len<line_sep>metric_dict['average_F1']=average_f1<line_sep>metric_dict['accuracy']=self.label_accuracy.get_metric(reset)<line_sep><return>metric_dict<block_end><block_end>
# # -*- coding: utf-8 -*- """Development related tasks to be run with 'invoke'"""<import_stmt>os<import_stmt>pathlib<import_stmt>shutil<import_stmt>invoke<line_sep>TASK_ROOT=pathlib.Path(__file__).resolve().parent<line_sep>TASK_ROOT_STR=str(TASK_ROOT)<line_sep># shared function <def_stmt>rmrf items verbose=<true><block_start>"""Silently remove a list of directories or files"""<if_stmt>isinstance(items str)<block_start>items=[items]<block_end><for_stmt>item items<block_start><if_stmt>verbose<block_start>print("Removing {}".format(item))<block_end>shutil.rmtree(item ignore_errors=<true>)<line_sep># rmtree doesn't remove bare files <try_stmt><block_start>os.remove(item)<block_end><except_stmt>FileNotFoundError<block_start><pass><block_end><block_end><block_end># create namespaces namespace=invoke.Collection()<line_sep>namespace_clean=invoke.Collection('clean')<line_sep>namespace.add_collection(namespace_clean 'clean')<line_sep>##### # # pytest, pylint, and codecov # ##### @invoke.task<def_stmt>pytest context junit=<false> pty=<true> append_cov=<false><block_start>"""Run tests and code coverage using pytest"""<line_sep>ROOT_PATH=TASK_ROOT.parent.parent<with_stmt>context.cd(str(ROOT_PATH))<block_start>command_str='pytest --cov=cmd2_myplugin --cov-report=term --cov-report=html'<if_stmt>append_cov<block_start>command_str<augadd>' --cov-append'<block_end><if_stmt>junit<block_start>command_str<augadd>' --junitxml=junit/test-results.xml'<block_end>command_str<augadd>' '+str((TASK_ROOT/'tests').relative_to(ROOT_PATH))<line_sep>context.run(command_str pty=pty)<block_end><block_end>namespace.add_task(pytest)<line_sep>@invoke.task<def_stmt>pytest_clean context<block_start>"""Remove pytest cache and code coverage files and directories"""<line_sep># pylint: disable=unused-argument <with_stmt>context.cd(TASK_ROOT_STR)<block_start>dirs=['.pytest_cache' '.cache' '.coverage']<line_sep>rmrf(dirs)<block_end><block_end>namespace_clean.add_task(pytest_clean 'pytest')<line_sep>@invoke.task<def_stmt>pylint context<block_start>"""Check code quality using pylint"""<line_sep>context.run('pylint --rcfile=cmd2_myplugin/pylintrc cmd2_myplugin')<block_end>namespace.add_task(pylint)<line_sep>@invoke.task<def_stmt>pylint_tests context<block_start>"""Check code quality of test suite using pylint"""<line_sep>context.run('pylint --rcfile=tests/pylintrc tests')<block_end>namespace.add_task(pylint_tests)<line_sep>##### # # build and distribute # ##### BUILDDIR='build'<line_sep>DISTDIR='dist'<line_sep>@invoke.task<def_stmt>build_clean context<block_start>"""Remove the build directory"""<line_sep># pylint: disable=unused-argument rmrf(BUILDDIR)<block_end>namespace_clean.add_task(build_clean 'build')<line_sep>@invoke.task<def_stmt>dist_clean context<block_start>"""Remove the dist directory"""<line_sep># pylint: disable=unused-argument rmrf(DISTDIR)<block_end>namespace_clean.add_task(dist_clean 'dist')<line_sep>@invoke.task<def_stmt>eggs_clean context<block_start>"""Remove egg directories"""<line_sep># pylint: disable=unused-argument dirs=set()<line_sep>dirs.add('.eggs')<for_stmt>name os.listdir(os.curdir)<block_start><if_stmt>name.endswith('.egg-info')<block_start>dirs.add(name)<block_end><if_stmt>name.endswith('.egg')<block_start>dirs.add(name)<block_end><block_end>rmrf(dirs)<block_end>namespace_clean.add_task(eggs_clean 'eggs')<line_sep>@invoke.task<def_stmt>bytecode_clean context<block_start>"""Remove __pycache__ directories and *.pyc files"""<line_sep># pylint: disable=unused-argument dirs=set()<for_stmt>root,dirnames,files 
os.walk(os.curdir)<block_start><if_stmt>'__pycache__'<in>dirnames<block_start>dirs.add(os.path.join(root '__pycache__'))<block_end><for_stmt>file files<block_start><if_stmt>file.endswith(".pyc")<block_start>dirs.add(os.path.join(root file))<block_end><block_end><block_end>print("Removing __pycache__ directories and .pyc files")<line_sep>rmrf(dirs verbose=<false>)<block_end>namespace_clean.add_task(bytecode_clean 'bytecode')<line_sep># # make a dummy clean task which runs all the tasks in the clean namespace clean_tasks=list(namespace_clean.tasks.values())<line_sep>@invoke.task(pre=list(namespace_clean.tasks.values()) default=<true>)<def_stmt>clean_all context<block_start>"""Run all clean tasks"""<line_sep># pylint: disable=unused-argument <pass><block_end>namespace_clean.add_task(clean_all 'all')<line_sep>@invoke.task(pre=[clean_all])<def_stmt>sdist context<block_start>"""Create a source distribution"""<line_sep>context.run('python setup.py sdist')<block_end>namespace.add_task(sdist)<line_sep>@invoke.task(pre=[clean_all])<def_stmt>wheel context<block_start>"""Build a wheel distribution"""<line_sep>context.run('python setup.py bdist_wheel')<block_end>namespace.add_task(wheel)<line_sep># # these two tasks are commented out so you don't # accidentally run them and upload this template to pypi # # @invoke.task(pre=[sdist, wheel]) # def pypi(context): # """Build and upload a distribution to pypi""" # context.run('twine upload dist/*') # namespace.add_task(pypi) # @invoke.task(pre=[sdist, wheel]) # def pypi_test(context): # """Build and upload a distribution to https://test.pypi.org""" # context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*') # namespace.add_task(pypi_test)
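These tasks are meant to be driven from the invoke command line. A few representative invocations, assuming this file is discovered as the project's tasks module and that invoke's usual parameter-to-flag mapping applies:

#   invoke --list            # show the root tasks plus the 'clean' sub-collection
#   invoke pytest --junit    # run tests and also write junit/test-results.xml
#   invoke clean             # clean_all is the collection default, so this runs every clean task
#   invoke clean.bytecode    # only remove __pycache__ directories and .pyc files
#   invoke sdist             # clean everything, then build a source distribution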
<import_stmt>stl_path<class_stmt>MyNDRPlugin()<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>pre_iteration self finding_max_rate run_results=<none> **kwargs<block_start>""" Function ran before each iteration. :parameters: finding_max_rate: boolean Indicates whether we are running for the first time, trying to find the max rate. In this is the case, the run_results will be None. run_results: dict A dictionary that contains the following keys: queue_full_percentage: Percentage of packets that are queued. drop_rate_percentage: Percentage of packets that were dropped. rate_tx_bps: TX rate in bps. rate_rx_bps: RX rate in bps. tx_util: TX utilization percentage. latency: Latency groups. cpu_util: CPU utilization percentage. tx_pps: TX in pps. rx_pps: RX in pps. tx_bps: TX in bps. rx_bps: RX in bps. bw_per_core: Bandwidth per core. rate_p: Running rate in percentage out of max. total_tx_L1: Total TX L1. total_rx_L1: Total RX L1. iteration: Description of iteration (not necessarily a number) Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration. kwargs: dict List of tunables passed as parameters. """<line_sep># Pre iteration function. This function will run before TRex transmits to the DUT. # Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues. # You can receive tunables in the command line, through the kwargs argument. <pass><block_end><def_stmt>post_iteration self finding_max_rate run_results **kwargs<block_start>""" Function ran after each iteration. :parameters: finding_max_rate: boolean Indicates whether we are running for the first time, trying to find the max rate. If this is the case, some values of run_results (like iteration for example) are not relevant. run_results: dict A dictionary that contains the following keys: queue_full_percentage: Percentage of packets that are queued. drop_rate_percentage: Percentage of packets that were dropped. rate_tx_bps: TX rate in bps. rate_rx_bps: RX rate in bps. tx_util: TX utilization percentage. latency: Latency groups. cpu_util: CPU utilization percentage. tx_pps: TX in pps. rx_pps: RX in pps. tx_bps: TX in bps. rx_bps: RX in bps. bw_per_core: Bandwidth per core. rate_p: Running rate in percentage out of max. total_tx_L1: Total TX L1. total_rx_L1: Total RX L1. iteration: Description of iteration (not necessarily a number) kwargs: dict List of tunables passed as parameters. :returns: bool: should stop the benchmarking or not. """<line_sep># Post iteration function. This function will run after TRex transmits to the DUT. # Could use this to decide if to continue the benchmark after querying the DUT post run. The DUT might be overheated or any other thing that might make you want to stop the run. # You can receive tunables in the command line, through the kwargs argument. should_stop=<false><line_sep><return>should_stop<block_end><block_end># dynamic load of python module <def_stmt>register <block_start><return>MyNDRPlugin()<block_end>
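The docstrings above spell out which keys run_results carries, so a useful plugin mostly just reads them. Below is a sketch of a variant, meant to live in its own plugin module, that halts the NDR benchmark once the DUT drops more than 1% of packets; the class name and the threshold are arbitrary examples.

class DropSensitiveNDRPlugin(MyNDRPlugin):

    def post_iteration(self, finding_max_rate, run_results, **kwargs):
        # drop_rate_percentage is one of the documented run_results keys
        drop = run_results.get('drop_rate_percentage', 0.0)
        should_stop = (not finding_max_rate) and drop > 1.0
        return should_stop

def register():
    # the NDR tool loads plugins through this module-level hook, as above
    return DropSensitiveNDRPlugin()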
<import_stmt>argparse<import_stmt>logging<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<import_from_stmt>torch nn<import_from_stmt>torch.autograd Variable<import_from_stmt>transformers GPT2Config<import_from_stmt>transformers GPT2LMHeadModel GPT2Tokenizer BertTokenizer<import_from_stmt>DataLoader *<import_from_stmt>Model BERTGen<import_from_stmt>utils sample_sequence<import_stmt>torch.optim<as>optim<import_stmt>math<import_stmt>sys<import_stmt>pandas<import_stmt>os<import_stmt>numpy<import_stmt>nltk<import_from_stmt>torch.utils.tensorboard SummaryWriter<import_stmt>warnings<import_from_stmt>tqdm tqdm trange<import_from_stmt>torch.utils.data RandomSampler SequentialSampler<import_from_stmt>torch.utils.data DataLoader<as>DL<import_stmt>torch<import_from_stmt>torch.utils.data.distributed DistributedSampler<line_sep>warnings.filterwarnings("ignore" category=UserWarning)<line_sep>device=torch.device('cuda')<def_stmt>set_seed args<block_start>np.random.seed(args.seed)<line_sep>torch.manual_seed(args.seed)<if_stmt>args.n_gpu<g>0<block_start>torch.cuda.manual_seed_all(args.seed)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--model" default='gpt2' type=str)<line_sep>parser.add_argument("--top_k" type=int default=0)<line_sep>parser.add_argument("--top_p" type=float default=0.9)<line_sep>parser.add_argument('--seed' type=int default=42 help="random seed for initialization")<line_sep>parser.add_argument('--do_train' default=<false> action="store_true" help="whether to train or test the model")<line_sep>parser.add_argument('--do_rl' default=<false> action="store_true" help="whether to train or test the model")<line_sep>parser.add_argument('--do_val' default=<false> action="store_true" help="whether to train or test the model")<line_sep>parser.add_argument('--do_test' default=<false> action="store_true" help="whether to compute the BLEU scores on test split")<line_sep>parser.add_argument('--do_test_challenge' default=<false> action="store_true" help="whether to compute the BLEU scores on challenge split")<line_sep>parser.add_argument('--do_ppl' default=<false> action="store_true" help="whether to compute perplexity of the model")<line_sep>parser.add_argument('--do_verify' default=<false> action="store_true" help="whether compute the adv-acc score on test split")<line_sep>parser.add_argument('--do_verify_challenge' default=<false> action="store_true" help="whether compute the adv-acc score on challenge split")<line_sep>parser.add_argument('--epoch' default=10 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--batch_size' default=6 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--local_rank' default=-1 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--learning_rate' default=2e-6 type=float help="whether to train or test the model")<line_sep>parser.add_argument('--dataset' default='table' type=str help="whether to train or test the model")<line_sep>parser.add_argument('--every' default=50 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--load_from' default='' type=str help="whether to train or test the model")<line_sep>parser.add_argument('--id' default='models' type=str help="specify the id of the experiment")<line_sep>parser.add_argument('--max_len' default=800 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--dim' 
default=768 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--layers' default=3 type=int help="whether to train or test the model")<line_sep>parser.add_argument('--head' default=4 type=int help="whether to train or test the model")<line_sep>parser.add_argument("--modelpath" type=str default="bert-base-uncased" help="For distributed training: local_rank")<line_sep>parser.add_argument('--gradient_accumulation_steps' type=int default=5 help="accumulation steps for gradient")<line_sep>parser.add_argument('--decode_first_K' type=int default=10000 help="For debugging purpose")<line_sep>args=parser.parse_args()<if_stmt>args.local_rank<eq>-1<block_start>device=torch.device("cuda")<line_sep>args.n_gpu=1<block_end><else_stmt><block_start>torch.cuda.set_device(args.local_rank)<line_sep>device=torch.device('cuda' args.local_rank)<line_sep>torch.distributed.init_process_group(backend='nccl')<line_sep>args.n_gpu=1<block_end>args.device=device<if_stmt>args.local_rank<not><in>[-1 0]<block_start>torch.distributed.barrier()<block_end>tokenizer=GPT2Tokenizer.from_pretrained(args.model)<line_sep>model=GPT2LMHeadModel.from_pretrained(args.model)<line_sep>#model = nn.DataParallel(model) model.to(args.device)<if_stmt>args.local_rank<eq>0<block_start>torch.distributed.barrier()<block_end>criterion=nn.CrossEntropyLoss(reduction='none' ignore_index=-1)<if_stmt>args.do_train<block_start><if_stmt>args.local_rank<in>[-1 0]<block_start><if_stmt><not>os.path.exists(args.id)<block_start>os.mkdir(args.id)<block_end>tb_writer=SummaryWriter(log_dir='tensorboard/GPT2-{}'.format(args.model))<block_end>dataset=GPTTableDataset2('data/train_lm_preprocessed.json' tokenizer args.max_len)<if_stmt>args.local_rank<eq>-1<block_start>sampler=RandomSampler(dataset)<block_end><else_stmt><block_start>sampler=DistributedSampler(dataset)<block_end>train_dataloader=DL(dataset sampler=sampler batch_size=args.batch_size num_workers=0)<line_sep>model.train()<line_sep>optimizer=optim.Adam(model.parameters() args.learning_rate)<line_sep>avg_loss=0<line_sep>global_step=0<if_stmt>args.local_rank<ne>-1<block_start>model=torch.nn.parallel.DistributedDataParallel(model device_ids=[args.local_rank] output_device=args.local_rank find_unused_parameters=<true>)<block_end><else_stmt><block_start>model=torch.nn.DataParallel(model)<block_end><for_stmt>epoch_idx trange(0 args.epoch desc='Epoch' disable=args.local_rank<not><in>[-1 0])#for idx in range(0, dataset.train_len()): <block_start><for_stmt>idx,batch enumerate(tqdm(train_dataloader desc="Iteration" disable=args.local_rank<not><in>[-1 0]))<block_start>batch=tuple(Variable(t).to(device)<for>t batch)<line_sep>trg_inp,trg_out,mask,caption=batch<line_sep>inputs=torch.cat([caption trg_inp] 1)<line_sep>model.zero_grad()<line_sep>optimizer.zero_grad()<line_sep>logits=model(inputs)[0]<line_sep>logits=logits[: -trg_out.shape[1]: :].contiguous()<line_sep>loss=criterion(logits.view(-1 logits.shape[-1]) trg_out.view(-1))<line_sep>loss=loss<times>mask.view(-1)<line_sep>loss=loss.sum()/mask.sum()<line_sep>avg_loss<augadd>loss.item()<line_sep>loss.backward()<line_sep>optimizer.step()<line_sep>global_step<augadd>1<if_stmt>args.local_rank<in>[-1 0]<and>idx%args.every<eq>0<and>idx<g>0<block_start>tb_writer.add_scalar("perplexity" math.exp(avg_loss/args.every) global_step)<line_sep>fake_inputs=caption<line_sep>gt_inputs=trg_out.cpu().data.numpy()<line_sep>#samples = model.sample(fake_inputs, tabfeat, caption, highlight_idx, bert) samples=sample_sequence(model 30 fake_inputs 
[])<line_sep>samples=samples[: caption.shape[1]:]<line_sep>samples=samples.cpu().data.numpy()<for_stmt>s,gt zip(samples gt_inputs)<block_start>text=tokenizer.decode(s clean_up_tokenization_spaces=<true>)<line_sep>text=text[:text.find(tokenizer.eos_token)]<line_sep>print("PREDICTION |||||| " text)<line_sep>text=tokenizer.decode(gt clean_up_tokenization_spaces=<true>)<line_sep>text=text[:text.find(tokenizer.eos_token)]<line_sep>print("GROUNDTRUH |||||| " text)<line_sep><break><block_end>avg_loss=0<block_end><block_end><if_stmt>args.local_rank<in>[-1 0]<block_start><if_stmt>args.model<eq>'gpt2'<block_start>torch.save(model.state_dict() '{}/GPT_ep{}.pt'.format(args.id epoch_idx))<block_end><else_stmt><block_start>torch.save(model.state_dict() '{}/GPT_medium_ep{}.pt'.format(args.id epoch_idx))<block_end><block_end><block_end><if_stmt>args.local_rank<in>[-1 0]<block_start>tb_writer.close()<block_end><block_end><block_end>
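For reference, a plausible single-GPU launch of the training branch above, using only flags defined in the argparse block; the script filename is hypothetical, and a distributed run would instead go through torch.distributed.launch so that --local_rank gets set and the DistributedSampler is used.

#   python train_gpt2.py --do_train --model gpt2 --epoch 10 --batch_size 6 \
#          --learning_rate 2e-6 --id models --every 50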
# Copyright 2019 Atalaya Tech, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>io<import_stmt>os<import_stmt>sys<import_stmt>tarfile<import_stmt>logging<import_stmt>tempfile<import_stmt>shutil<import_from_stmt>functools wraps<import_from_stmt>contextlib contextmanager<import_from_stmt>urllib.parse urlparse<import_from_stmt>typing TYPE_CHECKING<import_from_stmt>pathlib PureWindowsPath PurePosixPath<import_from_stmt>bentoml.utils.s3 is_s3_url<import_from_stmt>bentoml.utils.gcs is_gcs_url<import_from_stmt>bentoml.exceptions BentoMLException<import_from_stmt>bentoml.saved_bundle.config SavedBundleConfig<import_from_stmt>bentoml.saved_bundle.pip_pkg ZIPIMPORT_DIR<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>bentoml.yatai.proto.repository_pb2 BentoServiceMetadata<block_end>logger=logging.getLogger(__name__)<def_stmt>_is_http_url bundle_path<arrow>bool<block_start><try_stmt><block_start><return>urlparse(bundle_path).scheme<in>["http" "https"]<block_end><except_stmt>ValueError<block_start><return><false><block_end><block_end><def_stmt>_is_remote_path bundle_path<arrow>bool<block_start><return>isinstance(bundle_path str)<and>(is_s3_url(bundle_path)<or>is_gcs_url(bundle_path)<or>_is_http_url(bundle_path))<block_end>@contextmanager<def_stmt>_resolve_remote_bundle_path bundle_path<block_start><if_stmt>is_s3_url(bundle_path)<block_start><import_stmt>boto3<line_sep>parsed_url=urlparse(bundle_path)<line_sep>bucket_name=parsed_url.netloc<line_sep>object_name=parsed_url.path.lstrip('/')<line_sep>s3=boto3.client('s3')<line_sep>fileobj=io.BytesIO()<line_sep>s3.download_fileobj(bucket_name object_name fileobj)<line_sep>fileobj.seek(0 0)<block_end><elif_stmt>is_gcs_url(bundle_path)<block_start><try_stmt><block_start><import_from_stmt>google.cloud storage<block_end><except_stmt>ImportError<block_start><raise>BentoMLException('"google-cloud-storage" package is required. You can install it with '<concat>'pip: "pip install google-cloud-storage"')<block_end>gcs=storage.Client()<line_sep>fileobj=io.BytesIO()<line_sep>gcs.download_blob_to_file(bundle_path fileobj)<line_sep>fileobj.seek(0 0)<block_end><elif_stmt>_is_http_url(bundle_path)<block_start><import_stmt>requests<line_sep>response=requests.get(bundle_path)<if_stmt>response.status_code<ne>200<block_start><raise>BentoMLException(f"Error retrieving BentoService bundle. 
"<concat>f"{response.status_code}: {response.text}")<block_end>fileobj=io.BytesIO()<line_sep>fileobj.write(response.content)<line_sep>fileobj.seek(0 0)<block_end><else_stmt><block_start><raise>BentoMLException(f"Saved bundle path: '{bundle_path}' is not supported")<block_end><with_stmt>tarfile.open(mode="r:gz" fileobj=fileobj)<as>tar<block_start><with_stmt>tempfile.TemporaryDirectory()<as>tmpdir<block_start>filename=tar.getmembers()[0].name<line_sep>tar.extractall(path=tmpdir)<line_sep><yield>os.path.join(tmpdir filename)<block_end><block_end><block_end><def_stmt>resolve_remote_bundle func<block_start>"""Decorate a function to handle remote bundles."""<line_sep>@wraps(func)<def_stmt>wrapper bundle_path *args<block_start><if_stmt>_is_remote_path(bundle_path)<block_start><with_stmt>_resolve_remote_bundle_path(bundle_path)<as>local_bundle_path<block_start><return>func(local_bundle_path *args)<block_end><block_end><return>func(bundle_path *args)<block_end><return>wrapper<block_end>@resolve_remote_bundle<def_stmt>load_saved_bundle_config bundle_path<arrow>"SavedBundleConfig"<block_start><try_stmt><block_start><return>SavedBundleConfig.load(os.path.join(bundle_path "bentoml.yml"))<block_end><except_stmt>FileNotFoundError<block_start><raise>BentoMLException("BentoML can't locate config file 'bentoml.yml'"<concat>" in saved bundle in path: {}".format(bundle_path))<block_end><block_end><def_stmt>load_bento_service_metadata bundle_path:str<arrow>"BentoServiceMetadata"<block_start><return>load_saved_bundle_config(bundle_path).get_bento_service_metadata_pb()<block_end><def_stmt>_find_module_file bundle_path service_name module_file# Simply join full path when module_file is just a file name, # e.g. module_file=="iris_classifier.py" <block_start>module_file_path=os.path.join(bundle_path service_name module_file)<if_stmt><not>os.path.isfile(module_file_path)# Try loading without service_name prefix, for loading from a installed PyPi <block_start>module_file_path=os.path.join(bundle_path module_file)<block_end># When module_file is located in sub directory # e.g. 
module_file=="foo/bar/iris_classifier.py" # This needs to handle the path differences between posix and windows platform: <if_stmt><not>os.path.isfile(module_file_path)<block_start><if_stmt>sys.platform<eq>"win32"# Try load a saved bundle created from posix platform on windows <block_start>module_file_path=os.path.join(bundle_path service_name str(PurePosixPath(module_file)))<if_stmt><not>os.path.isfile(module_file_path)<block_start>module_file_path=os.path.join(bundle_path str(PurePosixPath(module_file)))<block_end><block_end><else_stmt># Try load a saved bundle created from windows platform on posix <block_start>module_file_path=os.path.join(bundle_path service_name PureWindowsPath(module_file).as_posix())<if_stmt><not>os.path.isfile(module_file_path)<block_start>module_file_path=os.path.join(bundle_path PureWindowsPath(module_file).as_posix())<block_end><block_end><block_end><if_stmt><not>os.path.isfile(module_file_path)<block_start><raise>BentoMLException("Can not locate module_file {} in saved bundle {}".format(module_file bundle_path))<block_end><return>module_file_path<block_end>@resolve_remote_bundle<def_stmt>load_bento_service_class bundle_path<block_start>""" Load a BentoService class from saved bundle in given path :param bundle_path: A path to Bento files generated from BentoService#save, #save_to_dir, or the path to pip installed BentoService directory :return: BentoService class """<line_sep>config=load_saved_bundle_config(bundle_path)<line_sep>metadata=config["metadata"]<line_sep># Find and load target module containing BentoService class from given path module_file_path=_find_module_file(bundle_path metadata["service_name"] metadata["module_file"])<line_sep># Prepend bundle_path to sys.path for loading extra python dependencies sys.path.insert(0 bundle_path)<line_sep>sys.path.insert(0 os.path.join(bundle_path metadata["service_name"]))<line_sep># Include zipimport modules zipimport_dir=os.path.join(bundle_path metadata["service_name"] ZIPIMPORT_DIR)<if_stmt>os.path.exists(zipimport_dir)<block_start><for_stmt>p os.listdir(zipimport_dir)<block_start>logger.debug('adding %s to sys.path' p)<line_sep>sys.path.insert(0 os.path.join(zipimport_dir p))<block_end><block_end>module_name=metadata["module_name"]<if_stmt>module_name<in>sys.modules<block_start>logger.warning("Module `%s` already loaded, using existing imported module." 
module_name)<line_sep>module=sys.modules[module_name]<block_end><elif_stmt>sys.version_info<ge>(3 5)<block_start><import_stmt>importlib.util<line_sep>spec=importlib.util.spec_from_file_location(module_name module_file_path)<line_sep>module=importlib.util.module_from_spec(spec)<line_sep>spec.loader.exec_module(module)<block_end><elif_stmt>sys.version_info<ge>(3 3)<block_start><import_from_stmt>importlib.machinery SourceFileLoader<line_sep># pylint:disable=deprecated-method module=SourceFileLoader(module_name module_file_path).load_module(module_name)<line_sep># pylint:enable=deprecated-method <block_end><else_stmt><block_start><raise>BentoMLException("BentoML requires Python 3.4 and above")<block_end># Remove bundle_path from sys.path to avoid import naming conflicts sys.path.remove(bundle_path)<line_sep>model_service_class=module.__getattribute__(metadata["service_name"])<line_sep># Set _bento_service_bundle_path, where BentoService will load its artifacts model_service_class._bento_service_bundle_path=bundle_path<line_sep># Set cls._version, service instance can access it via svc.version model_service_class._bento_service_bundle_version=metadata["service_version"]<if_stmt>(model_service_class._env<and>model_service_class._env._requirements_txt_file<is><not><none>)# Load `requirement.txt` from bundle directory instead of the user-provided # file path, which may only available during the bundle save process <block_start>model_service_class._env._requirements_txt_file=os.path.join(bundle_path "requirements.txt")<block_end><return>model_service_class<block_end>@resolve_remote_bundle<def_stmt>safe_retrieve bundle_path:str target_dir:str<block_start>"""Safely retrieve bento service to local path Args: bundle_path (:obj:`str`): The path that contains saved BentoService bundle, supporting both local file path and s3 path target_dir (:obj:`str`): Where the service contents should end up. Returns: :obj:`str`: location of safe local path """<line_sep>shutil.copytree(bundle_path target_dir)<block_end>@resolve_remote_bundle<def_stmt>load_from_dir bundle_path<block_start>"""Load bento service from local file path or s3 path Args: bundle_path (str): The path that contains saved BentoService bundle, supporting both local file path and s3 path Returns: bentoml.service.BentoService: a loaded BentoService instance """<line_sep>svc_cls=load_bento_service_class(bundle_path)<line_sep><return>svc_cls()<block_end>@resolve_remote_bundle<def_stmt>load_bento_service_api bundle_path api_name=<none><block_start>bento_service=load_from_dir(bundle_path)<line_sep><return>bento_service.get_inference_api(api_name)<block_end>
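The public helpers defined above all pass through resolve_remote_bundle, so the same call accepts a local bundle directory, an s3:// or gs:// URI, or an http(s) URL pointing at a .tar.gz archive. A minimal sketch with placeholder paths and a hypothetical api_name:

svc = load_from_dir('/tmp/my_bento_bundle')                          # a BentoService instance
api = load_bento_service_api('s3://my-bucket/bundles/svc.tar.gz', 'predict')
meta = load_bento_service_metadata('/tmp/my_bento_bundle')           # BentoServiceMetadata protobuf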
# encoding: utf-8 <import_stmt>os<import_stmt>roms<def_stmt>console_roms_directory configuration console<block_start>""" If the user has specified a custom ROMs directory in consoles.txt then return that. Otherwise, append the shortname of the console to the default ROMs directory given by config.txt. """<if_stmt>console.custom_roms_directory<block_start><return>console.custom_roms_directory<block_end><return>os.path.join(roms.roms_directory(configuration) console.shortname)<block_end><def_stmt>path_is_rom console path<block_start>""" This function determines if a given path is actually a valid ROM file. If a list of extensions is supplied for this console, we check if the path has a valid extension. If no extensions are defined for this console, we just accept any file. """<if_stmt>console.extensions<eq>""<block_start><return><true><block_end># Normalize the extension based on the things we validly ignore. # Aka capitalization, whitespace, and leading dots normalize=<lambda>ext:ext.lower().strip().lstrip('.')<line_sep>(name ext)=os.path.splitext(path)<line_sep>valid_extensions=console.extensions.split(',')<line_sep><return>normalize(ext)<in>map(normalize valid_extensions)<block_end>
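# Editor's sketch (not part of the original module): path_is_rom normalises
# case, whitespace and leading dots before comparing extensions. The stand-in
# console object below is hypothetical; the real one comes from consoles.txt.
def _example_extension_check():
    class ExampleConsole(object):
        custom_roms_directory = ""
        shortname = "nes"
        extensions = "nes, zip"
    console = ExampleConsole()
    assert path_is_rom(console, "/roms/nes/Contra.NES")      # ".NES" matches "nes"
    assert not path_is_rom(console, "/roms/nes/readme.txt")  # unknown extension rejected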
# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_from_stmt>benchmarks smoothness thread_times<import_stmt>page_sets<import_from_stmt>telemetry benchmark<line_sep># pylint: disable=protected-access <def_stmt>CustomizeBrowserOptionsForOopRasterization options<block_start>"""Enables flags needed for out of process rasterization."""<line_sep>options.AppendExtraBrowserArgs('--force-gpu-rasterization')<line_sep>options.AppendExtraBrowserArgs('--enable-oop-rasterization')<block_end>@benchmark.Owner(emails=['<EMAIL>'])<class_stmt>SmoothnessOopRasterizationTop25(smoothness._Smoothness)<block_start>"""Measures rendering statistics for the top 25 with oop rasterization. """<line_sep>tag='oop_rasterization'<line_sep>page_set=page_sets.Top25SmoothPageSet<def_stmt>SetExtraBrowserOptions self options<block_start>CustomizeBrowserOptionsForOopRasterization(options)<block_end>@classmethod<def_stmt>Name cls<block_start><return>'smoothness.oop_rasterization.top_25_smooth'<block_end><block_end>@benchmark.Owner(emails=['<EMAIL>'])<class_stmt>ThreadTimesOopRasterKeyMobile(thread_times._ThreadTimes)<block_start>"""Measure timeline metrics for key mobile pages while using out of process raster."""<line_sep>tag='oop_rasterization'<line_sep>page_set=page_sets.KeyMobileSitesSmoothPageSet<line_sep>options={'story_tag_filter':'fastpath'}<def_stmt>SetExtraBrowserOptions self options<block_start>super(ThreadTimesOopRasterKeyMobile self).SetExtraBrowserOptions(options)<line_sep>CustomizeBrowserOptionsForOopRasterization(options)<block_end>@classmethod<def_stmt>Name cls<block_start><return>'thread_times.oop_rasterization.key_mobile'<block_end><block_end>
<import_stmt>base64<import_stmt>operator<import_stmt>struct<import_stmt>time<import_stmt>pytest<import_stmt>six<import_stmt>consul<import_stmt>consul.std<line_sep>Check=consul.Check<class_stmt>TestHTTPClient(object)<block_start><def_stmt>test_uri self<block_start>http=consul.std.HTTPClient()<assert_stmt>http.uri('/v1/kv')<eq>'http://127.0.0.1:8500/v1/kv'<assert_stmt>http.uri('/v1/kv' params={'index':1})<eq>'http://127.0.0.1:8500/v1/kv?index=1'<block_end><block_end><class_stmt>TestConsul(object)<block_start><def_stmt>test_kv self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>index,data=c.kv.get('foo')<assert_stmt>data<is><none><assert_stmt>c.kv.put('foo' 'bar')<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<eq>six.b('bar')<block_end><def_stmt>test_kv_wait self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.kv.put('foo' 'bar')<is><true><line_sep>index,data=c.kv.get('foo')<line_sep>check,data=c.kv.get('foo' index=index wait='20ms')<assert_stmt>index<eq>check<block_end><def_stmt>test_kv_encoding self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># test binary c.kv.put('foo' struct.pack('i' 1000))<line_sep>index,data=c.kv.get('foo')<assert_stmt>struct.unpack('i' data['Value'])<eq>(1000 )<line_sep># test unicode c.kv.put('foo' u'bar')<line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<eq>six.b('bar')<line_sep># test empty-string comes back as `None` c.kv.put('foo' '')<line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<is><none><line_sep># test None c.kv.put('foo' <none>)<line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<is><none><line_sep># check unencoded values raises assert pytest.raises(AssertionError c.kv.put 'foo' {1:2})<block_end><def_stmt>test_kv_put_cas self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.kv.put('foo' 'bar' cas=50)<is><false><assert_stmt>c.kv.put('foo' 'bar' cas=0)<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>c.kv.put('foo' 'bar2' cas=data['ModifyIndex']-1)<is><false><assert_stmt>c.kv.put('foo' 'bar2' cas=data['ModifyIndex'])<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<eq>six.b('bar2')<block_end><def_stmt>test_kv_put_flags self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.kv.put('foo' 'bar')<line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Flags']<eq>0<assert_stmt>c.kv.put('foo' 'bar' flags=50)<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Flags']<eq>50<block_end><def_stmt>test_kv_recurse self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>index,data=c.kv.get('foo/' recurse=<true>)<assert_stmt>data<is><none><line_sep>c.kv.put('foo/' <none>)<line_sep>index,data=c.kv.get('foo/' recurse=<true>)<assert_stmt>len(data)<eq>1<line_sep>c.kv.put('foo/bar1' '1')<line_sep>c.kv.put('foo/bar2' '2')<line_sep>c.kv.put('foo/bar3' '3')<line_sep>index,data=c.kv.get('foo/' recurse=<true>)<assert_stmt>[x['Key']<for>x data]<eq>['foo/' 'foo/bar1' 'foo/bar2' 'foo/bar3']<assert_stmt>[x['Value']<for>x data]<eq>[<none> six.b('1') six.b('2') six.b('3')]<block_end><def_stmt>test_kv_delete self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.kv.put('foo1' '1')<line_sep>c.kv.put('foo2' '2')<line_sep>c.kv.put('foo3' '3')<line_sep>index,data=c.kv.get('foo' recurse=<true>)<assert_stmt>[x['Key']<for>x data]<eq>['foo1' 'foo2' 'foo3']<assert_stmt>c.kv.delete('foo2')<is><true><line_sep>index,data=c.kv.get('foo' 
recurse=<true>)<assert_stmt>[x['Key']<for>x data]<eq>['foo1' 'foo3']<assert_stmt>c.kv.delete('foo' recurse=<true>)<is><true><line_sep>index,data=c.kv.get('foo' recurse=<true>)<assert_stmt>data<is><none><block_end><def_stmt>test_kv_delete_cas self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.kv.put('foo' 'bar')<line_sep>index,data=c.kv.get('foo')<assert_stmt>c.kv.delete('foo' cas=data['ModifyIndex']-1)<is><false><assert_stmt>c.kv.get('foo')<eq>(index data)<assert_stmt>c.kv.delete('foo' cas=data['ModifyIndex'])<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>data<is><none><block_end><def_stmt>test_kv_acquire_release self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>pytest.raises(consul.ConsulException c.kv.put 'foo' 'bar' acquire='foo')<line_sep>s1=c.session.create()<line_sep>s2=c.session.create()<assert_stmt>c.kv.put('foo' '1' acquire=s1)<is><true><assert_stmt>c.kv.put('foo' '2' acquire=s2)<is><false><assert_stmt>c.kv.put('foo' '1' acquire=s1)<is><true><assert_stmt>c.kv.put('foo' '1' release='foo')<is><false><assert_stmt>c.kv.put('foo' '2' release=s2)<is><false><assert_stmt>c.kv.put('foo' '2' release=s1)<is><true><line_sep>c.session.destroy(s1)<line_sep>c.session.destroy(s2)<block_end><def_stmt>test_kv_keys_only self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.kv.put('bar' '4')<is><true><assert_stmt>c.kv.put('base/foo' '1')<is><true><assert_stmt>c.kv.put('base/base/foo' '5')<is><true><line_sep>index,data=c.kv.get('base/' keys=<true> separator='/')<assert_stmt>data<eq>['base/base/' 'base/foo']<block_end><def_stmt>test_transaction self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>value=base64.b64encode(b"1").decode("utf8")<line_sep>d={"KV":{"Verb":"set" "Key":"asdf" "Value":value}}<line_sep>r=c.txn.put([d])<assert_stmt>r["Errors"]<is><none><line_sep>d={"KV":{"Verb":"get" "Key":"asdf"}}<line_sep>r=c.txn.put([d])<assert_stmt>r["Results"][0]["KV"]["Value"]<eq>value<block_end><def_stmt>test_event self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.event.fire("fooname" "foobody")<line_sep>index,events=c.event.list()<assert_stmt>[x['Name']<eq>'fooname'<for>x events]<assert_stmt>[x['Payload']<eq>'foobody'<for>x events]<block_end><def_stmt>test_event_targeted self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.event.fire("fooname" "foobody")<line_sep>index,events=c.event.list(name="othername")<assert_stmt>events<eq>[]<line_sep>index,events=c.event.list(name="fooname")<assert_stmt>[x['Name']<eq>'fooname'<for>x events]<assert_stmt>[x['Payload']<eq>'foobody'<for>x events]<block_end><def_stmt>test_agent_checks self consul_port<block_start>c=consul.Consul(port=consul_port)<def_stmt>verify_and_dereg_check check_id<block_start><assert_stmt>set(c.agent.checks().keys())<eq>set([check_id])<assert_stmt>c.agent.check.deregister(check_id)<is><true><assert_stmt>set(c.agent.checks().keys())<eq>set([])<block_end><def_stmt>verify_check_status check_id status notes=<none><block_start>checks=c.agent.checks()<assert_stmt>checks[check_id]['Status']<eq>status<if_stmt>notes<block_start><assert_stmt>checks[check_id]['Output']<eq>notes<block_end><block_end># test setting notes on a check c.agent.check.register('check' Check.ttl('1s') notes='foo')<assert_stmt>c.agent.checks()['check']['Notes']<eq>'foo'<line_sep>c.agent.check.deregister('check')<assert_stmt>set(c.agent.checks().keys())<eq>set([])<assert_stmt>c.agent.check.register('script_check' Check.script('/bin/true' 
10))<is><true><line_sep>verify_and_dereg_check('script_check')<assert_stmt>c.agent.check.register('check name' Check.script('/bin/true' 10) check_id='check_id')<is><true><line_sep>verify_and_dereg_check('check_id')<line_sep>http_addr="http://127.0.0.1:{0}".format(consul_port)<assert_stmt>c.agent.check.register('http_check' Check.http(http_addr '10ms'))<is><true><line_sep>time.sleep(1)<line_sep>verify_check_status('http_check' 'passing')<line_sep>verify_and_dereg_check('http_check')<assert_stmt>c.agent.check.register('http_timeout_check' Check.http(http_addr '100ms' timeout='2s'))<is><true><line_sep>verify_and_dereg_check('http_timeout_check')<assert_stmt>c.agent.check.register('ttl_check' Check.ttl('100ms'))<is><true><assert_stmt>c.agent.check.ttl_warn('ttl_check')<is><true><line_sep>verify_check_status('ttl_check' 'warning')<assert_stmt>c.agent.check.ttl_warn('ttl_check' notes='its not quite right')<is><true><line_sep>verify_check_status('ttl_check' 'warning' 'its not quite right')<assert_stmt>c.agent.check.ttl_fail('ttl_check')<is><true><line_sep>verify_check_status('ttl_check' 'critical')<assert_stmt>c.agent.check.ttl_fail('ttl_check' notes='something went boink!')<is><true><line_sep>verify_check_status('ttl_check' 'critical' notes='something went boink!')<assert_stmt>c.agent.check.ttl_pass('ttl_check')<is><true><line_sep>verify_check_status('ttl_check' 'passing')<assert_stmt>c.agent.check.ttl_pass('ttl_check' notes='all hunky dory!')<is><true><line_sep>verify_check_status('ttl_check' 'passing' notes='all hunky dory!')<line_sep># wait for ttl to expire time.sleep(120/1000.0)<line_sep>verify_check_status('ttl_check' 'critical')<line_sep>verify_and_dereg_check('ttl_check')<block_end><def_stmt>test_service_dereg_issue_156 self consul_port# https://github.com/cablehead/python-consul/issues/156 <block_start>service_name='app#127.0.0.1#3000'<line_sep>c=consul.Consul(port=consul_port)<line_sep>c.agent.service.register(service_name)<line_sep>time.sleep(80/1000.0)<line_sep>index,nodes=c.health.service(service_name)<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>[service_name]<line_sep># Clean up tasks <assert_stmt>c.agent.service.deregister(service_name)<is><true><line_sep>time.sleep(40/1000.0)<line_sep>index,nodes=c.health.service(service_name)<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>[]<block_end><def_stmt>test_agent_checks_service_id self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.agent.service.register('foo1')<line_sep>time.sleep(40/1000.0)<line_sep>index,nodes=c.health.service('foo1')<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo1']<line_sep>c.agent.check.register('foo' Check.ttl('100ms') service_id='foo1')<line_sep>time.sleep(40/1000.0)<line_sep>index,nodes=c.health.service('foo1')<assert_stmt>set([check['ServiceID']<for>node nodes<for>check node['Checks']])<eq>set(['foo1' ''])<assert_stmt>set([check['CheckID']<for>node nodes<for>check node['Checks']])<eq>set(['foo' 'serfHealth'])<line_sep># Clean up tasks <assert_stmt>c.agent.check.deregister('foo')<is><true><line_sep>time.sleep(40/1000.0)<assert_stmt>c.agent.service.deregister('foo1')<is><true><line_sep>time.sleep(40/1000.0)<block_end><def_stmt>test_agent_register_check_no_service_id self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>index,nodes=c.health.service("foo1")<assert_stmt>nodes<eq>[]<line_sep>pytest.raises(consul.std.base.ConsulException c.agent.check.register 'foo' Check.ttl('100ms') 
service_id='foo1')<line_sep>time.sleep(40/1000.0)<assert_stmt>c.agent.checks()<eq>{}<line_sep># Cleanup tasks c.agent.check.deregister('foo')<line_sep>time.sleep(40/1000.0)<block_end><def_stmt>test_agent_register_enable_tag_override self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>index,nodes=c.health.service("foo1")<assert_stmt>nodes<eq>[]<line_sep>c.agent.service.register('foo' enable_tag_override=<true>)<assert_stmt>c.agent.services()['foo']['EnableTagOverride']<line_sep># Cleanup tasks c.agent.check.deregister('foo')<block_end><def_stmt>test_agent_service_maintenance self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.agent.service.register('foo' check=Check.ttl('100ms'))<line_sep>time.sleep(40/1000.0)<line_sep>c.agent.service.maintenance('foo' 'true' "test")<line_sep>time.sleep(40/1000.0)<line_sep>checks_pre=c.agent.checks()<assert_stmt>'_service_maintenance:foo'<in>checks_pre.keys()<assert_stmt>'test'<eq>checks_pre['_service_maintenance:foo']['Notes']<line_sep>c.agent.service.maintenance('foo' 'false')<line_sep>time.sleep(40/1000.0)<line_sep>checks_post=c.agent.checks()<assert_stmt>'_service_maintenance:foo'<not><in>checks_post.keys()<line_sep># Cleanup c.agent.service.deregister('foo')<line_sep>time.sleep(40/1000.0)<block_end><def_stmt>test_agent_node_maintenance self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.agent.maintenance('true' "test")<line_sep>time.sleep(40/1000.0)<line_sep>checks_pre=c.agent.checks()<assert_stmt>'_node_maintenance'<in>checks_pre.keys()<assert_stmt>'test'<eq>checks_pre['_node_maintenance']['Notes']<line_sep>c.agent.maintenance('false')<line_sep>time.sleep(40/1000.0)<line_sep>checks_post=c.agent.checks()<assert_stmt>'_node_maintenance'<not><in>checks_post.keys()<block_end><def_stmt>test_agent_members self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>members=c.agent.members()<for_stmt>x members<block_start><assert_stmt>x['Status']<eq>1<assert_stmt><not>x['Name']<is><none><assert_stmt><not>x['Tags']<is><none><block_end><assert_stmt>c.agent.self()['Member']<in>members<line_sep>wan_members=c.agent.members(wan=<true>)<for_stmt>x wan_members<block_start><assert_stmt>'dc1'<in>x['Name']<block_end><block_end><def_stmt>test_agent_self self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>set(c.agent.self().keys())<eq>set(['Member' 'Stats' 'Config' 'Coord' 'DebugConfig' 'Meta'])<block_end><def_stmt>test_agent_services self consul_port<block_start>c=consul.Consul(port=consul_port)<assert_stmt>c.agent.service.register('foo')<is><true><assert_stmt>set(c.agent.services().keys())<eq>set(['foo'])<assert_stmt>c.agent.service.deregister('foo')<is><true><assert_stmt>set(c.agent.services().keys())<eq>set()<line_sep># test address param <assert_stmt>c.agent.service.register('foo' address='10.10.10.1')<is><true><assert_stmt>[v['Address']<for>k,v c.agent.services().items()<if>k<eq>'foo'][0]<eq>'10.10.10.1'<assert_stmt>c.agent.service.deregister('foo')<is><true><block_end><def_stmt>test_catalog self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># grab the node our server created, so we can ignore it _,nodes=c.catalog.nodes()<assert_stmt>len(nodes)<eq>1<line_sep>current=nodes[0]<line_sep># test catalog.datacenters <assert_stmt>c.catalog.datacenters()<eq>['dc1']<line_sep># test catalog.register pytest.raises(consul.ConsulException c.catalog.register 'foo' '10.1.10.11' dc='dc2')<assert_stmt>c.catalog.register('n1' '10.1.10.11' service={'service':'s1'} 
check={'name':'c1'})<is><true><assert_stmt>c.catalog.register('n1' '10.1.10.11' service={'service':'s2'})<is><true><assert_stmt>c.catalog.register('n2' '10.1.10.12' service={'service':'s1' 'tags':['master']})<is><true><line_sep># test catalog.nodes pytest.raises(consul.ConsulException c.catalog.nodes dc='dc2')<line_sep>_,nodes=c.catalog.nodes()<line_sep>nodes.remove(current)<assert_stmt>[x['Node']<for>x nodes]<eq>['n1' 'n2']<line_sep># test catalog.services pytest.raises(consul.ConsulException c.catalog.services dc='dc2')<line_sep>_,services=c.catalog.services()<assert_stmt>services<eq>{'s1':[u'master'] 's2':[] 'consul':[]}<line_sep># test catalog.node pytest.raises(consul.ConsulException c.catalog.node 'n1' dc='dc2')<line_sep>_,node=c.catalog.node('n1')<assert_stmt>set(node['Services'].keys())<eq>set(['s1' 's2'])<line_sep>_,node=c.catalog.node('n3')<assert_stmt>node<is><none><line_sep># test catalog.service pytest.raises(consul.ConsulException c.catalog.service 's1' dc='dc2')<line_sep>_,nodes=c.catalog.service('s1')<assert_stmt>set([x['Node']<for>x nodes])<eq>set(['n1' 'n2'])<line_sep>_,nodes=c.catalog.service('s1' tag='master')<assert_stmt>set([x['Node']<for>x nodes])<eq>set(['n2'])<line_sep># test catalog.deregister pytest.raises(consul.ConsulException c.catalog.deregister 'n2' dc='dc2')<assert_stmt>c.catalog.deregister('n1' check_id='c1')<is><true><assert_stmt>c.catalog.deregister('n2' service_id='s1')<is><true><line_sep># check the nodes weren't removed _,nodes=c.catalog.nodes()<line_sep>nodes.remove(current)<assert_stmt>[x['Node']<for>x nodes]<eq>['n1' 'n2']<line_sep># check n2's s1 service was removed though _,nodes=c.catalog.service('s1')<assert_stmt>set([x['Node']<for>x nodes])<eq>set(['n1'])<line_sep># cleanup <assert_stmt>c.catalog.deregister('n1')<is><true><assert_stmt>c.catalog.deregister('n2')<is><true><line_sep>_,nodes=c.catalog.nodes()<line_sep>nodes.remove(current)<assert_stmt>[x['Node']<for>x nodes]<eq>[]<block_end><def_stmt>test_health_service self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># check there are no nodes for the service 'foo' index,nodes=c.health.service('foo')<assert_stmt>nodes<eq>[]<line_sep># register two nodes, one with a long ttl, the other shorter c.agent.service.register('foo' service_id='foo:1' check=Check.ttl('10s') tags=['tag:foo:1'])<line_sep>c.agent.service.register('foo' service_id='foo:2' check=Check.ttl('100ms'))<line_sep>time.sleep(40/1000.0)<line_sep># check the nodes show for the /health/service endpoint index,nodes=c.health.service('foo')<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo:1' 'foo:2']<line_sep># but that they aren't passing their health check index,nodes=c.health.service('foo' passing=<true>)<assert_stmt>nodes<eq>[]<line_sep># ping the two node's health check c.agent.check.ttl_pass('service:foo:1')<line_sep>c.agent.check.ttl_pass('service:foo:2')<line_sep>time.sleep(40/1000.0)<line_sep># both nodes are now available index,nodes=c.health.service('foo' passing=<true>)<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo:1' 'foo:2']<line_sep># wait until the short ttl node fails time.sleep(120/1000.0)<line_sep># only one node available index,nodes=c.health.service('foo' passing=<true>)<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo:1']<line_sep># ping the failed node's health check c.agent.check.ttl_pass('service:foo:2')<line_sep>time.sleep(40/1000.0)<line_sep># check both nodes are available index,nodes=c.health.service('foo' 
passing=<true>)<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo:1' 'foo:2']<line_sep># check that tag works index,nodes=c.health.service('foo' tag='tag:foo:1')<assert_stmt>[node['Service']['ID']<for>node nodes]<eq>['foo:1']<line_sep># deregister the nodes c.agent.service.deregister('foo:1')<line_sep>c.agent.service.deregister('foo:2')<line_sep>time.sleep(40/1000.0)<line_sep>index,nodes=c.health.service('foo')<assert_stmt>nodes<eq>[]<block_end><def_stmt>test_health_state self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># The empty string is for the Serf Health Status check, which has an # empty ServiceID index,nodes=c.health.state('any')<assert_stmt>[node['ServiceID']<for>node nodes]<eq>['']<line_sep># register two nodes, one with a long ttl, the other shorter c.agent.service.register('foo' service_id='foo:1' check=Check.ttl('10s'))<line_sep>c.agent.service.register('foo' service_id='foo:2' check=Check.ttl('100ms'))<line_sep>time.sleep(40/1000.0)<line_sep># check the nodes show for the /health/state/any endpoint index,nodes=c.health.state('any')<assert_stmt>set([node['ServiceID']<for>node nodes])<eq>set(['' 'foo:1' 'foo:2'])<line_sep># but that they aren't passing their health check index,nodes=c.health.state('passing')<assert_stmt>[node['ServiceID']<for>node nodes]<ne>'foo'<line_sep># ping the two node's health check c.agent.check.ttl_pass('service:foo:1')<line_sep>c.agent.check.ttl_pass('service:foo:2')<line_sep>time.sleep(40/1000.0)<line_sep># both nodes are now available index,nodes=c.health.state('passing')<assert_stmt>set([node['ServiceID']<for>node nodes])<eq>set(['' 'foo:1' 'foo:2'])<line_sep># wait until the short ttl node fails time.sleep(2200/1000.0)<line_sep># only one node available index,nodes=c.health.state('passing')<assert_stmt>set([node['ServiceID']<for>node nodes])<eq>set(['' 'foo:1'])<line_sep># ping the failed node's health check c.agent.check.ttl_pass('service:foo:2')<line_sep>time.sleep(40/1000.0)<line_sep># check both nodes are available index,nodes=c.health.state('passing')<assert_stmt>set([node['ServiceID']<for>node nodes])<eq>set(['' 'foo:1' 'foo:2'])<line_sep># deregister the nodes c.agent.service.deregister('foo:1')<line_sep>c.agent.service.deregister('foo:2')<line_sep>time.sleep(40/1000.0)<line_sep>index,nodes=c.health.state('any')<assert_stmt>[node['ServiceID']<for>node nodes]<eq>['']<block_end><def_stmt>test_health_node self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># grab local node name node=c.agent.self()['Config']['NodeName']<line_sep>index,checks=c.health.node(node)<assert_stmt>node<in>[check["Node"]<for>check checks]<block_end><def_stmt>test_health_checks self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.agent.service.register('foobar' service_id='foobar' check=Check.ttl('10s'))<line_sep>time.sleep(40/1000.00)<line_sep>index,checks=c.health.checks('foobar')<assert_stmt>[check['ServiceID']<for>check checks]<eq>['foobar']<assert_stmt>[check['CheckID']<for>check checks]<eq>['service:foobar']<line_sep>c.agent.service.deregister('foobar')<line_sep>time.sleep(40/1000.0)<line_sep>index,checks=c.health.checks('foobar')<assert_stmt>len(checks)<eq>0<block_end><def_stmt>test_session self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># session.create pytest.raises(consul.ConsulException c.session.create node='n2')<line_sep>pytest.raises(consul.ConsulException c.session.create dc='dc2')<line_sep>session_id=c.session.create('my-session')<line_sep># session.list 
pytest.raises(consul.ConsulException c.session.list dc='dc2')<line_sep>_,sessions=c.session.list()<assert_stmt>[x['Name']<for>x sessions]<eq>['my-session']<line_sep># session.info pytest.raises(consul.ConsulException c.session.info session_id dc='dc2')<line_sep>index,session=c.session.info('1'<times>36)<assert_stmt>session<is><none><line_sep>index,session=c.session.info(session_id)<assert_stmt>session['Name']<eq>'my-session'<line_sep># session.node node=session['Node']<line_sep>pytest.raises(consul.ConsulException c.session.node node dc='dc2')<line_sep>_,sessions=c.session.node(node)<assert_stmt>[x['Name']<for>x sessions]<eq>['my-session']<line_sep># session.destroy pytest.raises(consul.ConsulException c.session.destroy session_id dc='dc2')<assert_stmt>c.session.destroy(session_id)<is><true><line_sep>_,sessions=c.session.list()<assert_stmt>sessions<eq>[]<block_end><def_stmt>test_session_delete_ttl_renew self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>s=c.session.create(behavior='delete' ttl=20)<line_sep># attempt to renew an unknown session pytest.raises(consul.NotFound c.session.renew '1'<times>36)<line_sep>session=c.session.renew(s)<assert_stmt>session['Behavior']<eq>'delete'<assert_stmt>session['TTL']<eq>'20s'<line_sep># trying out the behavior <assert_stmt>c.kv.put('foo' '1' acquire=s)<is><true><line_sep>index,data=c.kv.get('foo')<assert_stmt>data['Value']<eq>six.b('1')<line_sep>c.session.destroy(s)<line_sep>index,data=c.kv.get('foo')<assert_stmt>data<is><none><block_end><def_stmt>test_acl_disabled self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>pytest.raises(consul.ACLDisabled c.acl.list)<line_sep>pytest.raises(consul.ACLDisabled c.acl.info '1'<times>36)<line_sep>pytest.raises(consul.ACLDisabled c.acl.create)<line_sep>pytest.raises(consul.ACLDisabled c.acl.update 'foo')<line_sep>pytest.raises(consul.ACLDisabled c.acl.clone 'foo')<line_sep>pytest.raises(consul.ACLDisabled c.acl.destroy 'foo')<block_end><def_stmt>test_acl_permission_denied self acl_consul<block_start>c=consul.Consul(port=acl_consul.port)<line_sep>pytest.raises(consul.ACLPermissionDenied c.acl.list)<line_sep>pytest.raises(consul.ACLPermissionDenied c.acl.create)<line_sep>pytest.raises(consul.ACLPermissionDenied c.acl.update 'anonymous')<line_sep>pytest.raises(consul.ACLPermissionDenied c.acl.clone 'anonymous')<line_sep>pytest.raises(consul.ACLPermissionDenied c.acl.destroy 'anonymous')<block_end><def_stmt>test_acl_explict_token_use self acl_consul<block_start>c=consul.Consul(port=acl_consul.port)<line_sep>master_token=acl_consul.token<line_sep>acls=c.acl.list(token=master_token)<assert_stmt>set([x['ID']<for>x acls])<eq>set(['anonymous' master_token])<assert_stmt>c.acl.info('1'<times>36)<is><none><line_sep>compare=[c.acl.info(master_token) c.acl.info('anonymous')]<line_sep>compare.sort(key=operator.itemgetter('ID'))<assert_stmt>acls<eq>compare<line_sep>rules=""" key "" { policy = "read" } key "private/" { policy = "deny" } service "foo-" { policy = "write" } service "bar-" { policy = "read" } """<line_sep>token=c.acl.create(rules=rules token=master_token)<assert_stmt>c.acl.info(token)['Rules']<eq>rules<line_sep>token2=c.acl.clone(token token=master_token)<assert_stmt>c.acl.info(token2)['Rules']<eq>rules<assert_stmt>c.acl.update(token2 name='Foo' token=master_token)<eq>token2<assert_stmt>c.acl.info(token2)['Name']<eq>'Foo'<assert_stmt>c.acl.destroy(token2 token=master_token)<is><true><assert_stmt>c.acl.info(token2)<is><none><line_sep>c.kv.put('foo' 
'bar')<line_sep>c.kv.put('private/foo' 'bar')<assert_stmt>c.kv.get('foo' token=token)[1]['Value']<eq>six.b('bar')<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.put 'foo' 'bar2' token=token)<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.delete 'foo' token=token)<assert_stmt>c.kv.get('private/foo')[1]['Value']<eq>six.b('bar')<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.get 'private/foo' token=token)<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.put 'private/foo' 'bar2' token=token)<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.delete 'private/foo' token=token)<line_sep># test token pass through for service registration pytest.raises(consul.ACLPermissionDenied c.agent.service.register "bar-1" token=token)<line_sep>c.agent.service.register("foo-1" token=token)<line_sep>index,data=c.health.service('foo-1' token=token)<assert_stmt>data[0]['Service']['ID']<eq>"foo-1"<line_sep>index,data=c.health.checks('foo-1' token=token)<assert_stmt>data<eq>[]<line_sep>index,data=c.health.service('bar-1' token=token)<assert_stmt><not>data<line_sep># clean up <assert_stmt>c.agent.service.deregister('foo-1')<is><true><line_sep>c.acl.destroy(token token=master_token)<line_sep>acls=c.acl.list(token=master_token)<assert_stmt>set([x['ID']<for>x acls])<eq>set(['anonymous' master_token])<block_end><def_stmt>test_acl_implicit_token_use self acl_consul# configure client to use the master token by default <block_start>c=consul.Consul(port=acl_consul.port token=acl_consul.token)<line_sep>master_token=acl_consul.token<line_sep>acls=c.acl.list()<assert_stmt>set([x['ID']<for>x acls])<eq>set(['anonymous' master_token])<assert_stmt>c.acl.info('foo')<is><none><line_sep>compare=[c.acl.info(master_token) c.acl.info('anonymous')]<line_sep>compare.sort(key=operator.itemgetter('ID'))<assert_stmt>acls<eq>compare<line_sep>rules=""" key "" { policy = "read" } key "private/" { policy = "deny" } """<line_sep>token=c.acl.create(rules=rules)<assert_stmt>c.acl.info(token)['Rules']<eq>rules<line_sep>token2=c.acl.clone(token)<assert_stmt>c.acl.info(token2)['Rules']<eq>rules<assert_stmt>c.acl.update(token2 name='Foo')<eq>token2<assert_stmt>c.acl.info(token2)['Name']<eq>'Foo'<assert_stmt>c.acl.destroy(token2)<is><true><assert_stmt>c.acl.info(token2)<is><none><line_sep>c.kv.put('foo' 'bar')<line_sep>c.kv.put('private/foo' 'bar')<line_sep>c_limited=consul.Consul(port=acl_consul.port token=token)<assert_stmt>c_limited.kv.get('foo')[1]['Value']<eq>six.b('bar')<line_sep>pytest.raises(consul.ACLPermissionDenied c_limited.kv.put 'foo' 'bar2')<line_sep>pytest.raises(consul.ACLPermissionDenied c_limited.kv.delete 'foo')<assert_stmt>c.kv.get('private/foo')[1]['Value']<eq>six.b('bar')<line_sep>pytest.raises(consul.ACLPermissionDenied c_limited.kv.get 'private/foo')<line_sep>pytest.raises(consul.ACLPermissionDenied c_limited.kv.put 'private/foo' 'bar2')<line_sep>pytest.raises(consul.ACLPermissionDenied c_limited.kv.delete 'private/foo')<line_sep># check we can override the client's default token pytest.raises(consul.ACLPermissionDenied c.kv.get 'private/foo' token=token)<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.put 'private/foo' 'bar2' token=token)<line_sep>pytest.raises(consul.ACLPermissionDenied c.kv.delete 'private/foo' token=token)<line_sep># clean up c.acl.destroy(token)<line_sep>acls=c.acl.list()<assert_stmt>set([x['ID']<for>x acls])<eq>set(['anonymous' master_token])<block_end><def_stmt>test_status_leader self 
consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>agent_self=c.agent.self()<line_sep>leader=c.status.leader()<line_sep>addr_port=agent_self['Stats']['consul']['leader_addr']<assert_stmt>leader<eq>addr_port "Leader value was {0}, expected value "<concat>"was {1}".format(leader addr_port)<block_end><def_stmt>test_status_peers self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>agent_self=c.agent.self()<line_sep>addr_port=agent_self['Stats']['consul']['leader_addr']<line_sep>peers=c.status.peers()<assert_stmt>addr_port<in>peers "Expected value '{0}' "<concat>"in peer list but it was not present".format(addr_port)<block_end><def_stmt>test_query self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep># check that query list is empty queries=c.query.list()<assert_stmt>queries<eq>[]<line_sep># create a new named query query_service='foo'<line_sep>query_name='fooquery'<line_sep>query=c.query.create(query_service query_name)<line_sep># assert response contains query ID <assert_stmt>'ID'<in>query<and>query['ID']<is><not><none><and>str(query['ID'])<ne>''<line_sep># retrieve query using id and name queries=c.query.get(query['ID'])<assert_stmt>queries<ne>[]<and>len(queries)<eq>1<assert_stmt>queries[0]['Name']<eq>query_name<and>queries[0]['ID']<eq>query['ID']<line_sep># explain query <assert_stmt>c.query.explain(query_name)['Query']<line_sep># delete query <assert_stmt>c.query.delete(query['ID'])<block_end><def_stmt>test_coordinate self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>c.coordinate.nodes()<line_sep>c.coordinate.datacenters()<assert_stmt>set(c.coordinate.datacenters()[0].keys())<eq>set(['Datacenter' 'Coordinates' 'AreaID'])<block_end><def_stmt>test_operator self consul_port<block_start>c=consul.Consul(port=consul_port)<line_sep>config=c.operator.raft_config()<assert_stmt>config["Index"]<eq>1<line_sep>leader=<false><line_sep>voter=<false><for_stmt>server config["Servers"]<block_start><if_stmt>server["Leader"]<block_start>leader=<true><block_end><if_stmt>server["Voter"]<block_start>voter=<true><block_end><block_end><assert_stmt>leader<assert_stmt>voter<block_end><block_end>
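# Editor's sketch (not part of the original test suite): the session-based lock
# pattern exercised by test_kv_acquire_release, written out as a small helper.
# The key, value and port are hypothetical; any reachable Consul agent works.
def _example_with_lock(port, key="locks/example", value="worker-1"):
    c = consul.Consul(port=port)
    session = c.session.create(behavior='delete', ttl=20)
    try:
        if not c.kv.put(key, value, acquire=session):
            return False  # another session already holds the lock
        # ... the critical section would run here ...
        return c.kv.put(key, value, release=session)
    finally:
        c.session.destroy(session)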
# coding=utf-8 # *** WARNING: this file was generated by pulumigen. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>... _utilities<import_from_stmt>... meta<as>_meta<line_sep>__all__=['LeaseSpecArgs' 'LeaseArgs' ]<line_sep>@pulumi.input_type<class_stmt>LeaseSpecArgs<block_start><def_stmt>__init__ __self__ * acquire_time:Optional[pulumi.Input[str]]=<none> holder_identity:Optional[pulumi.Input[str]]=<none> lease_duration_seconds:Optional[pulumi.Input[int]]=<none> lease_transitions:Optional[pulumi.Input[int]]=<none> renew_time:Optional[pulumi.Input[str]]=<none><block_start>""" LeaseSpec is a specification of a Lease. :param pulumi.Input[str] acquire_time: acquireTime is a time when the current lease was acquired. :param pulumi.Input[str] holder_identity: holderIdentity contains the identity of the holder of a current lease. :param pulumi.Input[int] lease_duration_seconds: leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime. :param pulumi.Input[int] lease_transitions: leaseTransitions is the number of transitions of a lease between holders. :param pulumi.Input[str] renew_time: renewTime is a time when the current holder of a lease has last updated the lease. """<if_stmt>acquire_time<is><not><none><block_start>pulumi.set(__self__ "acquire_time" acquire_time)<block_end><if_stmt>holder_identity<is><not><none><block_start>pulumi.set(__self__ "holder_identity" holder_identity)<block_end><if_stmt>lease_duration_seconds<is><not><none><block_start>pulumi.set(__self__ "lease_duration_seconds" lease_duration_seconds)<block_end><if_stmt>lease_transitions<is><not><none><block_start>pulumi.set(__self__ "lease_transitions" lease_transitions)<block_end><if_stmt>renew_time<is><not><none><block_start>pulumi.set(__self__ "renew_time" renew_time)<block_end><block_end>@[email protected](name="acquireTime")<def_stmt>acquire_time self<arrow>Optional[pulumi.Input[str]]<block_start>""" acquireTime is a time when the current lease was acquired. """<line_sep><return>pulumi.get(self "acquire_time")<block_end>@acquire_time.setter<def_stmt>acquire_time self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "acquire_time" value)<block_end>@[email protected](name="holderIdentity")<def_stmt>holder_identity self<arrow>Optional[pulumi.Input[str]]<block_start>""" holderIdentity contains the identity of the holder of a current lease. """<line_sep><return>pulumi.get(self "holder_identity")<block_end>@holder_identity.setter<def_stmt>holder_identity self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "holder_identity" value)<block_end>@[email protected](name="leaseDurationSeconds")<def_stmt>lease_duration_seconds self<arrow>Optional[pulumi.Input[int]]<block_start>""" leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime. 
"""<line_sep><return>pulumi.get(self "lease_duration_seconds")<block_end>@lease_duration_seconds.setter<def_stmt>lease_duration_seconds self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "lease_duration_seconds" value)<block_end>@[email protected](name="leaseTransitions")<def_stmt>lease_transitions self<arrow>Optional[pulumi.Input[int]]<block_start>""" leaseTransitions is the number of transitions of a lease between holders. """<line_sep><return>pulumi.get(self "lease_transitions")<block_end>@lease_transitions.setter<def_stmt>lease_transitions self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "lease_transitions" value)<block_end>@[email protected](name="renewTime")<def_stmt>renew_time self<arrow>Optional[pulumi.Input[str]]<block_start>""" renewTime is a time when the current holder of a lease has last updated the lease. """<line_sep><return>pulumi.get(self "renew_time")<block_end>@renew_time.setter<def_stmt>renew_time self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "renew_time" value)<block_end><block_end>@pulumi.input_type<class_stmt>LeaseArgs<block_start><def_stmt>__init__ __self__ * api_version:Optional[pulumi.Input[str]]=<none> kind:Optional[pulumi.Input[str]]=<none> metadata:Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]=<none> spec:Optional[pulumi.Input['LeaseSpecArgs']]=<none><block_start>""" Lease defines a lease concept. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input['LeaseSpecArgs'] spec: Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status """<if_stmt>api_version<is><not><none><block_start>pulumi.set(__self__ "api_version" 'coordination.k8s.io/v1')<block_end><if_stmt>kind<is><not><none><block_start>pulumi.set(__self__ "kind" 'Lease')<block_end><if_stmt>metadata<is><not><none><block_start>pulumi.set(__self__ "metadata" metadata)<block_end><if_stmt>spec<is><not><none><block_start>pulumi.set(__self__ "spec" spec)<block_end><block_end>@[email protected](name="apiVersion")<def_stmt>api_version self<arrow>Optional[pulumi.Input[str]]<block_start>""" APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """<line_sep><return>pulumi.get(self "api_version")<block_end>@api_version.setter<def_stmt>api_version self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "api_version" value)<block_end>@[email protected]<def_stmt>kind self<arrow>Optional[pulumi.Input[str]]<block_start>""" Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """<line_sep><return>pulumi.get(self "kind")<block_end>@kind.setter<def_stmt>kind self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "kind" value)<block_end>@[email protected]<def_stmt>metadata self<arrow>Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]<block_start>""" More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """<line_sep><return>pulumi.get(self "metadata")<block_end>@metadata.setter<def_stmt>metadata self value:Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]<block_start>pulumi.set(self "metadata" value)<block_end>@[email protected]<def_stmt>spec self<arrow>Optional[pulumi.Input['LeaseSpecArgs']]<block_start>""" Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status """<line_sep><return>pulumi.get(self "spec")<block_end>@spec.setter<def_stmt>spec self value:Optional[pulumi.Input['LeaseSpecArgs']]<block_start>pulumi.set(self "spec" value)<block_end><block_end>
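# Editor's sketch (not generated code): how the argument classes above are
# typically populated. The lease name, namespace and holder identity are
# hypothetical values.
def _example_lease_args() -> LeaseArgs:
    return LeaseArgs(
        metadata=_meta.v1.ObjectMetaArgs(name="example-lease", namespace="kube-system"),
        spec=LeaseSpecArgs(holder_identity="node-a", lease_duration_seconds=15),
    )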
<import_from_stmt>lemur database<def_stmt>rotate_certificate endpoint new_cert<block_start>""" Rotates a certificate on a given endpoint. :param endpoint: the endpoint whose certificate should be replaced :param new_cert: the certificate to deploy onto the endpoint :return: None """<line_sep># ensure that certificate is available for rotation endpoint.source.plugin.update_endpoint(endpoint new_cert)<line_sep>endpoint.certificate=new_cert<line_sep>database.update(endpoint)<block_end>
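# Editor's sketch (not part of the original module): rotating every endpoint
# that is still serving an old certificate. The `endpoints` relationship on the
# certificate object is an assumption about the Lemur data model.
def rotate_all_endpoints(old_cert, new_cert):
    for endpoint in old_cert.endpoints:
        rotate_certificate(endpoint, new_cert)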
# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_versionedobjects fields<import_from_stmt>cinder objects<import_from_stmt>cinder.tests.unit fake_constants<as>fake<def_stmt>fake_db_group_snapshot **updates<block_start>db_group_snapshot={'id':fake.GROUP_SNAPSHOT_ID 'name':'group-1' 'status':'available' 'user_id':fake.USER_ID 'project_id':fake.PROJECT_ID 'group_type_id':fake.GROUP_TYPE_ID 'group_id':fake.GROUP_ID }<for_stmt>name,field objects.GroupSnapshot.fields.items()<block_start><if_stmt>name<in>db_group_snapshot<block_start><continue><block_end><if_stmt>field.nullable<block_start>db_group_snapshot[name]=<none><block_end><elif_stmt>field.default<ne>fields.UnspecifiedDefault<block_start>db_group_snapshot[name]=field.default<block_end><else_stmt><block_start><raise>Exception('fake_db_group_snapshot needs help with %s.'%name)<block_end><block_end><if_stmt>updates<block_start>db_group_snapshot.update(updates)<block_end><return>db_group_snapshot<block_end><def_stmt>fake_group_snapshot_obj context **updates<block_start><return>objects.GroupSnapshot._from_db_object(context objects.GroupSnapshot() fake_db_group_snapshot(**updates))<block_end>
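# Editor's sketch (not part of the original helpers): typical use inside a unit
# test. The request context argument is an assumption; cinder tests normally
# build one via cinder.context or a fake context helper.
def _example_usage(ctxt):
    snap = fake_group_snapshot_obj(ctxt, status='creating')
    assert snap.status == 'creating'       # explicit overrides win
    assert snap.group_id == fake.GROUP_ID  # defaults come from fake_db_group_snapshot
    return snap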
<import_stmt>network<def_stmt>conncb task<block_start>print("[{}] Connected".format(task))<block_end><def_stmt>disconncb task<block_start>print("[{}] Disconnected".format(task))<block_end><def_stmt>subscb task<block_start>print("[{}] Subscribed".format(task))<block_end><def_stmt>pubcb pub<block_start>print("[{}] Published: {}".format(pub[0] pub[1]))<block_end><def_stmt>datacb msg<block_start>print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0] msg[1]) msg[2])<block_end>mqtt=network.mqtt("loboris" "mqtt://loboris.eu" user="wifimcu" password="<PASSWORD>" cleansession=<true> connected_cb=conncb disconnected_cb=disconncb subscribed_cb=subscb published_cb=pubcb data_cb=datacb)<line_sep># secure connection requires more memory and may not work # mqtts = network.mqtt("eclipse", "mqtts://iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb) # wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb) mqtt.start()<line_sep>#mqtt.config(lwt_topic='status', lwt_msg='Disconnected') ''' # Wait until status is: (1, 'Connected') mqtt.subscribe('test') mqtt.publish('test', 'Hi from Micropython') mqtt.stop() '''<line_sep># ================== # ThingSpeak example # ================== <import_stmt>network<import_stmt>utime<def_stmt>datacb msg<block_start>print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0] msg[1]) msg[2])<block_end>thing=network.mqtt("thingspeak" "mqtt://mqtt.thingspeak.com" user="anyName" password="<PASSWORD>" cleansession=<true> data_cb=datacb)<line_sep># or secure connection #thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="<PASSWORD>", cleansession=True, data_cb=datacb) thingspeakChannelId="123456"# enter Thingspeak Channel ID thingspeakChannelWriteApiKey="ThingspeakWriteAPIKey"# EDIT - enter Thingspeak Write API Key thingspeakFieldNo=1<line_sep>thingSpeakChannelFormat="json"<line_sep>pubchan="channels/{:s}/publish/{:s}".format(thingspeakChannelId thingspeakChannelWriteApiKey)<line_sep>pubfield="channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId thingspeakFieldNo thingspeakChannelWriteApiKey)<line_sep>subchan="channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId thingSpeakChannelFormat thingspeakChannelWriteApiKey)<line_sep>subfield="channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId thingspeakFieldNo thingspeakChannelWriteApiKey)<line_sep>thing.start()<line_sep>tmo=0<while_stmt>thing.status()[0]<ne>2<block_start>utime.sleep_ms(100)<line_sep>tmo<augadd>1<if_stmt>tmo<g>80<block_start>print("Not connected")<line_sep><break><block_end><block_end># subscribe to channel thing.subscribe(subchan)<line_sep># subscribe to field thing.subscribe(subfield)<line_sep># publish to channel # Payload can include any of those fields separated by ';': # "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value" thing.publish(pubchan "field1=25.2;status=On line")<line_sep># Publish to field thing.publish(pubfield "24.5")<line_sep>
# Copyright (c) 2020. <NAME>, <EMAIL> <import_stmt>os open3d numpy<as>np<line_sep>File_=open('ModelNet_flist_short.txt' 'w')<if_stmt>__name__<eq>"__main__"<block_start>root_dir="../data/ModelNet_subset/"<for_stmt>root,dirs,files os.walk(root_dir topdown=<false>)<block_start><for_stmt>file files<block_start><if_stmt>'.ply'<in>file<block_start>amesh=open3d.io.read_triangle_mesh(os.path.join(root file))<line_sep>out_file_name=os.path.join(root file).replace('.ply' '_normalised.obj')<line_sep>center=amesh.get_center()<line_sep>amesh.translate(-center)<line_sep>maxR=(np.asarray(amesh.vertices)<power>2).sum(axis=1).max()<power>(1/2)<line_sep># we found that dividing by (2*maxR) gives the best rendered visualisation results amesh.scale(1/(2<times>maxR))<line_sep>open3d.io.write_triangle_mesh(out_file_name amesh)<line_sep>File_.writelines(out_file_name.replace('.obj' '').replace(root_dir '')+'\n')<line_sep>print(out_file_name)<block_end><block_end><block_end># close the file list so the collected names are flushed to disk File_.close()<block_end>
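# Editor's sketch (not part of the original script): sanity-check that a
# normalised mesh produced by the loop above is centred and fits inside a
# radius-0.5 sphere. The file name is a hypothetical output path.
def _check_normalised(path='../data/ModelNet_subset/chair_0001_normalised.obj'):
    mesh = open3d.io.read_triangle_mesh(path)
    verts = np.asarray(mesh.vertices)
    assert np.allclose(verts.mean(axis=0), 0.0, atol=1e-3)      # centred at the origin
    assert (verts ** 2).sum(axis=1).max() ** 0.5 <= 0.5 + 1e-6  # radius at most 0.5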
########################################################################## # # Copyright (c) 2013, <NAME>. All rights reserved. # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of <NAME> nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## <import_stmt>weakref<import_stmt>imath<import_stmt>Gaffer<import_stmt>GafferUI<class_stmt>ColorSwatchPlugValueWidget(GafferUI.PlugValueWidget)<block_start><def_stmt>__init__ self plugs **kw<block_start>self.__swatch=GafferUI.ColorSwatch()<line_sep>GafferUI.PlugValueWidget.__init__(self self.__swatch plugs **kw)<line_sep>## \todo How do set maximum height with a public API? 
self.__swatch._qtWidget().setMaximumHeight(20)<line_sep>self._addPopupMenu(self.__swatch)<line_sep>self.__swatch.buttonPressSignal().connect(Gaffer.WeakMethod(self.__buttonPress) scoped=<false>)<line_sep>self.__swatch.dragBeginSignal().connect(Gaffer.WeakMethod(self.__dragBegin) scoped=<false>)<line_sep>self.__swatch.dragEndSignal().connect(Gaffer.WeakMethod(self.__dragEnd) scoped=<false>)<line_sep>self.__swatch.buttonReleaseSignal().connect(Gaffer.WeakMethod(self.__buttonRelease) scoped=<false>)<line_sep>self._updateFromPlugs()<block_end><def_stmt>setHighlighted self highlighted<block_start>GafferUI.PlugValueWidget.setHighlighted(self highlighted)<line_sep>self.__swatch.setHighlighted(highlighted)<block_end><def_stmt>_updateFromPlugs self<block_start><with_stmt>self.getContext()<block_start>value=_colorFromPlugs(self.getPlugs())<block_end>self.__swatch.setColor(value)<block_end><def_stmt>__buttonPress self widget event<block_start><if_stmt>event.buttons<eq>event.Buttons.Left<block_start><return><true><block_end><return><false><block_end><def_stmt>__dragBegin self widget event<block_start>GafferUI.Pointer.setCurrent("rgba")<line_sep><return>self.__swatch.getColor()<block_end><def_stmt>__dragEnd self widget event<block_start>GafferUI.Pointer.setCurrent(<none>)<block_end><def_stmt>__buttonRelease self widget event<block_start><if_stmt>event.button<ne>event.Buttons.Left<block_start><return><false><block_end><if_stmt><not>self._editable()<block_start><return><false><block_end>_ColorPlugValueDialogue.acquire(self.getPlugs())<line_sep><return><true><block_end><block_end><def_stmt>_colorFromPlugs plugs<block_start><if_stmt><not>len(plugs)<block_start><return>imath.Color4f(0)<block_end># ColorSwatch only supports one colour, and doesn't have # an "indeterminate" state, so when we have multiple plugs # the best we can do is take an average. <return>sum(p.getValue()<for>p plugs)/len(plugs)<block_end>## \todo Perhaps we could make this a part of the public API? Perhaps we could also make a # PlugValueDialogue base class to share some of the work with the dialogue made by the # SplinePlugValueWidget. Or perhaps the `acquire()` here and `NodeSetEditor.acquire()` should # actually be functionality of CompoundEditor? 
<class_stmt>_ColorPlugValueDialogue(GafferUI.ColorChooserDialogue)<block_start><def_stmt>__init__ self plugs parentWindow<block_start>GafferUI.ColorChooserDialogue.__init__(self color=_colorFromPlugs(plugs))<line_sep># we use these to decide which actions to merge into a single undo self.__lastChangedReason=<none><line_sep>self.__mergeGroupId=0<line_sep>self.__colorChangedConnection=self.colorChooser().colorChangedSignal().connect(Gaffer.WeakMethod(self.__colorChanged) scoped=<false>)<line_sep>self.confirmButton.clickedSignal().connect(Gaffer.WeakMethod(self.__buttonClicked) scoped=<false>)<line_sep>self.cancelButton.clickedSignal().connect(Gaffer.WeakMethod(self.__buttonClicked) scoped=<false>)<line_sep>self.__plugs=plugs<line_sep>self.__initialValues={p:p.getValue()<for>p self.__plugs}<line_sep>nodes={p.node()<for>p self.__plugs}<line_sep>self.__plugSetConnections=[n.plugSetSignal().connect(Gaffer.WeakMethod(self.__plugSet) scoped=<false>)<for>n nodes]<for_stmt>node nodes<block_start>node.parentChangedSignal().connect(Gaffer.WeakMethod(self.__destroy) scoped=<false>)<block_end>plug=next(iter(self.__plugs))<if_stmt>len(self.__plugs)<eq>1<block_start>self.setTitle(plug.relativeName(plug.ancestor(Gaffer.ScriptNode)))<block_end><else_stmt><block_start>self.setTitle("{} plugs".format(len(self.__plugs)))<block_end>self.__plugSet(plug)<line_sep>parentWindow.addChildWindow(self removeOnClose=<true>)<block_end>@classmethod<def_stmt>acquire cls plugs<block_start>plug=next(iter(plugs))<line_sep>script=plug.node().scriptNode()<line_sep>scriptWindow=GafferUI.ScriptWindow.acquire(script)<for_stmt>window scriptWindow.childWindows()<block_start><if_stmt>isinstance(window cls)<and>window.__plugs<eq>plugs<block_start>window.setVisible(<true>)<line_sep><return>window<block_end><block_end>window=_ColorPlugValueDialogue(plugs scriptWindow)<line_sep>window.setVisible(<true>)<line_sep><return><false><block_end><def_stmt>__plugSet self plug<block_start><if_stmt>plug<in>self.__plugs<block_start><with_stmt>Gaffer.BlockedConnection(self.__colorChangedConnection)<block_start>self.colorChooser().setColor(_colorFromPlugs(self.__plugs))<block_end><block_end><block_end><def_stmt>__colorChanged self colorChooser reason<block_start><if_stmt><not>GafferUI.ColorChooser.changesShouldBeMerged(self.__lastChangedReason reason)<block_start>self.__mergeGroupId<augadd>1<block_end>self.__lastChangedReason=reason<with_stmt>Gaffer.UndoScope(next(iter(self.__plugs)).ancestor(Gaffer.ScriptNode) mergeGroup="ColorPlugValueDialogue%d%d"%(id(self ) self.__mergeGroupId))<block_start><with_stmt>Gaffer.BlockedConnection(self.__plugSetConnections)<block_start><for_stmt>plug self.__plugs<block_start>plug.setValue(self.colorChooser().getColor())<block_end><block_end><block_end><block_end><def_stmt>__buttonClicked self button<block_start><if_stmt>button<is>self.cancelButton<block_start><with_stmt>Gaffer.UndoScope(next(iter(self.__plugs)).ancestor(Gaffer.ScriptNode))<block_start><for_stmt>p,v self.__initialValues.items()<block_start>p.setValue(v)<block_end><block_end><block_end>self.parent().removeChild(self)<line_sep># Workaround for https://bugreports.qt-project.org/browse/QTBUG-26761. <assert_stmt>(<not>self.visible())<line_sep>GafferUI.WidgetAlgo.keepUntilIdle(self)<block_end><def_stmt>__destroy self *unused<block_start>self.parent().removeChild(self)<block_end><block_end>
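# Editor's sketch (not part of the original module): _colorFromPlugs simply
# averages the plug values, so two colour plugs holding red and blue yield the
# mid colour. Requires a Gaffer environment; node and plug names are hypothetical.
def _averageColourExample():
    node = Gaffer.Node()
    node["a"] = Gaffer.Color4fPlug(defaultValue=imath.Color4f(1, 0, 0, 1))
    node["b"] = Gaffer.Color4fPlug(defaultValue=imath.Color4f(0, 0, 1, 1))
    return _colorFromPlugs({node["a"], node["b"]})  # ~Color4f( 0.5, 0, 0.5, 1 )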
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Import Local Modules <import_stmt>marvin<import_from_stmt>nose.plugins.attrib attr<import_from_stmt>marvin.cloudstackTestCase cloudstackTestCase<import_stmt>unittest<import_from_stmt>marvin.cloudstackAPI *<import_from_stmt>marvin.lib.utils *<import_from_stmt>marvin.lib.base *<import_from_stmt>marvin.lib.common *<import_from_stmt>marvin.codes PASS FAILED SUCCESS XEN_SERVER<import_from_stmt>marvin.sshClient SshClient<import_stmt>requests<line_sep>requests.packages.urllib3.disable_warnings()<import_stmt>random<import_stmt>string<import_stmt>telnetlib<import_stmt>os<import_stmt>urllib.request urllib.parse urllib.error<import_stmt>time<import_stmt>tempfile<line_sep>_multiprocess_shared_=<true><class_stmt>TestBrowseUploadTemplate(cloudstackTestCase)<block_start>""" Tests for browser based upload template feature. Once all issues in test_browse_templates.py are fixed, this should be merged back """<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>cls.testClient=super(TestBrowseUploadTemplate cls).getClsTestClient()<line_sep>cls.testdata=cls.testClient.getParsedTestDataConfig()<line_sep>cls.apiclient=cls.testClient.getApiClient()<line_sep>cls.hypervisor=cls.testClient.getHypervisorInfo()<line_sep>cls._cleanup=[]<line_sep>cls.cleanup=[]<line_sep>hosts=list_hosts(cls.apiclient type="Routing")<if_stmt>hosts<is><none><block_start>cls.SkipTest("There are no hypervisor's available. 
Check list hosts response")<block_end>cls.uploadtemplateformat="VHD"<line_sep>cls.templatename="test"<line_sep>cls.templatehypervisor="XenServer"<line_sep>cls.templateostypeid=142<line_sep>cls.zone=get_zone(cls.apiclient cls.testClient.getZoneForTests())<line_sep>cls.domain=get_domain(cls.apiclient)<line_sep>cls.pod=get_pod(cls.apiclient cls.zone.id)<line_sep>cls.account=Account.create(cls.apiclient cls.testdata["account"] domainid=cls.domain.id)<line_sep>cls._cleanup=[cls.account]<block_end><def_stmt>waitForSystemVMAgent self vmname<block_start>timeout=self.testdata["timeout"]<while_stmt><true><block_start>list_host_response=list_hosts(self.apiclient name=vmname)<if_stmt>list_host_response<and>list_host_response[0].state<eq>'Up'<block_start><break><block_end><if_stmt>timeout<eq>0<block_start><raise>Exception("Timed out waiting for SSVM agent to be Up")<block_end>time.sleep(self.testdata["sleep"])<line_sep>timeout=timeout-1<block_end><block_end><def_stmt>destroy_ssvm self<block_start>list_ssvm_response=list_ssvms(self.apiclient systemvmtype='secondarystoragevm' state='Running' zoneid=self.zone.id)<line_sep>self.assertEqual(isinstance(list_ssvm_response list) <true> "Check list response returns a valid list")<line_sep>ssvm_response=list_ssvm_response[0]<line_sep>old_name=ssvm_response.name<line_sep>self.debug("Destroying SSVM: %s"%ssvm_response.id)<line_sep>cmd=destroySystemVm.destroySystemVmCmd()<line_sep>cmd.id=ssvm_response.id<line_sep>self.apiclient.destroySystemVm(cmd)<line_sep>timeout=self.testdata["timeout"]<while_stmt><true><block_start>list_ssvm_response=list_ssvms(self.apiclient zoneid=self.zone.id systemvmtype='secondarystoragevm')<if_stmt>isinstance(list_ssvm_response list)<block_start><if_stmt>list_ssvm_response[0].state<eq>'Running'<block_start><break><block_end><block_end><if_stmt>timeout<eq>0<block_start><raise>Exception("List SSVM call failed!")<block_end>time.sleep(self.testdata["sleep"])<line_sep>timeout=timeout-1<block_end>ssvm_response=list_ssvm_response[0]<line_sep># Verify Name, Public IP, Private IP and Link local IP # for newly created SSVM self.assertNotEqual(ssvm_response.name old_name "Check SSVM new name with name of destroyed SSVM")<line_sep>self.assertEqual(hasattr(ssvm_response 'privateip') <true> "Check whether SSVM has private IP field")<line_sep>self.assertEqual(hasattr(ssvm_response 'linklocalip') <true> "Check whether SSVM has link local IP field")<line_sep>self.assertEqual(hasattr(ssvm_response 'publicip') <true> "Check whether SSVM has public IP field")<line_sep># Wait for the agent to be up self.waitForSystemVMAgent(ssvm_response.name)<line_sep><return><block_end>@attr(tags=["advanced" "advancedns" "smoke" "basic"] required_hardware="false")<def_stmt>test_browser_upload_template_incomplete self<block_start>""" Test browser based incomplete template upload, followed by SSVM destroy. Template should go to UploadAbandoned state and get cleaned up. 
"""<try_stmt><block_start>self.debug("========================= Test browser based incomplete template upload ========================")<line_sep>#Only register template, without uploading cmd=getUploadParamsForTemplate.getUploadParamsForTemplateCmd()<line_sep>cmd.zoneid=self.zone.id<line_sep>cmd.format=self.uploadtemplateformat<line_sep>cmd.name=self.templatename+self.account.name+(random.choice(string.ascii_uppercase))<line_sep>cmd.account=self.account.name<line_sep>cmd.domainid=self.domain.id<line_sep>cmd.displaytext=cmd.name<line_sep>cmd.hypervisor=self.templatehypervisor<line_sep>cmd.ostypeid=self.templateostypeid<line_sep>template_response=self.apiclient.getUploadParamsForTemplate(cmd)<line_sep>#Destroy SSVM, and wait for new one to start self.destroy_ssvm()<line_sep>wait_for_cleanup(self.apiclient ["expunge.delay" "expunge.interval"])<line_sep>#Verify that the template is cleaned up as part of sync-up during new SSVM start list_template_response=Template.list(self.apiclient id=template_response.id templatefilter="all" zoneid=self.zone.id)<line_sep>self.assertEqual(list_template_response <none> "Template is not cleaned up, some issue with template sync-up")<block_end><except_stmt>Exception<as>e<block_start>self.fail("Exception occurred : %s"%e)<block_end><return><block_end>@classmethod<def_stmt>tearDownClass self<block_start><try_stmt><block_start>self.apiclient=super(TestBrowseUploadTemplate self).getClsTestClient().getApiClient()<line_sep>cleanup_resources(self.apiclient self._cleanup)<block_end><except_stmt>Exception<as>e<block_start><raise>Exception("Warning: Exception during cleanup : %s"%e)<block_end><return><block_end><block_end>
<import_from_stmt>.provider Provider<import_from_stmt>.adapter Adapter<import_from_stmt>.device Device<import_from_stmt>.gatt GattService GattCharacteristic GattDescriptor<line_sep>
# Copyright 2021 TUNiB inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>contextlib suppress<import_from_stmt>typing List Union<import_from_stmt>torch nn<import_from_stmt>parallelformers.policies.base Policy<class_stmt>AutoPolicy<block_start>"""Class for finds automatically appropriate policies for the current model"""<def_stmt>__init__ self<block_start>self.builtin_policies={}<with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.gpt_neo.modeling_gpt_neo GPTNeoPreTrainedModel <import_from_stmt>parallelformers.policies.gpt_neo GPTNeoPolicy<line_sep>self.builtin_policies[GPTNeoPreTrainedModel]=[GPTNeoPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.bert.modeling_bert BertPreTrainedModel <import_from_stmt>parallelformers.policies.bert BertPolicy<line_sep>self.builtin_policies[BertPreTrainedModel]=[BertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.bart.modeling_bart BartPretrainedModel <import_from_stmt>parallelformers.policies.bart BartDecoderPolicy BartEncoderPolicy <line_sep>self.builtin_policies[BartPretrainedModel]=[BartEncoderPolicy BartDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.blenderbot.modeling_blenderbot BlenderbotPreTrainedModel <import_from_stmt>parallelformers.policies.blenderbot BlenderbotDecoderPolicy BlenderbotEncoderPolicy <line_sep>self.builtin_policies[BlenderbotPreTrainedModel]=[BlenderbotEncoderPolicy BlenderbotDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.deberta.modeling_deberta DebertaPreTrainedModel <import_from_stmt>parallelformers.policies.deberta DebertaPolicy<line_sep>self.builtin_policies[DebertaPreTrainedModel]=[DebertaPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.transfo_xl.modeling_transfo_xl TransfoXLPreTrainedModel <import_from_stmt>parallelformers.policies.transfo_xl TransfoXLPolicy<line_sep>self.builtin_policies[TransfoXLPreTrainedModel]=[TransfoXLPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.roberta.modeling_roberta RobertaPreTrainedModel <import_from_stmt>parallelformers.policies.roberta RobertaPolicy<line_sep>self.builtin_policies[RobertaPreTrainedModel]=[RobertaPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.albert.modeling_albert AlbertPreTrainedModel <import_from_stmt>parallelformers.policies.albert AlbertPolicy<line_sep>self.builtin_policies[AlbertPreTrainedModel]=[AlbertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.gpt2.modeling_gpt2 GPT2PreTrainedModel <import_from_stmt>parallelformers.policies.gpt2 GPT2Policy<line_sep>self.builtin_policies[GPT2PreTrainedModel]=[GPT2Policy 
]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.ctrl.modeling_ctrl CTRLPreTrainedModel <import_from_stmt>parallelformers.policies.ctrl CTRLPolicy<line_sep>self.builtin_policies[CTRLPreTrainedModel]=[CTRLPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.deberta_v2.modeling_deberta_v2 DebertaV2PreTrainedModel <import_from_stmt>parallelformers.policies.deberta_v2 DebertaV2Policy<line_sep>self.builtin_policies[DebertaV2PreTrainedModel]=[DebertaV2Policy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.openai.modeling_openai OpenAIGPTPreTrainedModel <import_from_stmt>parallelformers.policies.openai OpenAIGPTPolicy<line_sep>self.builtin_policies[OpenAIGPTPreTrainedModel]=[OpenAIGPTPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.electra.modeling_electra ElectraPreTrainedModel <import_from_stmt>parallelformers.policies.electra ElectraPolicy<line_sep>self.builtin_policies[ElectraPreTrainedModel]=[ElectraPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.blenderbot_small.modeling_blenderbot_small BlenderbotSmallPreTrainedModel <import_from_stmt>parallelformers.policies.blenderbot_small BlenderbotSmallDecoderPolicy BlenderbotSmallEncoderPolicy <line_sep>self.builtin_policies[BlenderbotSmallPreTrainedModel]=[BlenderbotSmallEncoderPolicy BlenderbotSmallDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.distilbert.modeling_distilbert DistilBertPreTrainedModel <import_from_stmt>parallelformers.policies.distil_bert DistilBertPolicy<line_sep>self.builtin_policies[DistilBertPreTrainedModel]=[DistilBertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.convbert.modeling_convbert ConvBertPreTrainedModel <import_from_stmt>parallelformers.policies.convbert ConvBertPolicy<line_sep>self.builtin_policies[ConvBertPreTrainedModel]=[ConvBertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.bert_generation.modeling_bert_generation BertGenerationPreTrainedModel <import_from_stmt>parallelformers.policies.bert BertPolicy<line_sep>self.builtin_policies[BertGenerationPreTrainedModel]=[BertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.big_bird.modeling_big_bird BigBirdPreTrainedModel <import_from_stmt>parallelformers.policies.bigbird BigBirdPolicy<line_sep>self.builtin_policies[BigBirdPreTrainedModel]=[BigBirdPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.bigbird_pegasus.modeling_bigbird_pegasus BigBirdPegasusPreTrainedModel <import_from_stmt>parallelformers.policies.bigbird_pegasus BigBirdPegasusDecoderPolicy BigBirdPegasusEncoderPolicy <line_sep>self.builtin_policies[BigBirdPegasusPreTrainedModel]=[BigBirdPegasusEncoderPolicy BigBirdPegasusDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.vit.modeling_vit ViTPreTrainedModel<import_from_stmt>parallelformers.policies.vit ViTPolicy<line_sep>self.builtin_policies[ViTPreTrainedModel]=[ViTPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.deit.modeling_deit DeiTPreTrainedModel <import_from_stmt>parallelformers.policies.deit 
DeiTPolicy<line_sep>self.builtin_policies[DeiTPreTrainedModel]=[DeiTPolicy]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.mbart.modeling_mbart MBartPreTrainedModel <import_from_stmt>parallelformers.policies.mbart MBartDecoderPolicy MBartEncoderPolicy <line_sep>self.builtin_policies[MBartPreTrainedModel]=[MBartEncoderPolicy MBartDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.t5.modeling_t5 T5PreTrainedModel<import_from_stmt>parallelformers.policies.t5 T5Policy<line_sep>self.builtin_policies[T5PreTrainedModel]=[T5Policy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.pegasus.modeling_pegasus PegasusPreTrainedModel <import_from_stmt>parallelformers.policies.pegasus PegasusDecoderPolicy PegasusEncoderPolicy <line_sep>self.builtin_policies[PegasusPreTrainedModel]=[PegasusEncoderPolicy PegasusDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.fsmt.modeling_fsmt PretrainedFSMTModel <import_from_stmt>parallelformers.policies.fsmt FSMTDecoderPolicy FSMTEncoderPolicy <line_sep>self.builtin_policies[PretrainedFSMTModel]=[FSMTEncoderPolicy FSMTDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.xlm.modeling_xlm XLMPreTrainedModel<import_from_stmt>parallelformers.policies.xlm XLMAttentionPolicy XLMMLPPolicy <line_sep>self.builtin_policies[XLMPreTrainedModel]=[XLMAttentionPolicy XLMMLPPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.m2m_100.modeling_m2m_100 M2M100PreTrainedModel <import_from_stmt>parallelformers.policies.m2m_100 M2M100DecoderPolicy M2M100EncoderPolicy <line_sep>self.builtin_policies[M2M100PreTrainedModel]=[M2M100EncoderPolicy M2M100DecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.marian.modeling_marian MarianPreTrainedModel <import_from_stmt>parallelformers.policies.marian MarianDecoderPolicy MarianEncoderPolicy <line_sep>self.builtin_policies[MarianPreTrainedModel]=[MarianEncoderPolicy MarianDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.mobilebert.modeling_mobilebert MobileBertPreTrainedModel <import_from_stmt>parallelformers.policies.mobilebert MobileBertPolicy<line_sep>self.builtin_policies[MobileBertPreTrainedModel]=[MobileBertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.mpnet.modeling_mpnet MPNetPreTrainedModel <import_from_stmt>parallelformers.policies.mpnet MPNetEncoderPolicy MPNetLayerPolicy <line_sep>self.builtin_policies[MPNetPreTrainedModel]=[MPNetEncoderPolicy MPNetLayerPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.luke.modeling_luke LukePreTrainedModel <import_from_stmt>parallelformers.policies.luke LukePolicy<line_sep>self.builtin_policies[LukePreTrainedModel]=[LukePolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.dpr.modeling_dpr DPRPretrainedContextEncoder DPRPretrainedQuestionEncoder DPRPretrainedReader <line_sep>self.builtin_policies[DPRPretrainedReader]=[BertPolicy ]<line_sep>self.builtin_policies[DPRPretrainedQuestionEncoder]=[BertPolicy ]<line_sep>self.builtin_policies[DPRPretrainedContextEncoder]=[BertPolicy 
]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.lxmert.modeling_lxmert LxmertPreTrainedModel <import_from_stmt>parallelformers.policies.lxmert LxmertPolicy<line_sep>self.builtin_policies[LxmertPreTrainedModel]=[LxmertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.hubert.modeling_hubert HubertPreTrainedModel <import_from_stmt>parallelformers.policies.hubert HubertPolicy<line_sep>self.builtin_policies[HubertPreTrainedModel]=[HubertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.wav2vec2.modeling_wav2vec2 Wav2Vec2PreTrainedModel <import_from_stmt>parallelformers.policies.wav2vec Wav2VecPolicy<line_sep>self.builtin_policies[Wav2Vec2PreTrainedModel]=[Wav2VecPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.xlnet.modeling_xlnet XLNetPreTrainedModel <import_from_stmt>parallelformers.policies.xlnet XLNetPolicy<line_sep>self.builtin_policies[XLNetPreTrainedModel]=[XLNetPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.retribert.modeling_retribert RetriBertPreTrainedModel <line_sep>self.builtin_policies[RetriBertPreTrainedModel]=[BertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.clip.modeling_clip CLIPPreTrainedModel <import_from_stmt>parallelformers.policies.clip CLIPLayerPolicy CLIPTextPolicy CLIPVisionPolicy <line_sep>self.builtin_policies[CLIPPreTrainedModel]=[CLIPLayerPolicy CLIPTextPolicy CLIPVisionPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.detr.modeling_detr DetrPreTrainedModel <import_from_stmt>parallelformers.policies.detr DetrDecoderPolicy DetrEncoderPolicy <line_sep>self.builtin_policies[DetrPreTrainedModel]=[DetrEncoderPolicy DetrDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.reformer.modeling_reformer ReformerPreTrainedModel <import_from_stmt>parallelformers.policies.reformer ReformerPolicy<line_sep>self.builtin_policies[ReformerPreTrainedModel]=[ReformerPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.longformer.modeling_longformer LongformerPreTrainedModel <import_from_stmt>parallelformers.policies.longformer LongformerPolicy<line_sep>self.builtin_policies[LongformerPreTrainedModel]=[LongformerPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.roformer.modeling_roformer RoFormerPreTrainedModel <import_from_stmt>parallelformers.policies.roformer RoformerPolicy<line_sep>self.builtin_policies[RoFormerPreTrainedModel]=[RoformerPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.ibert.modeling_ibert IBertPreTrainedModel <import_from_stmt>parallelformers.policies.ibert IBertPolicy<line_sep>self.builtin_policies[IBertPreTrainedModel]=[IBertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.tapas.modeling_tapas TapasPreTrainedModel <import_from_stmt>parallelformers.policies.tapas TapasPolicy<line_sep>self.builtin_policies[TapasPreTrainedModel]=[TapasPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.funnel.modeling_funnel FunnelPreTrainedModel <import_from_stmt>parallelformers.policies.funnel 
FunnelPolicy<line_sep>self.builtin_policies[FunnelPreTrainedModel]=[FunnelPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.layoutlm.modeling_layoutlm LayoutLMPreTrainedModel <import_from_stmt>parallelformers.policies.layoutlm LayoutLMPolicy<line_sep>self.builtin_policies[LayoutLMPreTrainedModel]=[LayoutLMPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.led.modeling_led LEDPreTrainedModel<import_from_stmt>parallelformers.policies.led LEDDecoderPolicy LEDEncoderPolicy <line_sep>self.builtin_policies[LEDPreTrainedModel]=[LEDEncoderPolicy LEDDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.prophetnet.modeling_prophetnet ProphetNetPreTrainedModel <import_from_stmt>parallelformers.policies.prophetnet ProphetNetDecoderPolicy ProphetNetEncoderPolicy <line_sep>self.builtin_policies[ProphetNetPreTrainedModel]=[ProphetNetEncoderPolicy ProphetNetDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.visual_bert.modeling_visual_bert VisualBertPreTrainedModel <import_from_stmt>parallelformers.policies.visual_bert VisualBertPolicy<line_sep>self.builtin_policies[VisualBertPreTrainedModel]=[VisualBertPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.speech_to_text.modeling_speech_to_text Speech2TextPreTrainedModel <import_from_stmt>parallelformers.policies.speech_to_text Speech2TextDecoderPolicy Speech2TextEncoderPolicy <line_sep>self.builtin_policies[Speech2TextPreTrainedModel]=[Speech2TextEncoderPolicy Speech2TextDecoderPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.gptj.modeling_gptj GPTJPreTrainedModel <import_from_stmt>parallelformers.policies.gptj GPTJPolicy<line_sep>self.builtin_policies[GPTJPreTrainedModel]=[GPTJPolicy ]<block_end><with_stmt>suppress(Exception)<block_start><import_from_stmt>transformers.models.megatron_bert MegatronBertPreTrainedModel <import_from_stmt>parallelformers.policies.megtron_bert MegatronBertPolicy <line_sep>self.builtin_policies[MegatronBertPreTrainedModel]=[MegatronBertPolicy ]<block_end><block_end><def_stmt>get_policy self model:nn.Module<arrow>Union[List[Policy] <none>]<block_start>""" Find appropriate policies for the current model Args: model (nn.Module): model to parallelize Returns: Union[List[Policy], None]: appropriate policies or none """<for_stmt>k,v self.available().items()<block_start><if_stmt>isinstance(model k)<block_start><return>v<block_end><block_end><return><none><block_end><def_stmt>available self<block_start>"""Dictionary of available models and policies"""<line_sep><return>self.builtin_policies<block_end><block_end>
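A brief usage sketch of the policy lookup defined above, in plain Python. It assumes transformers and parallelformers are installed; the checkpoint name is illustrative.

# Hedged usage sketch for AutoPolicy.get_policy (checkpoint name illustrative).
from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")
policies = AutoPolicy().get_policy(model)   # e.g. [BertPolicy]; None if the model is unsupported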
<import_from_stmt>.core EqualityHashKey unzip<import_from_stmt>.parallel fold<line_sep>
# Original work Copyright 2018 The Google AI Language Team Authors. # Modified work Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_from_stmt>lm.modeling model_fn_builder GroverConfig<import_stmt>tensorflow<as>tf<import_from_stmt>lm.dataloader input_fn_builder<import_stmt>numpy<as>np<import_stmt>tempfile<import_stmt>h5py<import_from_stmt>google.cloud storage<line_sep>flags=tf.flags<line_sep>FLAGS=flags.FLAGS<line_sep>## Required parameters flags.DEFINE_string("config_file" 'configs/base.json' "The config json file corresponding to the pre-trained news model. "<concat>"This specifies the model architecture.")<line_sep>flags.DEFINE_string("input_file" <none> "Input TF example files (can be a glob or comma separated).")<line_sep>flags.DEFINE_string("output_dir" <none> "The output directory where the model checkpoints will be written.")<line_sep>flags.DEFINE_string("validation_name" 'preds.h5' "Name to use")<line_sep>## Other parameters flags.DEFINE_string("init_checkpoint" <none> "Initial checkpoint (usually from a pre-trained model).")<line_sep>flags.DEFINE_integer("max_seq_length" 1024 "The maximum total input sequence length after WordPiece tokenization. "<concat>"Sequences longer than this will be truncated, and sequences shorter "<concat>"than this will be padded. Must match data generation.")<line_sep>flags.DEFINE_integer("iterations_per_loop" 1000 "How many steps to make in each estimator call.")<line_sep>flags.DEFINE_integer("batch_size" 32 "Batch size used for eval")<line_sep>flags.DEFINE_bool("use_tpu" <false> "Whether to use TPU or GPU/CPU.")<line_sep>flags.DEFINE_string("tpu_name" <none> "The Cloud TPU to use for training. This should be either the name "<concat>"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "<concat>"url.")<line_sep>flags.DEFINE_string("tpu_zone" <none> "[Optional] GCE zone where the Cloud TPU is located in. If not "<concat>"specified, we will attempt to automatically detect the GCE project from "<concat>"metadata.")<line_sep>flags.DEFINE_string("gcp_project" <none> "[Optional] Project name for the Cloud TPU-enabled project. If not "<concat>"specified, we will attempt to automatically detect the GCE project from "<concat>"metadata.")<line_sep>flags.DEFINE_string("master" <none> "[Optional] TensorFlow master URL.")<line_sep>flags.DEFINE_integer("num_tpu_cores" 8 "Only used if `use_tpu` is True. 
Total number of TPU cores to use.")<line_sep># This is a handy little utility so that we can save the perplexities to TPU <class_stmt>gcloudwriter()<block_start><def_stmt>__init__ self gcloud_name<block_start><assert_stmt>gcloud_name.startswith('gs://')<line_sep>self.gcloud_name=gcloud_name<line_sep>bucket_name,blob_name=gcloud_name.split('gs://')[1].split('/' 1)<line_sep>bucket=storage.Client().get_bucket(bucket_name)<line_sep>self.blob=bucket.blob(blob_name)<block_end><def_stmt>__enter__ self<block_start>self.tempfile=tempfile.NamedTemporaryFile()<line_sep><return>self.tempfile<block_end><def_stmt>__exit__ self *args<block_start>self.tempfile.flush()<line_sep>print("UPLOADING TO {}".format(self.gcloud_name) flush=<true>)<line_sep>self.blob.upload_from_filename(self.tempfile.name)<line_sep>self.tempfile.close()<block_end><block_end><def_stmt>ind_where array:np.ndarray target return_first_match=<true> default_value=-1<block_start>""" :param array: Single dimension array :param target: target to search for :param return_first_match: If true, return the first index that matches, otherwise, return the last one :param default_value: Index to return if there was no match :return: index of the first match, or -1 if nothing """<assert_stmt>array.ndim<eq>1<line_sep>matching_inds=np.where(array<eq>target)[0]<if_stmt>len(matching_inds)<g>0<block_start><if_stmt>return_first_match<block_start><return>int(matching_inds[0])<block_end><else_stmt><block_start><return>int(matching_inds[-1])<block_end><block_end><return>default_value<block_end><def_stmt>main _<block_start>tf.logging.set_verbosity(tf.logging.INFO)<line_sep>news_config=GroverConfig.from_json_file(FLAGS.config_file)<line_sep>tf.gfile.MakeDirs(FLAGS.output_dir)<line_sep>input_files=[]<for_stmt>input_pattern FLAGS.input_file.split(",")<block_start>input_files.extend(tf.gfile.Glob(input_pattern))<block_end>tf.logging.info("*** Input Files ***")<for_stmt>input_file input_files<block_start>tf.logging.info(" %s"%input_file)<block_end>tpu_cluster_resolver=<none><if_stmt>FLAGS.use_tpu<and>FLAGS.tpu_name<block_start>tpu_cluster_resolver=tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name zone=FLAGS.tpu_zone project=FLAGS.gcp_project)<block_end>is_per_host=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2<line_sep>run_config=tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver master=FLAGS.master model_dir=FLAGS.output_dir save_checkpoints_steps=FLAGS.iterations_per_loop keep_checkpoint_max=<none> tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop num_shards=FLAGS.num_tpu_cores per_host_input_for_training=is_per_host))<line_sep>model_fn=model_fn_builder(news_config init_checkpoint=FLAGS.init_checkpoint learning_rate=1e-4 num_train_steps=0 num_warmup_steps=0 use_tpu=FLAGS.use_tpu )<line_sep># If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator=tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu model_fn=model_fn config=run_config train_batch_size=FLAGS.batch_size eval_batch_size=FLAGS.batch_size predict_batch_size=FLAGS.batch_size params={'model_dir':FLAGS.output_dir})<line_sep>eval_input_fn=input_fn_builder(input_files=input_files seq_length=FLAGS.max_seq_length evaluate_for_fixed_number_of_steps=<false> num_cpu_threads=1 is_training=<false>)<line_sep>result=[x<for>x estimator.predict(input_fn=eval_input_fn yield_single_examples=<true>)]<line_sep>cats=sorted(result[0].keys())<line_sep>result_stack={cat:np.stack([x[cat]<for>x result])<for>cat cats}<with_stmt>gcloudwriter(os.path.join(FLAGS.output_dir FLAGS.validation_name))<as>tempfile_name<block_start><with_stmt>h5py.File(tempfile_name 'w')<as>h5<block_start><for_stmt>cat,data result_stack.items()<block_start>dtype2use=np.float16<if>cat.endswith(('logprobs' 'top_p_required'))<else>np.uint16<line_sep>h5.create_dataset(cat data=data.astype(dtype2use))<block_end>h5.create_dataset('model' data=FLAGS.config_file)<line_sep>h5.create_dataset('ckpt' data=FLAGS.init_checkpoint)<line_sep>h5.create_dataset('input_file' data=FLAGS.input_file)<block_end><block_end># This gives the perplexity of the entire article. if you want to replicate the results of the paper you # might need to do something different to extract the ppl of just the body in particular. ppl_ex=[]<for_stmt>logprobs_i,ids_i zip(result_stack['gt_logprobs'] result_stack['labels'])# Omit the first token. Keep in mind input_ids is shifted by 1 <block_start>start_ind=ind_where(ids_i target=50265 default_value=0)<line_sep>end_ind=ind_where(ids_i target=50266 default_value=ids_i.shape[0]-1)<line_sep>ppl_ex.append(logprobs_i[start_ind:end_ind])<block_end>ppl_ex=np.concatenate(ppl_ex 0)<line_sep>print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))) flush=<true>)<block_end><if_stmt>__name__<eq>"__main__"<block_start>flags.mark_flag_as_required("input_file")<line_sep>flags.mark_flag_as_required("output_dir")<line_sep>tf.app.run()<block_end>
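A hedged invocation sketch for the validation script above, expressed as comments. The module name and the GCS paths are assumptions, not taken from the file; only the flag names come from the definitions above.

# Hedged invocation sketch (module name and paths are assumptions):
#   python -m lm.validate \
#       --config_file=configs/base.json \
#       --input_file=gs://my-bucket/val_*.tfrecord \
#       --output_dir=gs://my-bucket/validation \
#       --init_checkpoint=gs://my-bucket/model.ckpt-100000 \
#       --batch_size=32
# The run writes FLAGS.validation_name ("preds.h5") into output_dir and prints the article
# perplexity; output_dir must be a gs:// path because gcloudwriter asserts it.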
# -*- coding: utf-8 -*- # Thanks to @skelsec for his awesome tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz <import_stmt>codecs<import_stmt>traceback<import_from_stmt>lazagne.config.module_info ModuleInfo<import_from_stmt>lazagne.config.constant constant<import_from_stmt>pypykatz.pypykatz pypykatz<class_stmt>Pypykatz(ModuleInfo)<block_start>""" Pypykatz dumps all secrets from the lsass.exe memory It does not work if: - LSASS is running as a protected process - A security product blocks this access """<def_stmt>__init__ self<block_start>ModuleInfo.__init__(self 'pypykatz' 'windows' system_module=<true>)<block_end><def_stmt>run self<block_start>mimi=<none><try_stmt><block_start>mimi=pypykatz.go_live()<block_end><except_stmt>Exception<block_start>self.debug(traceback.format_exc())<block_end><if_stmt>mimi<block_start>results={}<line_sep>logon_sessions=mimi.to_dict().get('logon_sessions' [])<for_stmt>logon_session logon_sessions# Right now kerberos_creds, dpapi_creds results are not used <block_start>user=logon_sessions[logon_session]<line_sep># Get cleartext password <for_stmt>i ['credman_creds' 'ssp_creds' 'livessp_creds' 'tspkg_creds' 'wdigest_creds']<block_start><for_stmt>data user.get(i [])<block_start><if_stmt>all((data['username'] data['password']))<block_start>login=data['username']<if_stmt>login<not><in>results<block_start>results[login]={}<block_end>results[login]['Type']=i<line_sep>results[login]['Domain']=data.get('domainname' 'N/A')<line_sep>results[login]['Password']=data['password']<block_end><block_end><block_end># msv_creds to get sha1 user hash <for_stmt>data user.get('msv_creds' [])<block_start><if_stmt>data['username']<block_start>login=data['username']<block_end><else_stmt><block_start>login=user['username']<block_end><if_stmt>login<not><in>results<block_start>results[login]={}<block_end><if_stmt>data['SHAHash']<block_start>results[login]['Shahash']=codecs.encode(data['SHAHash'] 'hex')<block_end><if_stmt>data['LMHash']<block_start>results[login]['Lmhash']=codecs.encode(data['LMHash'] 'hex')<block_end><if_stmt>data['NThash']<block_start>results[login]['Nthash']=codecs.encode(data['NThash'] 'hex')<block_end><block_end><block_end>constant.pypykatz_result=results<line_sep>pwd_found=[]<for_stmt>user results<block_start>results[user]['Login']=user<line_sep>pwd_found.append(results[user])<block_end><return>pwd_found<block_end><block_end><block_end>
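A small sketch, in plain Python, of how the module above is consumed when run standalone; as the docstring notes, it needs administrator rights and an LSASS that is not protected.

# Hedged sketch: iterate over the credential dicts built in run().
for cred in Pypykatz().run() or []:
    print(cred.get("Login"), cred.get("Password") or cred.get("Nthash"))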
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>datetime datetime<import_from_stmt>qf_lib.common.tickers.tickers Ticker<import_from_stmt>qf_lib.containers.dataframe.prices_dataframe PricesDataFrame<class_stmt>FutureContract(object)<block_start>""" Class representing a single future contract. The FutureContract is a simple class representing one futures contract. The FutureContract objects are used by the FuturesChain, in order to provide the contracts chaining possibilities. It requires 3 parameters: ticker, which is the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the contract and a PricesDataFrame, containing dates with price field values. Parameters ---------- ticker: Ticker symbol of the future contract exp_date: datetime expiration date data: PricesDataFrame data frame containing dates with price fields values """<def_stmt>__init__ self ticker:Ticker exp_date:datetime data:PricesDataFrame<block_start>self.ticker=ticker<line_sep>self.exp_date=exp_date<line_sep>self.data=data<block_end><def_stmt>__str__ self<block_start><return>'Contract: ticker: {}, expiration date: {}'.format(self.ticker self.exp_date)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>self<is>other<block_start><return><true><block_end><if_stmt><not>isinstance(other FutureContract)<block_start><return><false><block_end><return>(self.ticker self.exp_date self.data)<eq>(other.ticker other.exp_date other.data)<block_end><def_stmt>__hash__ self<block_start><return>hash((self.ticker self.exp_date self.data))<block_end><block_end>
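A construction sketch for the class above; `ticker` and `prices` are placeholders standing in for a real Ticker and PricesDataFrame.

# Hedged sketch (ticker and prices are placeholders for real objects).
from datetime import datetime

contract = FutureContract(ticker=ticker, exp_date=datetime(2019, 12, 19), data=prices)
print(contract)   # Contract: ticker: ..., expiration date: 2019-12-19 00:00:00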
<import_stmt>operator_benchmark<as>op_bench<import_stmt>torch<import_stmt>numpy<import_from_stmt>. configs<line_sep>"""EmbeddingBag Operator Benchmark"""<class_stmt>EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase)<block_start><def_stmt>init self embeddingbags dim mode input_size offset sparse include_last_offset device<block_start>self.embedding=torch.nn.EmbeddingBag(num_embeddings=embeddingbags embedding_dim=dim mode=mode include_last_offset=include_last_offset sparse=sparse).to(device=device)<line_sep>numpy.random.seed((1<lshift>32)-1)<line_sep>self.input=torch.tensor(numpy.random.randint(0 embeddingbags input_size) device=device).long()<line_sep>offsets=torch.LongTensor([offset] device=device)<line_sep>self.offset=torch.cat((offsets torch.tensor([self.input.size(0)] dtype=torch.long)) 0)<line_sep>self.set_module_name('embeddingbag')<block_end><def_stmt>forward self<block_start><return>self.embedding(self.input self.offset)<block_end><block_end>op_bench.generate_pt_test(configs.embeddingbag_short_configs EmbeddingBagBenchmark)<line_sep>op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs EmbeddingBagBenchmark)<if_stmt>__name__<eq>"__main__"<block_start>op_bench.benchmark_runner.main()<block_end>
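A hedged note on running the benchmark above: generate_pt_test and generate_pt_gradient_test register the configs with the runner, so executing the file directly drives op_bench.benchmark_runner.main(). The file name below is an assumption.

# Hedged invocation sketch (file name is an assumption):
#   python embeddingbag_test.py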
"""Python interfaces to DGL farthest point sampler."""<import_from_stmt>dgl._ffi.base DGLError<import_stmt>numpy<as>np<import_from_stmt>.._ffi.function _init_api<import_from_stmt>.. backend<as>F<import_from_stmt>.. ndarray<as>nd<def_stmt>_farthest_point_sampler data batch_size sample_points dist start_idx result<block_start>r"""Farthest Point Sampler Parameters ---------- data : tensor A tensor of shape (N, d) where N is the number of points and d is the dimension. batch_size : int The number of batches in the ``data``. N should be divisible by batch_size. sample_points : int The number of points to sample in each batch. dist : tensor Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx : tensor of int Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch. result : tensor of int Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index. Returns ------- No return value. The input variable ``result`` will be overwriten with sampled indices. """<assert_stmt>F.shape(data)[0]<ge>sample_points<times>batch_size<assert_stmt>F.shape(data)[0]%batch_size<eq>0<line_sep>_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data) batch_size sample_points F.zerocopy_to_dgl_ndarray(dist) F.zerocopy_to_dgl_ndarray(start_idx) F.zerocopy_to_dgl_ndarray(result))<block_end><def_stmt>_neighbor_matching graph_idx num_nodes edge_weights=<none> relabel_idx=<true><block_start>""" Description ----------- The neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an unmarked vertex and matching it with one its unmarked neighbors (that maximizes its edge weight) until no match can be done. If no edge weight is given, this procedure will randomly pick neighbor for each vertex. The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int The number of nodes in this homogeneous graph. edge_weight : tensor, optional The edge weight tensor holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting node labels to have consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor A vector with each element that indicates the cluster ID of a vertex. """<line_sep>edge_weight_capi=nd.NULL["int64"]<if_stmt>edge_weights<is><not><none><block_start>edge_weight_capi=F.zerocopy_to_dgl_ndarray(edge_weights)<block_end>node_label=F.full_1d(num_nodes -1 getattr(F graph_idx.dtype) F.to_backend_ctx(graph_idx.ctx))<line_sep>node_label_capi=F.zerocopy_to_dgl_ndarray_for_write(node_label)<line_sep>_CAPI_NeighborMatching(graph_idx edge_weight_capi node_label_capi)<if_stmt>F.reduce_sum(node_label<l>0).item()<ne>0<block_start><raise>DGLError("Find unmatched node")<block_end># reorder node id # TODO: actually we can add `return_inverse` option for `unique` # function in backend for efficiency. 
<if_stmt>relabel_idx<block_start>node_label_np=F.zerocopy_to_numpy(node_label)<line_sep>_,node_label_np=np.unique(node_label_np return_inverse=<true>)<line_sep><return>F.tensor(node_label_np)<block_end><else_stmt><block_start><return>node_label<block_end><block_end>_init_api('dgl.geometry' __name__)<line_sep>
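A sketch of the pre-allocation contract described in the _farthest_point_sampler docstring above, assuming the PyTorch backend; shapes follow the parameter descriptions.

# Hedged sketch of the caller-side allocation (PyTorch backend assumed).
import torch

N, batch_size, sample_points, dim = 8, 2, 2, 3
data = torch.rand(N, dim)                                      # (N, d) points
dist = torch.zeros(N)                                          # to-sample distances
start_idx = torch.zeros(batch_size, dtype=torch.long)          # starting sample per batch
result = torch.zeros(sample_points * batch_size, dtype=torch.long)
_farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result)
# result is overwritten in place with the sampled indices for both batches.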
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>numpy<as>np<import_from_stmt>.postprocessor PostprocessorWithSpecificTargets<import_from_stmt>..representation BrainTumorSegmentationAnnotation BrainTumorSegmentationPrediction<import_from_stmt>..config NumberField ConfigError<class_stmt>ClipSegmentationMask(PostprocessorWithSpecificTargets)<block_start>__provider__='clip_segmentation_mask'<line_sep>annotation_types=(BrainTumorSegmentationAnnotation )<line_sep>prediction_types=(BrainTumorSegmentationPrediction )<line_sep>@classmethod<def_stmt>parameters cls<block_start>parameters=super().parameters()<line_sep>parameters.update({'min_value':NumberField(value_type=int min_value=0 optional=<true> default=0 description="Min value") 'max_value':NumberField(value_type=int description="Max value")})<line_sep><return>parameters<block_end><def_stmt>configure self<block_start>self.min_value=self.get_value_from_config('min_value')<line_sep>self.max_value=self.get_value_from_config('max_value')<if_stmt>self.max_value<l>self.min_value<block_start><raise>ConfigError('max_value should be greater than min_value')<block_end><block_end><def_stmt>process_image self annotation prediction<block_start><for_stmt>target annotation<block_start>target.mask=np.clip(target.mask a_min=self.min_value a_max=self.max_value)<block_end><for_stmt>target prediction<block_start>target.mask=np.clip(target.mask a_min=self.min_value a_max=self.max_value)<block_end><return>annotation prediction<block_end><block_end>
# Copyright 2013 <NAME> and individual contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pytest<import_stmt>nacl.secret<import_stmt>nacl.utils<def_stmt>test_random_bytes_produces <block_start><assert_stmt>len(nacl.utils.random(16))<eq>16<block_end><def_stmt>test_random_bytes_produces_different_bytes <block_start><assert_stmt>nacl.utils.random(16)<ne>nacl.utils.random(16)<block_end><def_stmt>test_string_fixer <block_start><assert_stmt>str(nacl.secret.SecretBox(b"\x00"<times>32))<eq>str(b"\x00"<times>32)<block_end><def_stmt>test_deterministic_random_bytes <block_start>expected=(b"0d8e6cc68715648926732e7ea73250cfaf2d58422083904c841a8ba"<concat>b"33b986111f346ba50723a68ae283524a6bded09f83be6b80595856f"<concat>b"72e25b86918e8b114bafb94bc8abedd73daab454576b7c5833eb0bf"<concat>b"982a1bb4587a5c970ff0810ca3b791d7e12")<line_sep>seed=(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d"<concat>b"\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b"<concat>b"\x1c\x1d\x1e\x1f")<assert_stmt>(nacl.utils.randombytes_deterministic(100 seed encoder=nacl.utils.encoding.HexEncoder)<eq>expected)<block_end><def_stmt>test_deterministic_random_bytes_invalid_seed_length <block_start>expected="Deterministic random bytes must be generated from 32 bytes"<line_sep>seed=b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a"<with_stmt>pytest.raises(TypeError)<as>e<block_start>nacl.utils.randombytes_deterministic(100 seed)<block_end><assert_stmt>expected<in>str(e.value)<block_end>
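A short sketch of the deterministic generator exercised in the tests above: the same 32-byte seed always yields the same stream.

# Hedged sketch: identical seeds give identical bytes.
import nacl.utils

seed = b"\x01" * 32
assert nacl.utils.randombytes_deterministic(16, seed) == nacl.utils.randombytes_deterministic(16, seed)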
<import_from_stmt>pyspark.sql Row<import_from_stmt>snorkel.labeling.lf labeling_function<import_from_stmt>snorkel.labeling.lf.nlp_spark spark_nlp_labeling_function<import_from_stmt>snorkel.preprocess preprocessor<import_from_stmt>drybell_lfs load_celebrity_knowledge_base<line_sep>ABSTAIN=-1<line_sep>NEGATIVE=0<line_sep>POSITIVE=1<line_sep>@preprocessor()<def_stmt>combine_text x<block_start><return>Row(title=x.title body=x.body article=f"{x.title} {x.body}")<block_end>@spark_nlp_labeling_function(text_field="article" pre=[combine_text])<def_stmt>article_mentions_person x<block_start><for_stmt>ent x.doc.ents<block_start><if_stmt>ent.label_<eq>"PERSON"<block_start><return>ABSTAIN<block_end><block_end><return>NEGATIVE<block_end>@spark_nlp_labeling_function(text_field="article" pre=[combine_text] resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()) )<def_stmt>person_in_db x celebrity_knowledge_base<block_start><for_stmt>ent x.doc.ents<block_start><if_stmt>ent.label_<eq>"PERSON"<and>ent.text.lower()<in>celebrity_knowledge_base<block_start><return>POSITIVE<block_end><block_end><return>ABSTAIN<block_end>@labeling_function()<def_stmt>body_contains_fortune x<block_start><return>POSITIVE<if>"fortune"<in>x.body<else>ABSTAIN<block_end>
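A sketch of applying the labeling functions above to a Spark RDD of article Rows. The SparkLFApplier import path is an assumption based on the snorkel 0.9-era package layout, and `articles_rdd` is a placeholder.

# Hedged application sketch (import path and articles_rdd are assumptions).
from snorkel.labeling.apply.spark import SparkLFApplier

lfs = [article_mentions_person, person_in_db, body_contains_fortune]
applier = SparkLFApplier(lfs)
label_matrix = applier.apply(articles_rdd)   # one row of labels per article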
# terrascript/resource/sematext.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:26:36 UTC) # # For imports without namespace, e.g. # # >>> import terrascript.resource.sematext # # instead of # # >>> import terrascript.resource.sematext.sematext # # This is only available for 'official' and 'partner' providers. <import_from_stmt>terrascript.resource.sematext.sematext *<line_sep>
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for jax_md.space."""<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_from_stmt>jax.config config<as>jax_config<import_from_stmt>jax random<import_stmt>jax.numpy<as>jnp<import_from_stmt>jax grad jit jacfwd<import_from_stmt>jax test_util<as>jtu<import_from_stmt>jax_md space test_util quantity energy<import_from_stmt>jax_md.util *<import_from_stmt>functools partial<import_from_stmt>unittest SkipTest<line_sep>test_util.update_test_tolerance(5e-5 5e-13)<line_sep>jax_config.parse_flags_with_absl()<line_sep>jax_config.enable_omnistaging()<line_sep>FLAGS=jax_config.FLAGS<line_sep>PARTICLE_COUNT=10<line_sep>STOCHASTIC_SAMPLES=10<line_sep>SHIFT_STEPS=10<line_sep>SPATIAL_DIMENSION=[2 3]<line_sep>BOX_FORMATS=['scalar' 'vector' 'matrix']<if_stmt>FLAGS.jax_enable_x64<block_start>POSITION_DTYPE=[f32 f64]<block_end><else_stmt><block_start>POSITION_DTYPE=[f32]<block_end><def_stmt>make_periodic_general_test_system N dim dtype box_format<block_start><assert_stmt>box_format<in>BOX_FORMATS<line_sep>box_size=quantity.box_size_at_number_density(N 1.0 dim)<line_sep>box=dtype(box_size)<if_stmt>box_format<eq>'vector'<block_start>box=jnp.array(jnp.ones(dim)<times>box_size dtype)<block_end><elif_stmt>box_format<eq>'matrix'<block_start>box=jnp.array(jnp.eye(dim)<times>box_size dtype)<block_end>d,s=space.periodic(jnp.diag(box)<if>box_format<eq>'matrix'<else>box)<line_sep>d_gf,s_gf=space.periodic_general(box)<line_sep>d_g,s_g=space.periodic_general(box fractional_coordinates=<false>)<line_sep>key=random.PRNGKey(0)<line_sep>R_f=random.uniform(key (N dim) dtype=dtype)<line_sep>R=space.transform(box R_f)<line_sep>E=jit(energy.soft_sphere_pair(d))<line_sep>E_gf=jit(energy.soft_sphere_pair(d_gf))<line_sep>E_g=jit(energy.soft_sphere_pair(d_g))<line_sep><return>R_f R box (s E) (s_gf E_gf) (s_g E_g)<block_end># pylint: disable=invalid-name <class_stmt>SpaceTest(jtu.JaxTestCase)# pylint: disable=g-complex-comprehension <block_start>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_transform self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2=random.split(key 3)<line_sep>R=random.normal(split1 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>T=random.normal(split2 (spatial_dimension spatial_dimension) dtype=dtype)<line_sep>R_prime_exact=jnp.array(jnp.einsum('ij,kj->ki' T R) dtype=dtype)<line_sep>R_prime=space.transform(T R)<line_sep>self.assertAllClose(R_prime_exact R_prime)<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}'.format(dim) 'spatial_dimension':dim}<for>dim SPATIAL_DIMENSION))<def_stmt>test_transform_grad self spatial_dimension<block_start>key=random.PRNGKey(0)<for_stmt>_ 
range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2=random.split(key 3)<line_sep>R=random.normal(split1 (PARTICLE_COUNT spatial_dimension))<line_sep>T=random.normal(split2 (spatial_dimension spatial_dimension))<line_sep>R_prime=space.transform(T R)<line_sep>energy_direct=<lambda>R:jnp.sum(R<power>2)<line_sep>energy_indirect=<lambda>T R:jnp.sum(space.transform(T R)<power>2)<line_sep>grad_direct=grad(energy_direct)(R_prime)<line_sep>grad_indirect=grad(energy_indirect 1)(T R)<line_sep>self.assertAllClose(grad_direct grad_indirect)<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_transform_inverse self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>tol=1e-13<if_stmt>dtype<is>f32<block_start>tol=1e-5<block_end><for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2=random.split(key 3)<line_sep>R=random.normal(split1 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>T=random.normal(split2 (spatial_dimension spatial_dimension) dtype=dtype)<line_sep>T_inv=space.inverse(T)<line_sep>R_test=space.transform(T_inv space.transform(T R))<line_sep>self.assertAllClose(R R_test)<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_canonicalize_displacement_or_metric self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>displacement,_=space.periodic_general(jnp.eye(spatial_dimension))<line_sep>metric=space.metric(displacement)<line_sep>test_metric=space.canonicalize_displacement_or_metric(displacement)<line_sep>metric=space.map_product(metric)<line_sep>test_metric=space.map_product(test_metric)<for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2=random.split(key 3)<line_sep>R=random.normal(split1 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>self.assertAllClose(metric(R R) test_metric(R R))<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_displacement self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split=random.split(key)<line_sep>R=random.uniform(split (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>dR=space.map_product(space.pairwise_displacement)(R R)<line_sep>dR_wrapped=space.periodic_displacement(f32(1.0) dR)<line_sep>dR_direct=dR<line_sep>dr_direct=space.distance(dR)<line_sep>dr_direct=jnp.reshape(dr_direct dr_direct.shape+(1 ))<if_stmt>spatial_dimension<eq>2<block_start><for_stmt>i range(-1 2)<block_start><for_stmt>j range(-1 2)<block_start>dR_shifted=dR+jnp.array([i j] dtype=R.dtype)<line_sep>dr_shifted=space.distance(dR_shifted)<line_sep>dr_shifted=jnp.reshape(dr_shifted dr_shifted.shape+(1 ))<line_sep>dR_direct=jnp.where(dr_shifted<l>dr_direct dR_shifted dR_direct)<line_sep>dr_direct=jnp.where(dr_shifted<l>dr_direct dr_shifted dr_direct)<block_end><block_end><block_end><elif_stmt>spatial_dimension<eq>3<block_start><for_stmt>i range(-1 2)<block_start><for_stmt>j range(-1 2)<block_start><for_stmt>k range(-1 2)<block_start>dR_shifted=dR+jnp.array([i j k] 
dtype=R.dtype)<line_sep>dr_shifted=space.distance(dR_shifted)<line_sep>dr_shifted=jnp.reshape(dr_shifted dr_shifted.shape+(1 ))<line_sep>dR_direct=jnp.where(dr_shifted<l>dr_direct dR_shifted dR_direct)<line_sep>dr_direct=jnp.where(dr_shifted<l>dr_direct dr_shifted dr_direct)<block_end><block_end><block_end><block_end>dR_direct=jnp.array(dR_direct dtype=dR.dtype)<assert_stmt>dR_wrapped.dtype<eq>dtype<line_sep>self.assertAllClose(dR_wrapped dR_direct)<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_shift self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2=random.split(key 3)<line_sep>R=random.uniform(split1 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>dR=jnp.sqrt(f32(0.1))<times>random.normal(split2 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>dR=jnp.where(dR<g>0.49 f32(0.49) dR)<line_sep>dR=jnp.where(dR<l>-0.49 f32(-0.49) dR)<line_sep>R_shift=space.periodic_shift(f32(1.0) R dR)<assert_stmt>R_shift.dtype<eq>R.dtype<assert_stmt>jnp.all(R_shift<l>1.0)<assert_stmt>jnp.all(R_shift<g>0.0)<line_sep>dR_after=space.periodic_displacement(f32(1.0) R_shift-R)<assert_stmt>dR_after.dtype<eq>R.dtype<line_sep>self.assertAllClose(dR_after dR)<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_against_periodic_general self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>tol=1e-13<if_stmt>dtype<is>f32<block_start>tol=1e-5<block_end><for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2,split3=random.split(key 4)<line_sep>max_box_size=f32(10.0)<line_sep>box_size=max_box_size<times>random.uniform(split1 (spatial_dimension ) dtype=dtype)<line_sep>transform=jnp.diag(box_size)<line_sep>R=random.uniform(split2 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>R_scaled=R<times>box_size<line_sep>dR=random.normal(split3 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>disp_fn,shift_fn=space.periodic(box_size)<line_sep>general_disp_fn,general_shift_fn=space.periodic_general(transform)<line_sep>disp_fn=space.map_product(disp_fn)<line_sep>general_disp_fn=space.map_product(general_disp_fn)<line_sep>self.assertAllClose(disp_fn(R_scaled R_scaled) general_disp_fn(R R))<assert_stmt>disp_fn(R_scaled R_scaled).dtype<eq>dtype<line_sep>self.assertAllClose(shift_fn(R_scaled dR) general_shift_fn(R dR)<times>box_size)<assert_stmt>shift_fn(R_scaled dR).dtype<eq>dtype<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_against_periodic_general_grad self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>tol=1e-13<if_stmt>dtype<is>f32<block_start>tol=1e-5<block_end><for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split1,split2,split3=random.split(key 4)<line_sep>max_box_size=f32(10.0)<line_sep>box_size=max_box_size<times>random.uniform(split1 (spatial_dimension ) dtype=dtype)<line_sep>transform=jnp.diag(box_size)<line_sep>R=random.uniform(split2 (PARTICLE_COUNT 
spatial_dimension) dtype=dtype)<line_sep>R_scaled=R<times>box_size<line_sep>dR=random.normal(split3 (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>disp_fn,shift_fn=space.periodic(box_size)<line_sep>general_disp_fn,general_shift_fn=space.periodic_general(transform)<line_sep>disp_fn=space.map_product(disp_fn)<line_sep>general_disp_fn=space.map_product(general_disp_fn)<line_sep>grad_fn=grad(<lambda>R:jnp.sum(disp_fn(R R)<power>2))<line_sep>general_grad_fn=grad(<lambda>R:jnp.sum(general_disp_fn(R R)<power>2))<line_sep>self.assertAllClose(grad_fn(R_scaled) general_grad_fn(R))<assert_stmt>general_grad_fn(R).dtype<eq>dtype<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype }<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_general_dynamic self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>eye=jnp.eye(spatial_dimension)<for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split_T0_scale,split_T0_dT=random.split(key 3)<line_sep>key,split_T1_scale,split_T1_dT=random.split(key 3)<line_sep>key,split_t,split_R,split_dR=random.split(key 4)<line_sep>size_0=10.0<times>random.uniform(split_T0_scale ())<line_sep>dtransform_0=0.5<times>random.normal(split_T0_dT (spatial_dimension spatial_dimension))<line_sep>T_0=jnp.array(size_0<times>(eye+dtransform_0) dtype=dtype)<line_sep>size_1=10.0<times>random.uniform(split_T1_scale () dtype=dtype)<line_sep>dtransform_1=0.5<times>random.normal(split_T1_dT (spatial_dimension spatial_dimension) dtype=dtype)<line_sep>T_1=jnp.array(size_1<times>(eye+dtransform_1) dtype=dtype)<line_sep>disp_fn,shift_fn=space.periodic_general(T_0)<line_sep>true_disp_fn,true_shift_fn=space.periodic_general(T_1)<line_sep>disp_fn=partial(disp_fn box=T_1)<line_sep>disp_fn=space.map_product(disp_fn)<line_sep>true_disp_fn=space.map_product(true_disp_fn)<line_sep>R=random.uniform(split_R (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>dR=random.normal(split_dR (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>self.assertAllClose(disp_fn(R R) jnp.array(true_disp_fn(R R) dtype=dtype))<line_sep>self.assertAllClose(shift_fn(R dR box=T_1) jnp.array(true_shift_fn(R dR) dtype=dtype))<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':'_dim={}_dtype={}'.format(dim dtype.__name__) 'spatial_dimension':dim 'dtype':dtype }<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE))<def_stmt>test_periodic_general_wrapped_vs_unwrapped self spatial_dimension dtype<block_start>key=random.PRNGKey(0)<line_sep>eye=jnp.eye(spatial_dimension dtype=dtype)<line_sep>tol=1e-13<if_stmt>dtype<is>f32<block_start>tol=2e-5<block_end><for_stmt>_ range(STOCHASTIC_SAMPLES)<block_start>key,split_R,split_T=random.split(key 3)<line_sep>dT=random.normal(split_T (spatial_dimension spatial_dimension) dtype=dtype)<line_sep>T=eye+dT+jnp.transpose(dT)<line_sep>R=random.uniform(split_R (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>R0=R<line_sep>unwrapped_R=R<line_sep>displacement,shift=space.periodic_general(T)<line_sep>_,unwrapped_shift=space.periodic_general(T wrapped=<false>)<line_sep>displacement=space.map_product(displacement)<for_stmt>_ range(SHIFT_STEPS)<block_start>key,split=random.split(key)<line_sep>dR=random.normal(split (PARTICLE_COUNT spatial_dimension) dtype=dtype)<line_sep>R=shift(R dR)<line_sep>unwrapped_R=unwrapped_shift(unwrapped_R dR)<line_sep>self.assertAllClose(displacement(R R0) 
displacement(unwrapped_R R0))<block_end><assert_stmt><not>(jnp.all(unwrapped_R<g>0)<and>jnp.all(unwrapped_R<l>1))<block_end><block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_energy self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>self.assertAllClose(E(R) E_gf(R_f))<line_sep>self.assertAllClose(E(R) E_g(R))<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_force self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>self.assertAllClose(grad(E)(R) grad(E_gf)(R_f))<line_sep>self.assertAllClose(grad(E)(R) grad(E_g)(R))<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_shift self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>R_new=s(R grad(E)(R))<line_sep>R_gf_new=s_gf(R_f grad(E_gf)(R_f))<line_sep>R_g_new=s_g(R grad(E_g)(R))<line_sep>self.assertAllClose(R_new space.transform(box R_gf_new))<line_sep>self.assertAllClose(R_new R_g_new)<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_deform self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>deformed_box=box<times>0.9<line_sep>self.assertAllClose(E_gf(R_f box=deformed_box) E_g(R new_box=deformed_box))<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_deform_grad self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>deformed_box=box<times>0.9<line_sep>self.assertAllClose(grad(E_gf)(R_f box=deformed_box) grad(E_g)(R new_box=deformed_box))<line_sep>self.assertAllClose(jacfwd(E_gf)(R_f box=deformed_box) jacfwd(E_g)(R new_box=deformed_box))<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 
'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_deform_shift self spatial_dimension dtype box_format<block_start>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>deformed_box=box<times>0.9<line_sep>R_new=s_g(R grad(E_g)(R) new_box=deformed_box)<line_sep>R_gf_new=space.transform(deformed_box s_gf(R_f grad(E_gf)(R_f)))<line_sep>self.assertAllClose(R_new R_gf_new)<block_end>@parameterized.named_parameters(jtu.cases_from_list({'testcase_name':f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}' 'spatial_dimension':dim 'dtype':dtype 'box_format':box_format}<for>dim SPATIAL_DIMENSION<for>dtype POSITION_DTYPE<for>box_format BOX_FORMATS))<def_stmt>test_periodic_general_grad_box self spatial_dimension dtype box_format<block_start><if_stmt>box_format<eq>'scalar'<block_start><raise>SkipTest('Scalar case fails due to JAX Issue #5849.')<block_end>N=16<line_sep>R_f,R,box,(s E),(s_gf E_gf),(s_g E_g)=make_periodic_general_test_system(N spatial_dimension dtype box_format)<line_sep>@grad<def_stmt>box_energy_g_fn box<block_start><return>E_g(R new_box=box)<block_end>@grad<def_stmt>box_energy_gf_fn box<block_start><return>E_gf(R_f box=box)<block_end>self.assertAllClose(box_energy_g_fn(box) box_energy_gf_fn(box))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
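# Illustrative usage sketch (not part of the original test file). The tests above
# exercise jax_md's `space.periodic`, which returns a (displacement_fn, shift_fn)
# pair implementing minimum-image displacements and wrapped position updates.
# Assuming jax_md is installed:
import jax.numpy as jnp
from jax_md import space

displacement_fn, shift_fn = space.periodic(10.0)   # cubic box of side 10
# Raw difference per axis is -8.5; the minimum-image convention wraps it to +1.5.
dR = displacement_fn(jnp.array([1.0, 1.0]), jnp.array([9.5, 9.5]))
# Shifting past the boundary wraps the position back into [0, 10).
R_new = shift_fn(jnp.array([9.8, 9.8]), jnp.array([0.5, 0.5]))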
<import_stmt>ssg.utils<def_stmt>preprocess data lang<block_start>data["arg_name_value"]=data["arg_name"]+"="+data["arg_value"]<if_stmt>lang<eq>"oval"# escape dot, this is used in oval regex <block_start>data["escaped_arg_name_value"]=data["arg_name_value"].replace("." "\\.")<line_sep># replace . with _, this is used in test / object / state ids data["sanitized_arg_name"]=ssg.utils.escape_id(data["arg_name"])<block_end><return>data<block_end>
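# Illustrative usage sketch (not part of the original file). The `preprocess`
# hook above combines the argument name and value and, for OVAL content, adds a
# regex-escaped and an id-sanitized variant. With a hypothetical sysctl-style input:
example = {"arg_name": "net.ipv4.ip_forward", "arg_value": "0"}
preprocess(example, "oval")
# example["arg_name_value"]         == "net.ipv4.ip_forward=0"
# example["escaped_arg_name_value"] == r"net\.ipv4\.ip_forward=0"
# example["sanitized_arg_name"]     comes from ssg.utils.escape_id (dots become "_")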
# Author: <NAME> # Copyright (c) 2019, <NAME> # All rights reserved. # based on github.com/ClementPinard/SfMLearner-Pytorch <import_from_future_stmt> division<import_stmt>torch<import_from_stmt>torch.autograd Variable<line_sep>pixel_coords=<none><def_stmt>set_id_grid depth<block_start><global>pixel_coords<line_sep>b,h,w=depth.size()<line_sep>i_range=Variable(torch.arange(0 h).view(1 h 1).expand(1 h w)).type_as(depth)# [1, H, W] j_range=Variable(torch.arange(0 w).view(1 1 w).expand(1 h w)).type_as(depth)# [1, H, W] ones=Variable(torch.ones(1 h w)).type_as(depth)<line_sep>pixel_coords=torch.stack((j_range i_range ones) dim=1)<block_end># [1, 3, H, W] <def_stmt>check_sizes input input_name expected<block_start>condition=[input.ndimension()<eq>len(expected)]<for_stmt>i,size enumerate(expected)<block_start><if_stmt>size.isdigit()<block_start>condition.append(input.size(i)<eq>int(size))<block_end><block_end><assert_stmt>(all(condition)) "wrong size for {}, expected {}, got {}".format(input_name 'x'.join(expected) list(input.size()))<block_end><def_stmt>pixel2cam depth intrinsics_inv<block_start><global>pixel_coords<line_sep>"""Transform coordinates in the pixel frame to the camera frame. Args: depth: depth maps -- [B, H, W] intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3] Returns: array of (u,v,1) cam coordinates -- [B, 3, H, W] """<line_sep>b,h,w=depth.size()<if_stmt>(pixel_coords<is><none>)<or>pixel_coords.size(2)<ne>h<or>pixel_coords.size(3)<ne>w<block_start>set_id_grid(depth)<block_end>current_pixel_coords=pixel_coords[: : :h :w].expand(b 3 h w).contiguous().view(b 3 -1)# [B, 3, H*W] cam_coords=intrinsics_inv.bmm(current_pixel_coords).view(b 3 h w)<line_sep><return>cam_coords<times>depth.unsqueeze(1)<block_end><def_stmt>cam2pixel cam_coords proj_c2p_rot proj_c2p_tr padding_mode<block_start>"""Transform coordinates in the camera frame to the pixel frame. Args: cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W] proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4] proj_c2p_tr: translation vectors of cameras -- [B, 3, 1] Returns: array of [-1,1] coordinates -- [B, 2, H, W] """<line_sep>b,_,h,w=cam_coords.size()<line_sep>cam_coords_flat=cam_coords.view(b 3 -1)# [B, 3, H*W] <if_stmt>proj_c2p_rot<is><not><none><block_start>pcoords=proj_c2p_rot.bmm(cam_coords_flat)<block_end><else_stmt><block_start>pcoords=cam_coords_flat<block_end><if_stmt>proj_c2p_tr<is><not><none><block_start>pcoords=pcoords+proj_c2p_tr# [B, 3, H*W] <block_end>X=pcoords[: 0]<line_sep>Y=pcoords[: 1]<line_sep>Z=pcoords[: 2].clamp(min=1e-3)<line_sep>X_norm=2<times>(X/Z)/(w-1)-1# Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W] Y_norm=2<times>(Y/Z)/(h-1)-1# Idem [B, H*W] <if_stmt>padding_mode<eq>'zeros'<block_start>X_mask=((X_norm<g>1)+(X_norm<l>-1)).detach()<line_sep>X_norm[X_mask]=2# make sure that no point in warped image is a combinaison of im and gray Y_mask=((Y_norm<g>1)+(Y_norm<l>-1)).detach()<line_sep>Y_norm[Y_mask]=2<block_end>pixel_coords=torch.stack([X_norm Y_norm] dim=2)# [B, H*W, 2] <return>pixel_coords.view(b h w 2)<block_end><def_stmt>euler2mat angle<block_start>"""Convert euler angles to rotation matrix. 
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174 Args: angle: rotation angle along 3 axis (in radians) -- size = [B, 3] Returns: Rotation matrix corresponding to the euler angles -- size = [B, 3, 3] """<line_sep>B=angle.size(0)<line_sep>x,y,z=angle[: 0] angle[: 1] angle[: 2]<line_sep>cosz=torch.cos(z)<line_sep>sinz=torch.sin(z)<line_sep>zeros=z.detach()<times>0<line_sep>ones=zeros.detach()+1<line_sep>zmat=torch.stack([cosz -sinz zeros sinz cosz zeros zeros zeros ones] dim=1).view(B 3 3)<line_sep>cosy=torch.cos(y)<line_sep>siny=torch.sin(y)<line_sep>ymat=torch.stack([cosy zeros siny zeros ones zeros -siny zeros cosy] dim=1).view(B 3 3)<line_sep>cosx=torch.cos(x)<line_sep>sinx=torch.sin(x)<line_sep>xmat=torch.stack([ones zeros zeros zeros cosx -sinx zeros sinx cosx] dim=1).view(B 3 3)<line_sep>rotMat=xmat.bmm(ymat).bmm(zmat)<line_sep><return>rotMat<block_end><def_stmt>quat2mat quat<block_start>"""Convert quaternion coefficients to rotation matrix. Args: quat: first three coeff of quaternion of rotation. fourht is then computed to have a norm of 1 -- size = [B, 3] Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """<line_sep>norm_quat=torch.cat([quat[: :1].detach()<times>0+1 quat] dim=1)<line_sep>norm_quat=norm_quat/norm_quat.norm(p=2 dim=1 keepdim=<true>)<line_sep>w,x,y,z=norm_quat[: 0] norm_quat[: 1] norm_quat[: 2] norm_quat[: 3]<line_sep>B=quat.size(0)<line_sep>w2,x2,y2,z2=w.pow(2) x.pow(2) y.pow(2) z.pow(2)<line_sep>wx,wy,wz=w<times>x w<times>y w<times>z<line_sep>xy,xz,yz=x<times>y x<times>z y<times>z<line_sep>rotMat=torch.stack([w2+x2-y2-z2 2<times>xy-2<times>wz 2<times>wy+2<times>xz 2<times>wz+2<times>xy w2-x2+y2-z2 2<times>yz-2<times>wx 2<times>xz-2<times>wy 2<times>wx+2<times>yz w2-x2-y2+z2] dim=1).view(B 3 3)<line_sep><return>rotMat<block_end><def_stmt>pose_vec2mat vec rotation_mode='euler'<block_start>""" Convert 6DoF parameters to transformation matrix. Args:s vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6] Returns: A transformation matrix -- [B, 3, 4] """<line_sep>translation=vec[: :3].unsqueeze(-1)# [B, 3, 1] rot=vec[: 3:]<if_stmt>rotation_mode<eq>'euler'<block_start>rot_mat=euler2mat(rot)# [B, 3, 3] <block_end><elif_stmt>rotation_mode<eq>'quat'<block_start>rot_mat=quat2mat(rot)# [B, 3, 3] <block_end>transform_mat=torch.cat([rot_mat translation] dim=2)# [B, 3, 4] <return>transform_mat<block_end><def_stmt>flow_warp img flow padding_mode='zeros'<block_start>""" Inverse warp a source image to the target image plane. 
Args: img: the source image (where to sample pixels) -- [B, 3, H, W] flow: flow map of the target image -- [B, 2, H, W] Returns: Source image warped to the target image plane """<line_sep>check_sizes(img 'img' 'BCHW')<line_sep>check_sizes(flow 'flow' 'B2HW')<line_sep>bs,_,h,w=flow.size()<line_sep>u=flow[: 0 : :]<line_sep>v=flow[: 1 : :]<line_sep>grid_x=Variable(torch.arange(0 w).view(1 1 w).expand(1 h w) requires_grad=<false>).type_as(u).expand_as(u)# [bs, H, W] grid_y=Variable(torch.arange(0 h).view(1 h 1).expand(1 h w) requires_grad=<false>).type_as(v).expand_as(v)# [bs, H, W] X=grid_x+u<line_sep>Y=grid_y+v<line_sep>X=2<times>(X/(w-1.0)-0.5)<line_sep>Y=2<times>(Y/(h-1.0)-0.5)<line_sep>grid_tf=torch.stack((X Y) dim=3)<line_sep>img_tf=torch.nn.functional.grid_sample(img grid_tf padding_mode=padding_mode)<line_sep><return>img_tf<block_end><def_stmt>pose2flow depth pose intrinsics intrinsics_inv rotation_mode='euler' padding_mode=<none><block_start>""" Converts pose parameters to rigid optical flow """<line_sep>check_sizes(depth 'depth' 'BHW')<line_sep>check_sizes(pose 'pose' 'B6')<line_sep>check_sizes(intrinsics 'intrinsics' 'B33')<line_sep>check_sizes(intrinsics_inv 'intrinsics' 'B33')<assert_stmt>(intrinsics_inv.size()<eq>intrinsics.size())<line_sep>bs,h,w=depth.size()<line_sep>grid_x=Variable(torch.arange(0 w).view(1 1 w).expand(1 h w) requires_grad=<false>).type_as(depth).expand_as(depth)# [bs, H, W] grid_y=Variable(torch.arange(0 h).view(1 h 1).expand(1 h w) requires_grad=<false>).type_as(depth).expand_as(depth)# [bs, H, W] cam_coords=pixel2cam(depth intrinsics_inv)# [B,3,H,W] pose_mat=pose_vec2mat(pose rotation_mode)# [B,3,4] # Get projection matrix for tgt camera frame to source pixel frame proj_cam_to_src_pixel=intrinsics.bmm(pose_mat)# [B, 3, 4] src_pixel_coords=cam2pixel(cam_coords proj_cam_to_src_pixel[: : :3] proj_cam_to_src_pixel[: : -1:] padding_mode)# [B,H,W,2] X=(w-1)<times>(src_pixel_coords[: : : 0]/2.0+0.5)-grid_x<line_sep>Y=(h-1)<times>(src_pixel_coords[: : : 1]/2.0+0.5)-grid_y<line_sep><return>torch.stack((X Y) dim=1)<block_end><def_stmt>flow2oob flow<block_start>check_sizes(flow 'flow' 'B2HW')<line_sep>bs,_,h,w=flow.size()<line_sep>u=flow[: 0 : :]<line_sep>v=flow[: 1 : :]<line_sep>grid_x=Variable(torch.arange(0 w).view(1 1 w).expand(1 h w) requires_grad=<false>).type_as(u).expand_as(u)# [bs, H, W] grid_y=Variable(torch.arange(0 h).view(1 h 1).expand(1 h w) requires_grad=<false>).type_as(v).expand_as(v)# [bs, H, W] X=grid_x+u<line_sep>Y=grid_y+v<line_sep>X=2<times>(X/(w-1.0)-0.5)<line_sep>Y=2<times>(Y/(h-1.0)-0.5)<line_sep>oob=(X.abs()<g>1).add(Y.abs()<g>1)<g>0<line_sep><return>oob<block_end><def_stmt>occlusion_mask grid depth<block_start>check_sizes(img 'grid' 'BHW2')<line_sep>check_sizes(depth 'depth' 'BHW')<line_sep>mask=grid<line_sep><return>mask<block_end><def_stmt>inverse_warp img depth pose intrinsics intrinsics_inv rotation_mode='euler' padding_mode='zeros'<block_start>""" Inverse warp a source image to the target image plane. 
Args: img: the source image (where to sample pixels) -- [B, 3, H, W] depth: depth map of the target image -- [B, H, W] pose: 6DoF pose parameters from target to source -- [B, 6] intrinsics: camera intrinsic matrix -- [B, 3, 3] intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3] Returns: Source image warped to the target image plane """<line_sep>check_sizes(img 'img' 'B3HW')<line_sep>check_sizes(depth 'depth' 'BHW')<line_sep>check_sizes(pose 'pose' 'B6')<line_sep>check_sizes(intrinsics 'intrinsics' 'B33')<line_sep>check_sizes(intrinsics_inv 'intrinsics' 'B33')<assert_stmt>(intrinsics_inv.size()<eq>intrinsics.size())<line_sep>batch_size,_,img_height,img_width=img.size()<line_sep>cam_coords=pixel2cam(depth intrinsics_inv)# [B,3,H,W] pose_mat=pose_vec2mat(pose rotation_mode)# [B,3,4] # Get projection matrix for tgt camera frame to source pixel frame proj_cam_to_src_pixel=intrinsics.bmm(pose_mat)# [B, 3, 4] src_pixel_coords=cam2pixel(cam_coords proj_cam_to_src_pixel[: : :3] proj_cam_to_src_pixel[: : -1:] padding_mode)# [B,H,W,2] projected_img=torch.nn.functional.grid_sample(img src_pixel_coords padding_mode=padding_mode)<line_sep><return>projected_img<block_end>
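# Illustrative usage sketch (not part of the original file). A minimal call to
# `inverse_warp` above with dummy tensors of the documented shapes; the module
# name in the import is an assumption.
# from inverse_warp import inverse_warp
import torch

B, H, W = 2, 128, 416
img = torch.rand(B, 3, H, W)            # source image to sample pixels from
depth = torch.rand(B, H, W) + 1.0       # target depth map, kept positive
pose = torch.zeros(B, 6)                # tx, ty, tz, rx, ry, rz (identity motion)
K = torch.tensor([[100.0, 0.0, W / 2.0],
                  [0.0, 100.0, H / 2.0],
                  [0.0, 0.0, 1.0]])
intrinsics = K.expand(B, 3, 3)
intrinsics_inv = K.inverse().expand(B, 3, 3)
# With an identity pose the warp resamples the source roughly onto itself.
warped = inverse_warp(img, depth, pose, intrinsics, intrinsics_inv,
                      padding_mode='border')     # [B, 3, H, W]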
# This file shows how to implement a single hidden layer neural network for # performing binary classification on the GPU using cudamat. <import_from_future_stmt> division<import_stmt>pdb<import_stmt>time<import_stmt>numpy<as>np<import_stmt>cudamat<as>cm<import_from_stmt>cudamat learn<as>cl<import_stmt>util<line_sep># initialize CUDA cm.cublas_init()<line_sep># load data util.load('mnist49.dat' globals())<line_sep># Put training data onto the GPU. dat_train=dat_train/255.<line_sep>dat_train=dat_train-(np.mean(dat_train 1)+10<power>-8)[: np.newaxis]<line_sep>dev_train=cm.CUDAMatrix(dat_train)<line_sep>dev_lbl=cm.CUDAMatrix(lbl_train)<line_sep># training parameters epsilon=0.01<line_sep>momentum=0.9<line_sep>num_epochs=30<line_sep>batch_size=128<line_sep>num_batches=dat_train.shape[1]<floordiv>batch_size<line_sep># model parameters dim_in=dat_train.shape[0]<line_sep>dim_out=1<line_sep>num_hid=1024<line_sep># initialize weights w_w1=cm.CUDAMatrix(dim_in<power>-0.5<times>np.random.randn(dim_in num_hid))<line_sep>w_b1=cm.CUDAMatrix(np.zeros((num_hid 1)))<line_sep>w_w2=cm.CUDAMatrix(num_hid<power>-0.5<times>np.random.randn(num_hid dim_out))<line_sep>w_b2=cm.CUDAMatrix(np.zeros((dim_out 1)))<line_sep># initialize weight update matrices wu_w1=cm.empty(w_w1.shape).assign(0)<line_sep>wu_b1=cm.empty(w_b1.shape).assign(0)<line_sep>wu_w2=cm.empty(w_w2.shape).assign(0)<line_sep>wu_b2=cm.empty(w_b2.shape).assign(0)<line_sep># initialize temporary storage h=cm.empty((num_hid batch_size))<line_sep>out=cm.empty((dim_out batch_size))<line_sep>delta=cm.empty((num_hid batch_size))<line_sep># Train neural network. start_time=time.time()<for_stmt>epoch range(num_epochs)<block_start>print("Epoch %i"%(epoch+1))<line_sep>err=[]<for_stmt>batch range(num_batches)# get current minibatch <block_start>inp=dev_train.slice(batch<times>batch_size (batch+1)<times>batch_size)<line_sep>target=dev_lbl.slice(batch<times>batch_size (batch+1)<times>batch_size)<line_sep># forward pass cm.dot(w_w1.T inp target=h)<line_sep>h.add_col_vec(w_b1)<line_sep>h.apply_sigmoid()<line_sep>cm.dot(w_w2.T h target=out)<line_sep>out.add_col_vec(w_b2)<line_sep>out.apply_sigmoid()<line_sep># back prop errors out.subtract(target)# compute error # gradients for w_w2 and w_b2 wu_w2.add_dot(h out.T beta=momentum)<line_sep>wu_b2.add_sums(out axis=1 beta=momentum)<line_sep># compute delta cm.dot(w_w2 out target=delta)<line_sep># delta = delta * h * (1 - h) cl.mult_by_sigmoid_deriv(delta h)<line_sep># gradients for w_w1 and w_b1 wu_w1.add_dot(inp delta.T beta=momentum)<line_sep>wu_b1.add_sums(delta axis=1 beta=momentum)<line_sep># update weights w_w1.subtract_mult(wu_w1 epsilon/batch_size)<line_sep>w_b1.subtract_mult(wu_b1 epsilon/batch_size)<line_sep>w_w2.subtract_mult(wu_w2 epsilon/batch_size)<line_sep>w_b2.subtract_mult(wu_b2 epsilon/batch_size)<line_sep># calculate error on current minibatch err.append(np.abs(out.asarray())<g>0.5)<block_end>print("Training misclassification rate: %f"%np.mean(err))<line_sep>print("Time: %f"%(time.time()-start_time))<block_end># Evaluate neural network on test data. # Load test data onto the GPU. dat_test=dat_test/255.<line_sep>dat_test=dat_test-np.mean(dat_test 1)[: np.newaxis]<line_sep>dev_test=cm.CUDAMatrix(dat_test)<line_sep>dev_lbl=cm.CUDAMatrix(lbl_test)<line_sep># Initalize temporary storage. 
h=cm.empty((num_hid dat_test.shape[1]))<line_sep>out=cm.empty((dim_out dat_test.shape[1]))<line_sep># forward pass cm.dot(w_w1.T dev_test target=h)<line_sep>h.add_col_vec(w_b1)<line_sep>h.apply_sigmoid()<line_sep>cm.dot(w_w2.T h target=out)<line_sep>out.add_col_vec(w_b2)<line_sep>out.apply_sigmoid()<line_sep># compute error out.subtract(dev_lbl)<line_sep>print("Testing misclassification rate: %f"%np.mean(np.abs(out.asarray())<g>0.5))<line_sep>cm.cublas_shutdown()<line_sep>
'''Entry point into the pommerman module'''<import_stmt>gym<import_stmt>inspect<import_from_stmt>. agents<import_from_stmt>. configs<import_from_stmt>. constants<import_from_stmt>. forward_model<import_from_stmt>. helpers<import_from_stmt>. utility<import_from_stmt>. network<line_sep>gym.logger.set_level(40)<line_sep>REGISTRY=<none><def_stmt>_register <block_start><global>REGISTRY<line_sep>REGISTRY=[]<for_stmt>name,f inspect.getmembers(configs inspect.isfunction)<block_start><if_stmt><not>name.endswith('_env')<block_start><continue><block_end>config=f()<line_sep>gym.envs.registration.register(id=config['env_id'] entry_point=config['env_entry_point'] kwargs=config['env_kwargs'])<line_sep>REGISTRY.append(config['env_id'])<block_end><block_end># Register environments with gym _register()<def_stmt>make config_id agent_list game_state_file=<none> render_mode='human'<block_start>'''Makes the pommerman env and registers it with gym'''<assert_stmt>config_id<in>REGISTRY "Unknown configuration '{}'. "<concat>"Possible values: {}".format(config_id REGISTRY)<line_sep>env=gym.make(config_id)<for_stmt>id_,agent enumerate(agent_list)<block_start><assert_stmt>isinstance(agent agents.BaseAgent)<line_sep># NOTE: This is IMPORTANT so that the agent character is initialized agent.init_agent(id_ env.spec._kwargs['game_type'])<block_end>env.set_agents(agent_list)<line_sep>env.set_init_game_state(game_state_file)<line_sep>env.set_render_mode(render_mode)<line_sep><return>env<block_end><import_from_stmt>. cli<line_sep>
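# Illustrative usage sketch (not part of the original file), following the
# upstream playground examples: build an agent list, create an environment via
# `make` above, and run a single step.
import pommerman
from pommerman import agents

agent_list = [agents.SimpleAgent(), agents.RandomAgent(),
              agents.SimpleAgent(), agents.RandomAgent()]
env = pommerman.make('PommeFFACompetition-v0', agent_list)
state = env.reset()
actions = env.act(state)                       # collect an action from every agent
state, reward, done, info = env.step(actions)
env.close()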
"""Provide common mysensors fixtures."""<import_from_future_stmt> annotations<import_from_stmt>collections.abc AsyncGenerator Callable Generator<import_stmt>json<import_from_stmt>typing Any<import_from_stmt>unittest.mock AsyncMock MagicMock patch<import_from_stmt>mysensors BaseSyncGateway<import_from_stmt>mysensors.persistence MySensorsJSONDecoder<import_from_stmt>mysensors.sensor Sensor<import_stmt>pytest<import_from_stmt>homeassistant.components.device_tracker.legacy Device<import_from_stmt>homeassistant.components.mqtt DOMAIN<as>MQTT_DOMAIN<import_from_stmt>homeassistant.components.mysensors.config_flow DEFAULT_BAUD_RATE<import_from_stmt>homeassistant.components.mysensors.const CONF_BAUD_RATE CONF_DEVICE CONF_GATEWAY_TYPE CONF_GATEWAY_TYPE_SERIAL CONF_VERSION DOMAIN <import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.setup async_setup_component<import_from_stmt>tests.common MockConfigEntry load_fixture<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>device_tracker_storage mock_device_tracker_conf:list[Device]<arrow>list[Device]<block_start>"""Mock out device tracker known devices storage."""<line_sep>devices=mock_device_tracker_conf<line_sep><return>devices<block_end>@pytest.fixture(name="mqtt")<def_stmt>mock_mqtt_fixture hass:HomeAssistant<arrow><none><block_start>"""Mock the MQTT integration."""<line_sep>hass.config.components.add(MQTT_DOMAIN)<block_end>@pytest.fixture(name="is_serial_port")<def_stmt>is_serial_port_fixture <arrow>Generator[MagicMock <none> <none>]<block_start>"""Patch the serial port check."""<with_stmt>patch("homeassistant.components.mysensors.gateway.cv.isdevice")<as>is_device<block_start>is_device.side_effect=<lambda>device:device<line_sep><yield>is_device<block_end><block_end>@pytest.fixture(name="gateway_nodes")<def_stmt>gateway_nodes_fixture <arrow>dict[int Sensor]<block_start>"""Return the gateway nodes dict."""<line_sep><return>{}<block_end>@pytest.fixture(name="serial_transport")<async_keyword><def_stmt>serial_transport_fixture gateway_nodes:dict[int Sensor] is_serial_port:MagicMock <arrow>AsyncGenerator[dict[int Sensor] <none>]<block_start>"""Mock a serial transport."""<with_stmt>patch("mysensors.gateway_serial.AsyncTransport" autospec=<true>)<as>transport_class patch("mysensors.task.OTAFirmware" autospec=<true>) patch("mysensors.task.load_fw" autospec=<true>) patch("mysensors.task.Persistence" autospec=<true>)<as>persistence_class<block_start>persistence=persistence_class.return_value<line_sep>mock_gateway_features(persistence transport_class gateway_nodes)<line_sep><yield>transport_class<block_end><block_end><def_stmt>mock_gateway_features persistence:MagicMock transport_class:MagicMock nodes:dict[int Sensor]<arrow><none><block_start>"""Mock the gateway features."""<async_keyword><def_stmt>mock_schedule_save_sensors <arrow><none><block_start>"""Load nodes from via persistence."""<line_sep>gateway=transport_class.call_args[0][0]<line_sep>gateway.sensors.update(nodes)<block_end>persistence.schedule_save_sensors=AsyncMock(side_effect=mock_schedule_save_sensors)<line_sep># For some reason autospeccing does not recognize these methods. 
persistence.safe_load_sensors=MagicMock()<line_sep>persistence.save_sensors=MagicMock()<async_keyword><def_stmt>mock_connect <arrow><none><block_start>"""Mock the start method."""<line_sep>transport.connect_task=MagicMock()<line_sep>gateway=transport_class.call_args[0][0]<line_sep>gateway.on_conn_made(gateway)<block_end>transport=transport_class.return_value<line_sep>transport.connect_task=<none><line_sep>transport.connect.side_effect=mock_connect<block_end>@pytest.fixture(name="transport")<def_stmt>transport_fixture serial_transport:MagicMock<arrow>MagicMock<block_start>"""Return the default mocked transport."""<line_sep><return>serial_transport<block_end>@pytest.fixture<def_stmt>transport_write transport:MagicMock<arrow>MagicMock<block_start>"""Return the transport mock that accepts string messages."""<line_sep><return>transport.return_value.send<block_end>@pytest.fixture(name="serial_entry")<async_keyword><def_stmt>serial_entry_fixture hass:HomeAssistant<arrow>MockConfigEntry<block_start>"""Create a config entry for a serial gateway."""<line_sep>entry=MockConfigEntry(domain=DOMAIN data={CONF_GATEWAY_TYPE:CONF_GATEWAY_TYPE_SERIAL CONF_VERSION:"2.3" CONF_DEVICE:"/test/device" CONF_BAUD_RATE:DEFAULT_BAUD_RATE } )<line_sep><return>entry<block_end>@pytest.fixture(name="config_entry")<def_stmt>config_entry_fixture serial_entry:MockConfigEntry<arrow>MockConfigEntry<block_start>"""Provide the config entry used for integration set up."""<line_sep><return>serial_entry<block_end>@pytest.fixture(name="integration")<async_keyword><def_stmt>integration_fixture hass:HomeAssistant transport:MagicMock config_entry:MockConfigEntry<arrow>AsyncGenerator[MockConfigEntry <none>]<block_start>"""Set up the mysensors integration with a config entry."""<line_sep>config:dict[str Any]={}<line_sep>config_entry.add_to_hass(hass)<with_stmt>patch("homeassistant.components.mysensors.device.UPDATE_DELAY" new=0)<block_start><await>async_setup_component(hass DOMAIN config)<line_sep><await>hass.async_block_till_done()<line_sep><yield>config_entry<block_end><block_end>@pytest.fixture<def_stmt>receive_message transport:MagicMock integration:MockConfigEntry<arrow>Callable[[str] <none>]<block_start>"""Receive a message for the gateway."""<def_stmt>receive_message_callback message_string:str<arrow><none><block_start>"""Receive a message with the transport. The message_string parameter is a string in the MySensors message format. 
"""<line_sep>gateway=transport.call_args[0][0]<line_sep># node_id;child_id;command;ack;type;payload\n gateway.logic(message_string)<block_end><return>receive_message_callback<block_end>@pytest.fixture(name="gateway")<def_stmt>gateway_fixture transport:MagicMock integration:MockConfigEntry<arrow>BaseSyncGateway<block_start>"""Return a setup gateway."""<line_sep><return>transport.call_args[0][0]<block_end><def_stmt>load_nodes_state fixture_path:str<arrow>dict<block_start>"""Load mysensors nodes fixture."""<line_sep><return>json.loads(load_fixture(fixture_path) cls=MySensorsJSONDecoder)<block_end><def_stmt>update_gateway_nodes gateway_nodes:dict[int Sensor] nodes:dict[int Sensor]<arrow>dict<block_start>"""Update the gateway nodes."""<line_sep>gateway_nodes.update(nodes)<line_sep><return>nodes<block_end>@pytest.fixture(name="gps_sensor_state" scope="session")<def_stmt>gps_sensor_state_fixture <arrow>dict<block_start>"""Load the gps sensor state."""<line_sep><return>load_nodes_state("mysensors/gps_sensor_state.json")<block_end>@pytest.fixture<def_stmt>gps_sensor gateway_nodes:dict[int Sensor] gps_sensor_state:dict<arrow>Sensor<block_start>"""Load the gps sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes gps_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="power_sensor_state" scope="session")<def_stmt>power_sensor_state_fixture <arrow>dict<block_start>"""Load the power sensor state."""<line_sep><return>load_nodes_state("mysensors/power_sensor_state.json")<block_end>@pytest.fixture<def_stmt>power_sensor gateway_nodes:dict[int Sensor] power_sensor_state:dict<arrow>Sensor<block_start>"""Load the power sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes power_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="energy_sensor_state" scope="session")<def_stmt>energy_sensor_state_fixture <arrow>dict<block_start>"""Load the energy sensor state."""<line_sep><return>load_nodes_state("mysensors/energy_sensor_state.json")<block_end>@pytest.fixture<def_stmt>energy_sensor gateway_nodes:dict[int Sensor] energy_sensor_state:dict<arrow>Sensor<block_start>"""Load the energy sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes energy_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="sound_sensor_state" scope="session")<def_stmt>sound_sensor_state_fixture <arrow>dict<block_start>"""Load the sound sensor state."""<line_sep><return>load_nodes_state("mysensors/sound_sensor_state.json")<block_end>@pytest.fixture<def_stmt>sound_sensor gateway_nodes:dict[int Sensor] sound_sensor_state:dict<arrow>Sensor<block_start>"""Load the sound sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes sound_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="distance_sensor_state" scope="session")<def_stmt>distance_sensor_state_fixture <arrow>dict<block_start>"""Load the distance sensor state."""<line_sep><return>load_nodes_state("mysensors/distance_sensor_state.json")<block_end>@pytest.fixture<def_stmt>distance_sensor gateway_nodes:dict[int Sensor] distance_sensor_state:dict<arrow>Sensor<block_start>"""Load the distance sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes distance_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="temperature_sensor_state" scope="session")<def_stmt>temperature_sensor_state_fixture <arrow>dict<block_start>"""Load the temperature sensor 
state."""<line_sep><return>load_nodes_state("mysensors/temperature_sensor_state.json")<block_end>@pytest.fixture<def_stmt>temperature_sensor gateway_nodes:dict[int Sensor] temperature_sensor_state:dict<arrow>Sensor<block_start>"""Load the temperature sensor."""<line_sep>nodes=update_gateway_nodes(gateway_nodes temperature_sensor_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>@pytest.fixture(name="text_node_state" scope="session")<def_stmt>text_node_state_fixture <arrow>dict<block_start>"""Load the text node state."""<line_sep><return>load_nodes_state("mysensors/text_node_state.json")<block_end>@pytest.fixture<def_stmt>text_node gateway_nodes:dict[int Sensor] text_node_state:dict<arrow>Sensor<block_start>"""Load the text child node."""<line_sep>nodes=update_gateway_nodes(gateway_nodes text_node_state)<line_sep>node=nodes[1]<line_sep><return>node<block_end>
#! usr/bin/env python3 # -*- coding:utf-8 -*- """ @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) """<import_stmt>tensorflow<as>tf<import_from_stmt>nlpgnn.gnn.messagepassing MessagePassing<class_stmt>RGraphConvolution(MessagePassing)<block_start><def_stmt>__init__ self out_features epsion=1e-7 aggr="sum" normalize=<true> kernel_initializer='glorot_uniform' bias_initializer='zeros' use_bias=<true> **kwargs<block_start>super(RGraphConvolution self).__init__(aggr **kwargs)<line_sep>self.kernel_initializer=tf.keras.initializers.get(kernel_initializer)<line_sep>self.bias_initializer=tf.keras.initializers.get(bias_initializer)<line_sep>self.use_bias=use_bias<line_sep>self.normalize=normalize<line_sep>self.out_features=out_features<line_sep>self.epsion=epsion<block_end><def_stmt>build self input_shapes<block_start>node_embedding_shapes=input_shapes.node_embeddings<line_sep>adjacency_list_shapes=input_shapes.adjacency_lists<line_sep>num_edge_type=len(adjacency_list_shapes)<line_sep>in_features=node_embedding_shapes[-1]<line_sep>self._edge_type_weights=[]<line_sep>self._edge_type_bias=[]<for_stmt>i range(num_edge_type)<block_start>weight=self.add_weight(shape=(in_features self.out_features) initializer=self.kernel_initializer name='wt_{}'.format(i) )<line_sep>self._edge_type_weights.append(weight)<block_end><if_stmt>self.use_bias<block_start>self.bias=self.add_weight(shape=(self.out_features) initializer=self.bias_initializer name='b' )<block_end><else_stmt><block_start>self.bias=<none><block_end>self.weight_o=self.add_weight(shape=(in_features self.out_features) initializer=self.kernel_initializer name='wo' )<line_sep>self.built=<true><block_end><def_stmt>message_function self edge_source_states edge_target_states num_incoming_to_node_per_message num_outing_to_node_per_message edge_type_idx<block_start>""" :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: """<line_sep>weight_r=self._edge_type_weights[edge_type_idx]<line_sep>messages=tf.linalg.matmul(edge_source_states weight_r)<if_stmt>self.normalize<block_start>messages=(tf.expand_dims(1.0/(tf.cast(num_incoming_to_node_per_message tf.float32)+self.epsion) axis=-1)<times>messages)<block_end><return>messages<block_end><def_stmt>call self inputs<block_start>aggr_out=self.propagate(inputs)# message_passing + update aggr_out<augadd>tf.linalg.matmul(inputs.node_embeddings self.weight_o)<if_stmt>self.bias<is><not><none><block_start>aggr_out<augadd>self.bias<block_end><return>aggr_out<block_end><block_end>
""" Implement transformation on Numba IR """<import_from_future_stmt> absolute_import print_function<import_from_stmt>collections namedtuple defaultdict<import_stmt>logging<import_from_stmt>numba.analysis compute_cfg_from_blocks find_top_level_loops<import_from_stmt>numba ir errors ir_utils<import_from_stmt>numba.analysis compute_use_defs<line_sep>_logger=logging.getLogger(__name__)<def_stmt>_extract_loop_lifting_candidates cfg blocks<block_start>""" Returns a list of loops that are candidate for loop lifting """<line_sep># check well-formed-ness of the loop <def_stmt>same_exit_point loop<block_start>"all exits must point to the same location"<line_sep>outedges=set()<for_stmt>k loop.exits<block_start>succs=set(x<for>x,_ cfg.successors(k))<if_stmt><not>succs# If the exit point has no successor, it contains an return # statement, which is not handled by the looplifting code. # Thus, this loop is not a candidate. <block_start>_logger.debug("return-statement in loop.")<line_sep><return><false><block_end>outedges<augor>succs<block_end>ok=len(outedges)<eq>1<line_sep>_logger.debug("same_exit_point=%s (%s)" ok outedges)<line_sep><return>ok<block_end><def_stmt>one_entry loop<block_start>"there is one entry"<line_sep>ok=len(loop.entries)<eq>1<line_sep>_logger.debug("one_entry=%s" ok)<line_sep><return>ok<block_end><def_stmt>cannot_yield loop<block_start>"cannot have yield inside the loop"<line_sep>insiders=set(loop.body)|set(loop.entries)|set(loop.exits)<for_stmt>blk map(blocks.__getitem__ insiders)<block_start><for_stmt>inst blk.body<block_start><if_stmt>isinstance(inst ir.Assign)<block_start><if_stmt>isinstance(inst.value ir.Yield)<block_start>_logger.debug("has yield")<line_sep><return><false><block_end><block_end><block_end><block_end>_logger.debug("no yield")<line_sep><return><true><block_end>_logger.info('finding looplift candidates')<line_sep># the check for cfg.entry_point in the loop.entries is to prevent a bad # rewrite where a prelude for a lifted loop would get written into block -1 # if a loop entry were in block 0 candidates=[]<for_stmt>loop find_top_level_loops(cfg)<block_start>_logger.debug("top-level loop: %s" loop)<if_stmt>(same_exit_point(loop)<and>one_entry(loop)<and>cannot_yield(loop)<and>cfg.entry_point()<not><in>loop.entries)<block_start>candidates.append(loop)<line_sep>_logger.debug("add candidate: %s" loop)<block_end><block_end><return>candidates<block_end><def_stmt>find_region_inout_vars blocks livemap callfrom returnto body_block_ids<block_start>"""Find input and output variables to a block region. """<line_sep>inputs=livemap[callfrom]<line_sep>outputs=livemap[returnto]<line_sep># ensure live variables are actually used in the blocks, else remove, # saves having to create something valid to run through postproc # to achieve similar loopblocks={}<for_stmt>k body_block_ids<block_start>loopblocks[k]=blocks[k]<block_end>used_vars=set()<line_sep>def_vars=set()<line_sep>defs=compute_use_defs(loopblocks)<for_stmt>vs defs.usemap.values()<block_start>used_vars<augor>vs<block_end><for_stmt>vs defs.defmap.values()<block_start>def_vars<augor>vs<block_end>used_or_defined=used_vars|def_vars<line_sep># note: sorted for stable ordering inputs=sorted(set(inputs)&used_or_defined)<line_sep>outputs=sorted(set(outputs)&used_or_defined&def_vars)<line_sep><return>inputs outputs<block_end>_loop_lift_info=namedtuple('loop_lift_info' 'loop,inputs,outputs,callfrom,returnto')<def_stmt>_loop_lift_get_candidate_infos cfg blocks livemap<block_start>""" Returns information on looplifting candidates. 
"""<line_sep>loops=_extract_loop_lifting_candidates(cfg blocks)<line_sep>loopinfos=[]<for_stmt>loop loops<block_start>[callfrom]=loop.entries# requirement checked earlier an_exit=next(iter(loop.exits))# anyone of the exit block <if_stmt>len(loop.exits)<g>1# Pre-Py3.8 may have multiple exits <block_start>[(returnto _)]=cfg.successors(an_exit)# requirement checked earlier <block_end><else_stmt># Post-Py3.8 DO NOT have multiple exits <block_start>returnto=an_exit<block_end>local_block_ids=set(loop.body)|set(loop.entries)<line_sep>inputs,outputs=find_region_inout_vars(blocks=blocks livemap=livemap callfrom=callfrom returnto=returnto body_block_ids=local_block_ids )<line_sep>lli=_loop_lift_info(loop=loop inputs=inputs outputs=outputs callfrom=callfrom returnto=returnto)<line_sep>loopinfos.append(lli)<block_end><return>loopinfos<block_end><def_stmt>_loop_lift_modify_call_block liftedloop block inputs outputs returnto<block_start>""" Transform calling block from top-level function to call the lifted loop. """<line_sep>scope=block.scope<line_sep>loc=block.loc<line_sep>blk=ir.Block(scope=scope loc=loc)<line_sep>ir_utils.fill_block_with_call(newblock=blk callee=liftedloop label_next=returnto inputs=inputs outputs=outputs )<line_sep><return>blk<block_end><def_stmt>_loop_lift_prepare_loop_func loopinfo blocks<block_start>""" Inplace transform loop blocks for use as lifted loop. """<line_sep>entry_block=blocks[loopinfo.callfrom]<line_sep>scope=entry_block.scope<line_sep>loc=entry_block.loc<line_sep># Lowering assumes the first block to be the one with the smallest offset firstblk=min(blocks)-1<line_sep>blocks[firstblk]=ir_utils.fill_callee_prologue(block=ir.Block(scope=scope loc=loc) inputs=loopinfo.inputs label_next=loopinfo.callfrom )<line_sep>blocks[loopinfo.returnto]=ir_utils.fill_callee_epilogue(block=ir.Block(scope=scope loc=loc) outputs=loopinfo.outputs )<block_end><def_stmt>_loop_lift_modify_blocks func_ir loopinfo blocks typingctx targetctx flags locals<block_start>""" Modify the block inplace to call to the lifted-loop. Returns a dictionary of blocks of the lifted-loop. """<import_from_stmt>numba.dispatcher LiftedLoop<line_sep># Copy loop blocks loop=loopinfo.loop<line_sep>loopblockkeys=set(loop.body)|set(loop.entries)<if_stmt>len(loop.exits)<g>1# Pre-Py3.8 may have multiple exits <block_start>loopblockkeys<augor>loop.exits<block_end>loopblocks=dict((k blocks[k].copy())<for>k loopblockkeys)<line_sep># Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo loopblocks)<line_sep># Create a new IR for the lifted loop lifted_ir=func_ir.derive(blocks=loopblocks arg_names=tuple(loopinfo.inputs) arg_count=len(loopinfo.inputs) force_non_generator=<true>)<line_sep>liftedloop=LiftedLoop(lifted_ir typingctx targetctx flags locals)<line_sep># modify for calling into liftedloop callblock=_loop_lift_modify_call_block(liftedloop blocks[loopinfo.callfrom] loopinfo.inputs loopinfo.outputs loopinfo.returnto)<line_sep># remove blocks <for_stmt>k loopblockkeys<block_start><del_stmt>blocks[k]<block_end># update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom]=callblock<line_sep><return>liftedloop<block_end><def_stmt>loop_lifting func_ir typingctx targetctx flags locals<block_start>""" Loop lifting transformation. 
Given a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` """<line_sep>blocks=func_ir.blocks.copy()<line_sep>cfg=compute_cfg_from_blocks(blocks)<line_sep>loopinfos=_loop_lift_get_candidate_infos(cfg blocks func_ir.variable_lifetime.livemap)<line_sep>loops=[]<if_stmt>loopinfos<block_start>_logger.debug('loop lifting this IR with %d candidates:\n%s' len(loopinfos) func_ir.dump_to_string())<block_end><for_stmt>loopinfo loopinfos<block_start>lifted=_loop_lift_modify_blocks(func_ir loopinfo blocks typingctx targetctx flags locals)<line_sep>loops.append(lifted)<block_end># Make main IR main=func_ir.derive(blocks=blocks)<line_sep><return>main loops<block_end><def_stmt>canonicalize_cfg_single_backedge blocks<block_start>""" Rewrite loops that have multiple backedges. """<line_sep>cfg=compute_cfg_from_blocks(blocks)<line_sep>newblocks=blocks.copy()<def_stmt>new_block_id <block_start><return>max(newblocks.keys())+1<block_end><def_stmt>has_multiple_backedges loop<block_start>count=0<for_stmt>k loop.body<block_start>blk=blocks[k]<line_sep>edges=blk.terminator.get_targets()<line_sep># is a backedge? <if_stmt>loop.header<in>edges<block_start>count<augadd>1<if_stmt>count<g>1# early exit <block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>yield_loops_with_multiple_backedges <block_start><for_stmt>lp cfg.loops().values()<block_start><if_stmt>has_multiple_backedges(lp)<block_start><yield>lp<block_end><block_end><block_end><def_stmt>replace_target term src dst<block_start><def_stmt>replace target<block_start><return>(dst<if>target<eq>src<else>target)<block_end><if_stmt>isinstance(term ir.Branch)<block_start><return>ir.Branch(cond=term.cond truebr=replace(term.truebr) falsebr=replace(term.falsebr) loc=term.loc)<block_end><elif_stmt>isinstance(term ir.Jump)<block_start><return>ir.Jump(target=replace(term.target) loc=term.loc)<block_end><else_stmt><block_start><assert_stmt><not>term.get_targets()<line_sep><return>term<block_end><block_end><def_stmt>rewrite_single_backedge loop<block_start>""" Add new tail block that gathers all the backedges """<line_sep>header=loop.header<line_sep>tailkey=new_block_id()<for_stmt>blkkey loop.body<block_start>blk=newblocks[blkkey]<if_stmt>header<in>blk.terminator.get_targets()<block_start>newblk=blk.copy()<line_sep># rewrite backedge into jumps to new tail block newblk.body[-1]=replace_target(blk.terminator header tailkey)<line_sep>newblocks[blkkey]=newblk<block_end><block_end># create new tail block entryblk=newblocks[header]<line_sep>tailblk=ir.Block(scope=entryblk.scope loc=entryblk.loc)<line_sep># add backedge tailblk.append(ir.Jump(target=header loc=tailblk.loc))<line_sep>newblocks[tailkey]=tailblk<block_end><for_stmt>loop yield_loops_with_multiple_backedges()<block_start>rewrite_single_backedge(loop)<block_end><return>newblocks<block_end><def_stmt>canonicalize_cfg blocks<block_start>""" Rewrite the given blocks to canonicalize the CFG. Returns a new dictionary of blocks. """<line_sep><return>canonicalize_cfg_single_backedge(blocks)<block_end><def_stmt>with_lifting func_ir typingctx targetctx flags locals<block_start>"""With-lifting transformation Rewrite the IR to extract all withs. Only the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir) """<import_from_stmt>numba postproc<def_stmt>dispatcher_factory func_ir objectmode=<false> **kwargs<block_start><import_from_stmt>numba.dispatcher LiftedWith ObjModeLiftedWith<line_sep>myflags=flags.copy()<if_stmt>objectmode# Lifted with-block cannot looplift <block_start>myflags.enable_looplift=<false><line_sep># Lifted with-block uses object mode myflags.enable_pyobject=<true><line_sep>myflags.force_pyobject=<true><line_sep>myflags.no_cpython_wrapper=<false><line_sep>cls=ObjModeLiftedWith<block_end><else_stmt><block_start>cls=LiftedWith<block_end><return>cls(func_ir typingctx targetctx myflags locals **kwargs)<block_end>postproc.PostProcessor(func_ir).run()# ensure we have variable lifetime <assert_stmt>func_ir.variable_lifetime<line_sep>vlt=func_ir.variable_lifetime<line_sep>blocks=func_ir.blocks.copy()<line_sep># find where with-contexts regions are withs=find_setupwiths(blocks)<line_sep>cfg=vlt.cfg<line_sep>_legalize_withs_cfg(withs cfg blocks)<line_sep># For each with-regions, mutate them according to # the kind of contextmanager sub_irs=[]<for_stmt>(blk_start blk_end) withs<block_start>body_blocks=[]<for_stmt>node _cfg_nodes_in_region(cfg blk_start blk_end)<block_start>body_blocks.append(node)<block_end>_legalize_with_head(blocks[blk_start])<line_sep># Find the contextmanager cmkind,extra=_get_with_contextmanager(func_ir blocks blk_start)<line_sep># Mutate the body and get new IR sub=cmkind.mutate_with_body(func_ir blocks blk_start blk_end body_blocks dispatcher_factory extra)<line_sep>sub_irs.append(sub)<block_end><if_stmt><not>sub_irs# Unchanged <block_start>new_ir=func_ir<block_end><else_stmt><block_start>new_ir=func_ir.derive(blocks)<block_end><return>new_ir sub_irs<block_end><def_stmt>_get_with_contextmanager func_ir blocks blk_start<block_start>"""Get the global object used for the context manager """<line_sep>_illegal_cm_msg="Illegal use of context-manager."<def_stmt>get_var_dfn var<block_start>"""Get the definition given a variable"""<line_sep><return>func_ir.get_definition(var)<block_end><def_stmt>get_ctxmgr_obj var_ref<block_start>"""Return the context-manager object and extra info. The extra contains the arguments if the context-manager is used as a call. """<line_sep># If the contextmanager used as a Call dfn=func_ir.get_definition(var_ref)<if_stmt>isinstance(dfn ir.Expr)<and>dfn.op<eq>'call'<block_start>args=[get_var_dfn(x)<for>x dfn.args]<line_sep>kws={k:get_var_dfn(v)<for>k,v dfn.kws}<line_sep>extra={'args':args 'kwargs':kws}<line_sep>var_ref=dfn.func<block_end><else_stmt><block_start>extra=<none><block_end>ctxobj=ir_utils.guard(ir_utils.find_global_value func_ir var_ref)<line_sep># check the contextmanager object <if_stmt>ctxobj<is>ir.UNDEFINED<block_start><raise>errors.CompilerError("Undefined variable used as context manager" loc=blocks[blk_start].loc )<block_end><if_stmt>ctxobj<is><none><block_start><raise>errors.CompilerError(_illegal_cm_msg loc=dfn.loc)<block_end><return>ctxobj extra<block_end># Scan the start of the with-region for the contextmanager <for_stmt>stmt blocks[blk_start].body<block_start><if_stmt>isinstance(stmt ir.EnterWith)<block_start>var_ref=stmt.contextmanager<line_sep>ctxobj,extra=get_ctxmgr_obj(var_ref)<if_stmt><not>hasattr(ctxobj 'mutate_with_body')<block_start><raise>errors.CompilerError("Unsupported context manager in use" loc=blocks[blk_start].loc )<block_end><return>ctxobj extra<block_end><block_end># No contextmanager found? 
<raise>errors.CompilerError("malformed with-context usage" loc=blocks[blk_start].loc )<block_end><def_stmt>_legalize_with_head blk<block_start>"""Given *blk*, the head block of the with-context, check that it doesn't do anything else. """<line_sep>counters=defaultdict(int)<for_stmt>stmt blk.body<block_start>counters[type(stmt)]<augadd>1<block_end><if_stmt>counters.pop(ir.EnterWith)<ne>1<block_start><raise>errors.CompilerError("with's head-block must have exactly 1 ENTER_WITH" loc=blk.loc )<block_end><if_stmt>counters.pop(ir.Jump)<ne>1<block_start><raise>errors.CompilerError("with's head-block must have exactly 1 JUMP" loc=blk.loc )<block_end># Can have any number of del counters.pop(ir.Del <none>)<line_sep># There MUST NOT be any other statements <if_stmt>counters<block_start><raise>errors.CompilerError("illegal statements in with's head-block" loc=blk.loc )<block_end><block_end><def_stmt>_cfg_nodes_in_region cfg region_begin region_end<block_start>"""Find the set of CFG nodes that are in the given region """<line_sep>region_nodes=set()<line_sep>stack=[region_begin]<while_stmt>stack<block_start>tos=stack.pop()<line_sep>succs,_=zip(*cfg.successors(tos))<line_sep>nodes=set([node<for>node succs<if>node<not><in>region_nodes<and>node<ne>region_end])<line_sep>stack.extend(nodes)<line_sep>region_nodes<augor>nodes<block_end><return>region_nodes<block_end><def_stmt>_legalize_withs_cfg withs cfg blocks<block_start>"""Verify the CFG of the with-context(s). """<line_sep>doms=cfg.dominators()<line_sep>postdoms=cfg.post_dominators()<line_sep># Verify that the with-context has no side-exits <for_stmt>s,e withs<block_start>loc=blocks[s].loc<if_stmt>s<not><in>doms[e]# Not sure what condition can trigger this error. <block_start>msg="Entry of with-context not dominating the exit."<line_sep><raise>errors.CompilerError(msg loc=loc)<block_end><if_stmt>e<not><in>postdoms[s]<block_start>msg=("Does not support with-context that contain branches "<concat>"(i.e. break/return/raise) that can leave the with-context. "<concat>"Details: exit of with-context not post-dominating the entry. ")<line_sep><raise>errors.CompilerError(msg loc=loc)<block_end><block_end><block_end><def_stmt>find_setupwiths blocks<block_start>"""Find all top-level with. Returns a list of ranges for the with-regions. """<def_stmt>find_ranges blocks<block_start><for_stmt>blk blocks.values()<block_start><for_stmt>ew blk.find_insts(ir.EnterWith)<block_start><yield>ew.begin ew.end<block_end><block_end><block_end><def_stmt>previously_occurred start known_ranges<block_start><for_stmt>a,b known_ranges<block_start><if_stmt>s<ge>a<and>s<l>b<block_start><return><true><block_end><block_end><return><false><block_end>known_ranges=[]<for_stmt>s,e sorted(find_ranges(blocks))<block_start><if_stmt><not>previously_occurred(s known_ranges)<block_start><if_stmt>e<not><in>blocks# this's possible if there's an exit path in the with-block <block_start><raise>errors.CompilerError('unsupported controlflow due to return/raise '<concat>'statements inside with block')<block_end><assert_stmt>s<in>blocks 'starting offset is not a label'<line_sep>known_ranges.append((s e))<block_end><block_end><return>known_ranges<block_end>
<import_stmt>frappe<def_stmt>execute <block_start>frappe.delete_doc_if_exists("DocType" "Web View")<line_sep>frappe.delete_doc_if_exists("DocType" "Web View Component")<line_sep>frappe.delete_doc_if_exists("DocType" "CSS Class")<block_end>
<import_stmt>numpy<as>np<import_from_stmt>skimage.transform resize<import_from_stmt>skimage measure<import_from_stmt>skimage.measure regionprops<class_stmt>OCROnObjects()<block_start><def_stmt>__init__ self license_plate<block_start>character_objects=self.identify_boundary_objects(license_plate)<line_sep>self.get_regions(character_objects license_plate)<block_end><def_stmt>identify_boundary_objects self a_license_plate<block_start>labelImage=measure.label(a_license_plate)<line_sep>character_dimensions=(0.4<times>a_license_plate.shape[0] 0.85<times>a_license_plate.shape[0] 0.04<times>a_license_plate.shape[1] 0.15<times>a_license_plate.shape[1])<line_sep>minHeight,maxHeight,minWidth,maxWidth=character_dimensions<line_sep>regionLists=regionprops(labelImage)<line_sep><return>regionLists<block_end><def_stmt>get_regions self character_objects a_license_plate<block_start>""" used to map out regions where the license plate charcters are the principle of connected component analysis and labelling were used Parameters: ----------- a_license_plate: 2D numpy binary image of the license plate Returns: -------- a dictionary containing the index fullscale: 3D array containig 2D array of each character columnsVal: 1D array the starting column of each character coordinates: """<line_sep>cord=[]<line_sep>counter=0<line_sep>column_list=[]<line_sep>character_dimensions=(0.35<times>a_license_plate.shape[0] 0.60<times>a_license_plate.shape[0] 0.05<times>a_license_plate.shape[1] 0.15<times>a_license_plate.shape[1])<line_sep>minHeight,maxHeight,minWidth,maxWidth=character_dimensions<for_stmt>regions character_objects<block_start>minimumRow,minimumCol,maximumRow,maximumCol=regions.bbox<line_sep>character_height=maximumRow-minimumRow<line_sep>character_width=maximumCol-minimumCol<line_sep>roi=a_license_plate[minimumRow:maximumRow minimumCol:maximumCol]<if_stmt>character_height<g>minHeight<and>character_height<l>maxHeight<and>character_width<g>minWidth<and>character_width<l>maxWidth<block_start><if_stmt>counter<eq>0<block_start>samples=resize(roi (20 20))<line_sep>cord.append(regions.bbox)<line_sep>counter<augadd>1<block_end><elif_stmt>counter<eq>1<block_start>roismall=resize(roi (20 20))<line_sep>samples=np.concatenate((samples[<none> : :] roismall[<none> : :]) axis=0)<line_sep>cord.append(regions.bbox)<line_sep>counter<augadd>1<block_end><else_stmt><block_start>roismall=resize(roi (20 20))<line_sep>samples=np.concatenate((samples[: : :] roismall[<none> : :]) axis=0)<line_sep>cord.append(regions.bbox)<block_end>column_list.append(minimumCol)<block_end><block_end><if_stmt>len(column_list)<eq>0<block_start>self.candidates={}<block_end><else_stmt><block_start>self.candidates={'fullscale':samples 'coordinates':np.array(cord) 'columnsVal':column_list}<block_end><return>self.candidates<block_end><block_end>
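# Illustrative usage sketch (not part of the original file): the class above is
# driven with a 2D binary license-plate image; a blank stand-in yields no
# candidate character regions.
import numpy as np

plate = np.zeros((60, 240), dtype=bool)   # hypothetical binarised plate image
ocr = OCROnObjects(plate)
print(ocr.candidates)                     # {} since no character-sized regions are found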
# This code is part of Qiskit. # # (C) Copyright IBM 2018, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ iris dataset """<import_stmt>numpy<as>np<import_from_stmt>sklearn datasets<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.preprocessing StandardScaler MinMaxScaler<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>qiskit.aqua MissingOptionalLibraryError<def_stmt>iris training_size test_size n plot_data=<false><block_start>""" returns iris dataset """<line_sep>class_labels=[r'A' r'B' r'C']<line_sep>data,target=datasets.load_iris(return_X_y=<true>)<line_sep>sample_train,sample_test,label_train,label_test=train_test_split(data target test_size=1 random_state=42)<line_sep># Now we standardize for gaussian around 0 with unit variance std_scale=StandardScaler().fit(sample_train)<line_sep>sample_train=std_scale.transform(sample_train)<line_sep>sample_test=std_scale.transform(sample_test)<line_sep># Now reduce number of features to number of qubits pca=PCA(n_components=n).fit(sample_train)<line_sep>sample_train=pca.transform(sample_train)<line_sep>sample_test=pca.transform(sample_test)<line_sep># Scale to the range (-1,+1) samples=np.append(sample_train sample_test axis=0)<line_sep>minmax_scale=MinMaxScaler((-1 1)).fit(samples)<line_sep>sample_train=minmax_scale.transform(sample_train)<line_sep>sample_test=minmax_scale.transform(sample_test)<line_sep># Pick training size number of samples from each distro training_input={key:(sample_train[label_train<eq>k :])[:training_size]<for>k,key enumerate(class_labels)}<line_sep>test_input={key:(sample_test[label_test<eq>k :])[:test_size]<for>k,key enumerate(class_labels)}<if_stmt>plot_data<block_start><try_stmt><block_start><import_stmt>matplotlib.pyplot<as>plt<block_end><except_stmt>ImportError<as>ex<block_start><raise>MissingOptionalLibraryError(libname='Matplotlib' name='iris' pip_install='pip install matplotlib')<from>ex<block_end><for_stmt>k range(0 3)<block_start>plt.scatter(sample_train[label_train<eq>k 0][:training_size] sample_train[label_train<eq>k 1][:training_size])<block_end>plt.title("Iris dataset")<line_sep>plt.show()<block_end><return>sample_train training_input test_input class_labels<block_end>
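# Illustrative usage sketch (not part of the original file): reduce the iris
# features to 2 principal components and pick 20 training samples per class.
sample_train, training_input, test_input, class_labels = iris(
    training_size=20, test_size=10, n=2, plot_data=False)
# training_input maps 'A', 'B', 'C' to arrays of shape (20, 2), scaled to [-1, 1].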
<import_from_stmt>paddle.vision.transforms ToTensor RandomHorizontalFlip RandomResizedCrop SaturationTransform Compose HueTransform BrightnessTransform ContrastTransform RandomCrop Normalize RandomRotation <import_from_stmt>paddle.vision.datasets Cifar100<import_from_stmt>paddle.io DataLoader<import_from_stmt>paddle.optimizer.lr CosineAnnealingDecay MultiStepDecay LinearWarmup<import_stmt>random<import_from_stmt>resnet20 *<import_stmt>paddle<line_sep># supernet training, based on the PaddleSlim model compression package # https://github.com/PaddlePaddle/PaddleSlim stars are welcome <import_from_stmt>paddleslim.nas.ofa.convert_super Convert supernet<import_from_stmt>paddleslim.nas.ofa OFA RunConfig DistillConfig<import_from_stmt>paddleslim.nas.ofa.utils utils<line_sep>channel_list=[]<for_stmt>i range(1 21)<block_start><if_stmt>0<l>i<le>7# channel_list.append(random.choice([ 4, 8, 12, 16])) <block_start>channel_list.append(16)<block_end><elif_stmt>7<l>i<le>13# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32])) <block_start>channel_list.append(32)<block_end><elif_stmt>13<l>i<le>19# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64])) <block_start>channel_list.append(64)<block_end><else_stmt># channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64])) <block_start>channel_list.append(64)<block_end><block_end>net=ResNet20(100 channel_list)<line_sep>net2=ResNet20(100 channel_list)<line_sep>net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))<line_sep>channel_optional=[]<for_stmt>i range(0 23)<block_start><if_stmt>i<le>7<block_start>channel_optional.append([4 8 12 16])<line_sep># channel_optional.append([12, 16]) <block_end><elif_stmt>7<l>i<le>14<block_start>channel_optional.append([4 8 12 16 20 24 28 32])<line_sep># channel_optional.append([20, 24, 28, 32]) <block_end><elif_stmt>14<l>i<le>21<block_start>channel_optional.append([4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 64])<line_sep># channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) <block_end><else_stmt><block_start>channel_optional.append([4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 64])<line_sep># channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) <block_end><block_end>distill_config=DistillConfig(teacher_model=net2)<line_sep>sp_net_config=supernet(channel=channel_optional)<line_sep>sp_model=Convert(sp_net_config).convert(net)<line_sep>ofa_net=OFA(sp_model distill_config=distill_config)<line_sep>ofa_net.set_task('channel')<line_sep>model=paddle.Model(ofa_net)<line_sep>MAX_EPOCH=300<line_sep>LR=0.1<line_sep>WEIGHT_DECAY=5e-4<line_sep>MOMENTUM=0.9<line_sep>BATCH_SIZE=128<line_sep>CIFAR_MEAN=[0.5071 0.4865 0.4409]<line_sep>CIFAR_STD=[0.1942 0.1918 0.1958]<line_sep>DATA_FILE='./data/data76994/cifar-100-python.tar.gz'<line_sep>model.prepare(paddle.optimizer.Momentum(learning_rate=LinearWarmup(CosineAnnealingDecay(LR MAX_EPOCH) 2000 0. 
LR) momentum=MOMENTUM parameters=model.parameters() weight_decay=WEIGHT_DECAY) CrossEntropyLoss() paddle.metric.Accuracy(topk=(1 5)))<line_sep>transforms=Compose([RandomCrop(32 padding=4) RandomApply(BrightnessTransform(0.1)) RandomApply(ContrastTransform(0.1)) RandomHorizontalFlip() RandomRotation(15) ToArray() Normalize(CIFAR_MEAN CIFAR_STD) ])<line_sep>val_transforms=Compose([ToArray() Normalize(CIFAR_MEAN CIFAR_STD)])<line_sep>train_set=Cifar100(DATA_FILE mode='train' transform=transforms)<line_sep>test_set=Cifar100(DATA_FILE mode='test' transform=val_transforms)<line_sep>callbacks=[LRSchedulerM() callbacks.VisualDL('vis_logs/ofa_resnet20')]<line_sep>model.fit(train_set test_set epochs=MAX_EPOCH batch_size=BATCH_SIZE save_dir='checkpoints' save_freq=100 shuffle=<true> num_workers=4 verbose=1 callbacks=callbacks )<line_sep>
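The training script above also relies on `RandomApply`, `ToArray`, and `LRSchedulerM`, which it neither defines nor imports; they presumably live in a local helper module that is not included here. The stand-ins below are hedged reconstructions of what such helpers might look like, not the project's actual code:

# Hypothetical helper definitions (editorial guesses, not the original project's code).
import random
import numpy as np
import paddle

class RandomApply(object):
    """Apply the wrapped transform with probability p (assumed semantics)."""
    def __init__(self, transform, p=0.5):
        self.transform = transform
        self.p = p
    def __call__(self, img):
        return self.transform(img) if random.random() < self.p else img

class ToArray(object):
    """Convert an HWC uint8 image to a CHW float32 array in [0, 1] (assumed semantics)."""
    def __call__(self, img):
        img = np.asarray(img, dtype='float32') / 255.0
        return img.transpose((2, 0, 1))

# LRSchedulerM presumably steps the warmup/cosine schedule once per batch;
# Paddle's built-in LRScheduler callback is one plausible backing (assumed here).
def LRSchedulerM():
    return paddle.callbacks.LRScheduler(by_step=True, by_epoch=False)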
__version__='7.8.0'<line_sep>_optional_dependencies=[{'name':'CuPy' 'packages':['cupy-cuda120' 'cupy-cuda114' 'cupy-cuda113' 'cupy-cuda112' 'cupy-cuda111' 'cupy-cuda110' 'cupy-cuda102' 'cupy-cuda101' 'cupy-cuda100' 'cupy-cuda92' 'cupy-cuda91' 'cupy-cuda90' 'cupy-cuda80' 'cupy' ] 'specifier':'>=7.7.0,<8.0.0' 'help':'https://docs.cupy.dev/en/latest/install.html' } {'name':'iDeep' 'packages':['ideep4py' ] 'specifier':'>=2.0.0.post3, <2.1' 'help':'https://docs.chainer.org/en/latest/tips.html' } ]<line_sep>
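The `_optional_dependencies` list above is plain metadata. As an illustration of how such a table could be consumed (this helper is editorial, not part of the original module), one might scan installed packages against each entry's specifier:

# Editorial sketch: report missing or incompatible optional dependencies.
import pkg_resources

def check_optional_dependencies(deps=_optional_dependencies):
    for dep in deps:
        dist = None
        for pkg in dep['packages']:
            try:
                dist = pkg_resources.get_distribution(pkg)
                break
            except pkg_resources.DistributionNotFound:
                continue
        if dist is None:
            print('{} is not installed; see {}'.format(dep['name'], dep['help']))
        elif dist.version not in pkg_resources.Requirement.parse(
                dist.project_name + dep['specifier']):
            print('{} {} does not satisfy "{}"; see {}'.format(
                dep['name'], dist.version, dep['specifier'], dep['help']))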
<import_from_stmt>contextlib contextmanager<import_from_stmt>django.db DatabaseError<import_from_stmt>..core.tracing traced_atomic_transaction<line_sep>@contextmanager<def_stmt>transaction_with_commit_on_errors <block_start>"""Perform a transaction, committing it even if a non-database error occurs, and re-raise that error after the atomic block exits."""<line_sep>error=<none><with_stmt>traced_atomic_transaction()<block_start><try_stmt><block_start><yield><block_end><except_stmt>DatabaseError<block_start><raise><block_end><except_stmt>Exception<as>e<block_start>error=e<block_end><block_end><if_stmt>error<block_start><raise>error<block_end><block_end>
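A usage sketch for the context manager above; `Order` and `notify_external_service` are hypothetical names used only to show that non-database errors are re-raised after the atomic block has committed:

# Hypothetical usage (editorial); the model and helper names are illustrative only.
def capture_payment(order_id):
    with transaction_with_commit_on_errors():
        order = Order.objects.select_for_update().get(pk=order_id)
        order.status = "captured"
        order.save(update_fields=["status"])
        # A non-DatabaseError raised here is held until the atomic block exits
        # (so the changes above still commit), then re-raised to the caller.
        notify_external_service(order)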
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for mobilenet_v2."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>copy<import_stmt>tensorflow<as>tf<import_from_stmt>nets.mobilenet conv_blocks<as>ops<import_from_stmt>nets.mobilenet mobilenet<import_from_stmt>nets.mobilenet mobilenet_v2<line_sep>slim=tf.contrib.slim<def_stmt>find_ops optype<block_start>"""Find ops of a given type in graphdef or a graph. Args: optype: operation type (e.g. Conv2D) Returns: List of operations. """<line_sep>gd=tf.get_default_graph()<line_sep><return>[var<for>var gd.get_operations()<if>var.type<eq>optype]<block_end><class_stmt>MobilenetV2Test(tf.test.TestCase)<block_start><def_stmt>setUp self<block_start>tf.reset_default_graph()<block_end><def_stmt>testCreation self<block_start>spec=dict(mobilenet_v2.V2_DEF)<line_sep>_,ep=mobilenet.mobilenet(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=spec)<line_sep>num_convs=len(find_ops('Conv2D'))<line_sep># This is mostly a sanity test. No deep reason for these particular # constants. # # All but the first 2 and the last one have two convolutions, and there is one # extra conv that is not in the spec (the logits). self.assertEqual(num_convs len(spec['spec'])<times>2-2)<line_sep># Check that depthwise outputs are exposed. <for_stmt>i range(2 17)<block_start>self.assertIn('layer_%d/depthwise_output'%i ep)<block_end><block_end><def_stmt>testCreationNoClasses self<block_start>spec=copy.deepcopy(mobilenet_v2.V2_DEF)<line_sep>net,ep=mobilenet.mobilenet(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=spec num_classes=<none>)<line_sep>self.assertIs(net ep['global_pool'])<block_end><def_stmt>testImageSizes self<block_start><for_stmt>input_size,output_size [(224 7) (192 6) (160 5) (128 4) (96 3)]<block_start>tf.reset_default_graph()<line_sep>_,ep=mobilenet_v2.mobilenet(tf.placeholder(tf.float32 (10 input_size input_size 3)))<line_sep>self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3] [output_size]<times>2)<block_end><block_end><def_stmt>testWithSplits self<block_start>spec=copy.deepcopy(mobilenet_v2.V2_DEF)<line_sep>spec['overrides']={(ops.expanded_conv ):dict(split_expansion=2) }<line_sep>_,_=mobilenet.mobilenet(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=spec)<line_sep>num_convs=len(find_ops('Conv2D'))<line_sep># All but 3 ops have 3 conv operators, the remaining 3 have one, # and there is one unaccounted. 
self.assertEqual(num_convs len(spec['spec'])<times>3-5)<block_end><def_stmt>testWithOutputStride8 self<block_start>out,_=mobilenet.mobilenet_base(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF output_stride=8 scope='MobilenetV2')<line_sep>self.assertEqual(out.get_shape().as_list()[1:3] [28 28])<block_end><def_stmt>testDivisibleBy self<block_start>tf.reset_default_graph()<line_sep>mobilenet_v2.mobilenet(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF divisible_by=16 min_depth=32)<line_sep>s=[op.outputs[0].get_shape().as_list()[-1]<for>op find_ops('Conv2D')]<line_sep>s=set(s)<line_sep>self.assertSameElements([32 64 96 160 192 320 384 576 960 1280 1001] s)<block_end><def_stmt>testDivisibleByWithArgScope self<block_start>tf.reset_default_graph()<line_sep># Verifies that depth_multiplier arg scope actually works # if no default min_depth is provided. <with_stmt>slim.arg_scope((mobilenet.depth_multiplier ) min_depth=32)<block_start>mobilenet_v2.mobilenet(tf.placeholder(tf.float32 (10 224 224 2)) conv_defs=mobilenet_v2.V2_DEF depth_multiplier=0.1)<line_sep>s=[op.outputs[0].get_shape().as_list()[-1]<for>op find_ops('Conv2D')]<line_sep>s=set(s)<line_sep>self.assertSameElements(s [32 192 128 1001])<block_end><block_end><def_stmt>testFineGrained self<block_start>tf.reset_default_graph()<line_sep># Verifies that depth_multiplier arg scope actually works # if no default min_depth is provided. mobilenet_v2.mobilenet(tf.placeholder(tf.float32 (10 224 224 2)) conv_defs=mobilenet_v2.V2_DEF depth_multiplier=0.01 finegrain_classification_mode=<true>)<line_sep>s=[op.outputs[0].get_shape().as_list()[-1]<for>op find_ops('Conv2D')]<line_sep>s=set(s)<line_sep># All convolutions will be 8->48, except for the last one. self.assertSameElements(s [8 48 1001 1280])<block_end><def_stmt>testMobilenetBase self<block_start>tf.reset_default_graph()<line_sep># Verifies that mobilenet_base returns pre-pooling layer. 
<with_stmt>slim.arg_scope((mobilenet.depth_multiplier ) min_depth=32)<block_start>net,_=mobilenet_v2.mobilenet_base(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF depth_multiplier=0.1)<line_sep>self.assertEqual(net.get_shape().as_list() [10 7 7 128])<block_end><block_end><def_stmt>testWithOutputStride16 self<block_start>tf.reset_default_graph()<line_sep>out,_=mobilenet.mobilenet_base(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF output_stride=16)<line_sep>self.assertEqual(out.get_shape().as_list()[1:3] [14 14])<block_end><def_stmt>testWithOutputStride8AndExplicitPadding self<block_start>tf.reset_default_graph()<line_sep>out,_=mobilenet.mobilenet_base(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF output_stride=8 use_explicit_padding=<true> scope='MobilenetV2')<line_sep>self.assertEqual(out.get_shape().as_list()[1:3] [28 28])<block_end><def_stmt>testWithOutputStride16AndExplicitPadding self<block_start>tf.reset_default_graph()<line_sep>out,_=mobilenet.mobilenet_base(tf.placeholder(tf.float32 (10 224 224 16)) conv_defs=mobilenet_v2.V2_DEF output_stride=16 use_explicit_padding=<true>)<line_sep>self.assertEqual(out.get_shape().as_list()[1:3] [14 14])<block_end><def_stmt>testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone self<block_start>sc=mobilenet.training_scope(is_training=<none>)<line_sep>self.assertNotIn('is_training' sc[slim.arg_scope_func_key(slim.batch_norm)])<block_end><def_stmt>testBatchNormScopeDoesHasIsTrainingWhenItsNotNone self<block_start>sc=mobilenet.training_scope(is_training=<false>)<line_sep>self.assertIn('is_training' sc[slim.arg_scope_func_key(slim.batch_norm)])<line_sep>sc=mobilenet.training_scope(is_training=<true>)<line_sep>self.assertIn('is_training' sc[slim.arg_scope_func_key(slim.batch_norm)])<line_sep>sc=mobilenet.training_scope()<line_sep>self.assertIn('is_training' sc[slim.arg_scope_func_key(slim.batch_norm)])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
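The tests above exercise the slim MobileNetV2 builders; for reference, here is a minimal inference-graph sketch using the same entry points (editorial, not part of the test file, and assuming the standard `num_classes=1001` default):

# Editorial sketch: building a MobileNetV2 classification graph with the API
# exercised by the tests above (TF1 / slim style).
import tensorflow as tf
from nets.mobilenet import mobilenet_v2

images = tf.placeholder(tf.float32, (1, 224, 224, 3))
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):
    logits, endpoints = mobilenet_v2.mobilenet(images, num_classes=1001)
probabilities = tf.nn.softmax(logits)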
# Copyright (c) 2017-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """VGG16 from https://arxiv.org/abs/1409.1556."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>core.config cfg<def_stmt>add_VGG16_conv5_body model<block_start>model.Conv('data' 'conv1_1' 3 64 3 pad=1 stride=1)<line_sep>model.Relu('conv1_1' 'conv1_1')<line_sep>model.Conv('conv1_1' 'conv1_2' 64 64 3 pad=1 stride=1)<line_sep>model.Relu('conv1_2' 'conv1_2')<line_sep>model.MaxPool('conv1_2' 'pool1' kernel=2 pad=0 stride=2)<line_sep>model.Conv('pool1' 'conv2_1' 64 128 3 pad=1 stride=1)<line_sep>model.Relu('conv2_1' 'conv2_1')<line_sep>model.Conv('conv2_1' 'conv2_2' 128 128 3 pad=1 stride=1)<line_sep>model.Relu('conv2_2' 'conv2_2')<line_sep>model.MaxPool('conv2_2' 'pool2' kernel=2 pad=0 stride=2)<line_sep>model.StopGradient('pool2' 'pool2')<line_sep>model.Conv('pool2' 'conv3_1' 128 256 3 pad=1 stride=1)<line_sep>model.Relu('conv3_1' 'conv3_1')<line_sep>model.Conv('conv3_1' 'conv3_2' 256 256 3 pad=1 stride=1)<line_sep>model.Relu('conv3_2' 'conv3_2')<line_sep>model.Conv('conv3_2' 'conv3_3' 256 256 3 pad=1 stride=1)<line_sep>model.Relu('conv3_3' 'conv3_3')<line_sep>model.MaxPool('conv3_3' 'pool3' kernel=2 pad=0 stride=2)<line_sep>model.Conv('pool3' 'conv4_1' 256 512 3 pad=1 stride=1)<line_sep>model.Relu('conv4_1' 'conv4_1')<line_sep>model.Conv('conv4_1' 'conv4_2' 512 512 3 pad=1 stride=1)<line_sep>model.Relu('conv4_2' 'conv4_2')<line_sep>model.Conv('conv4_2' 'conv4_3' 512 512 3 pad=1 stride=1)<line_sep>model.Relu('conv4_3' 'conv4_3')<line_sep>model.MaxPool('conv4_3' 'pool4' kernel=2 pad=0 stride=2)<line_sep>model.Conv('pool4' 'conv5_1' 512 512 3 pad=1 stride=1)<line_sep>model.Relu('conv5_1' 'conv5_1')<line_sep>model.Conv('conv5_1' 'conv5_2' 512 512 3 pad=1 stride=1)<line_sep>model.Relu('conv5_2' 'conv5_2')<line_sep>model.Conv('conv5_2' 'conv5_3' 512 512 3 pad=1 stride=1)<line_sep>blob_out=model.Relu('conv5_3' 'conv5_3')<line_sep><return>blob_out 512 1./16.<block_end><def_stmt>add_VGG16_roi_fc_head model blob_in dim_in spatial_scale<block_start>model.RoIFeatureTransform(blob_in 'pool5' blob_rois='rois' method=cfg.FAST_RCNN.ROI_XFORM_METHOD resolution=7 sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO spatial_scale=spatial_scale)<line_sep>model.FC('pool5' 'fc6' dim_in<times>7<times>7 4096)<line_sep>model.Relu('fc6' 'fc6')<line_sep>model.FC('fc6' 'fc7' 4096 4096)<line_sep>blob_out=model.Relu('fc7' 'fc7')<line_sep><return>blob_out 4096<block_end>
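A wiring sketch for the two builders above; `model` stands for a Detectron-style model helper (an object exposing Conv/Relu/MaxPool/FC/RoIFeatureTransform), which is not constructed here:

# Editorial sketch: how the body and head builders above are meant to compose.
# `model` is assumed to be a Detectron model-helper object; it is not built here.
def add_VGG16_fast_rcnn_trunk(model):
    blob_conv, dim_conv, spatial_scale = add_VGG16_conv5_body(model)
    blob_fc, dim_fc = add_VGG16_roi_fc_head(model, blob_conv, dim_conv, spatial_scale)
    return blob_fc, dim_fc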