id | code | code_compressed
---|---|---
500 | # Copyright (c) 2019 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.SimObject import SimObject
from m5.util.fdthelper import *
class Display(SimObject):
type = "Display"
cxx_header = "dev/arm/display.hh"
cxx_class = "gem5::Display"
clock_frequency = Param.Unsigned("clock-frequency property")
hactive = Param.Unsigned("hactive property")
vactive = Param.Unsigned("vactive property")
hfront_porch = Param.Unsigned("hfront-porch property")
hback_porch = Param.Unsigned("hback-porch property")
hsync_len = Param.Unsigned("hsync-len property")
vfront_porch = Param.Unsigned("vfront-porch property")
vback_porch = Param.Unsigned("vback-porch property")
vsync_len = Param.Unsigned("vsync-len property")
_endpoint_node = None
def endpointPhandle(self):
return "encoder_endpoint"
def endpointNode(self):
assert self._endpoint_node is not None
return self._endpoint_node
def generateDeviceTree(self, state):
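# The yielded fragment has roughly this shape:
# virt-encoder {
#     compatible = "drm,virtual-encoder";
#     display-timings { native-mode = <&timing>; <timing subnode>; };
#     port { endpoint { ... }; };
# };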
# timing node
timing_node = FdtNode(self.METHOD_NAME())
timing_node.append(
FdtPropertyWords("clock-frequency", [self.clock_frequency])
)
timing_node.append(FdtPropertyWords("hactive", [self.hactive]))
timing_node.append(FdtPropertyWords("vactive", [self.vactive]))
timing_node.append(
FdtPropertyWords("hfront-porch", [self.hfront_porch])
)
timing_node.append(FdtPropertyWords("hback-porch", [self.hback_porch]))
timing_node.append(FdtPropertyWords("hsync-len", [self.hsync_len]))
timing_node.append(
FdtPropertyWords("vfront-porch", [self.vfront_porch])
)
timing_node.append(FdtPropertyWords("vback-porch", [self.vback_porch]))
timing_node.append(FdtPropertyWords("vsync-len", [self.vsync_len]))
timing_node.appendPhandle(self.METHOD_NAME())
# display timing node
dispt_node = FdtNode("display-timings")
dispt_node.append(
FdtPropertyWords("native-mode", state.phandle(self.METHOD_NAME()))
)
dispt_node.append(timing_node)
# endpoint node
endpoint_node = FdtNode("endpoint")
endpoint_node.appendPhandle(self.endpointPhandle())
# Assign node so that it can be retrieved
self._endpoint_node = endpoint_node
# port node
port_node = FdtNode("port")
port_node.append(endpoint_node)
# Virt-encoder
node = FdtNode("virt-encoder")
node.appendCompatible(["drm,virtual-encoder"])
node.append(dispt_node)
node.append(port_node)
yield node
class Display1080p(Display):
clock_frequency = 148500000
hactive = 1920
vactive = 1080
hfront_porch = 148
hback_porch = 88
hsync_len = 44
vfront_porch = 36
vback_porch = 4
vsync_len = 5
def METHOD_NAME(self):
return "timing_1080p60" | null |
501 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateLoadBalancerHTTPListenerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'CreateLoadBalancerHTTPListener','ens')
self.set_method('POST')
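# The get_/set_ pairs below read from and write to the request's
# query-parameter map; the core SDK serializes these parameters into the
# signed HTTP query string when the request is executed.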
def METHOD_NAME(self): # String
return self.get_query_params().get('ListenerForward')
def set_ListenerForward(self, ListenerForward): # String
self.add_query_param('ListenerForward', ListenerForward)
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_XForwardedFor(self): # String
return self.get_query_params().get('XForwardedFor')
def set_XForwardedFor(self, XForwardedFor): # String
self.add_query_param('XForwardedFor', XForwardedFor)
def get_HealthCheckURI(self): # String
return self.get_query_params().get('HealthCheckURI')
def set_HealthCheckURI(self, HealthCheckURI): # String
self.add_query_param('HealthCheckURI', HealthCheckURI)
def get_HealthCheck(self): # String
return self.get_query_params().get('HealthCheck')
def set_HealthCheck(self, HealthCheck): # String
self.add_query_param('HealthCheck', HealthCheck)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckDomain(self): # String
return self.get_query_params().get('HealthCheckDomain')
def set_HealthCheckDomain(self, HealthCheckDomain): # String
self.add_query_param('HealthCheckDomain', HealthCheckDomain)
def get_RequestTimeout(self): # Integer
return self.get_query_params().get('RequestTimeout')
def set_RequestTimeout(self, RequestTimeout): # Integer
self.add_query_param('RequestTimeout', RequestTimeout)
def get_LoadBalancerId(self): # String
return self.get_query_params().get('LoadBalancerId')
def set_LoadBalancerId(self, LoadBalancerId): # String
self.add_query_param('LoadBalancerId', LoadBalancerId)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_Scheduler(self): # String
return self.get_query_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_query_param('Scheduler', Scheduler)
def get_ForwardPort(self): # Integer
return self.get_query_params().get('ForwardPort')
def set_ForwardPort(self, ForwardPort): # Integer
self.add_query_param('ForwardPort', ForwardPort)
def get_ListenerPort(self): # Integer
return self.get_query_params().get('ListenerPort')
def set_ListenerPort(self, ListenerPort): # Integer
self.add_query_param('ListenerPort', ListenerPort)
def get_IdleTimeout(self): # Integer
return self.get_query_params().get('IdleTimeout')
def set_IdleTimeout(self, IdleTimeout): # Integer
self.add_query_param('IdleTimeout', IdleTimeout)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort)
def get_HealthCheckHttpCode(self): # String
return self.get_query_params().get('HealthCheckHttpCode')
def set_HealthCheckHttpCode(self, HealthCheckHttpCode): # String
self.add_query_param('HealthCheckHttpCode', HealthCheckHttpCode) | null |
502 | import asyncio
import json
import unittest
from typing import Awaitable, Optional
from unittest.mock import patch
import aiohttp
from aioresponses import aioresponses
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.web_assistant.auth import AuthBase
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, RESTResponse, WSRequest
from hummingbot.core.web_assistant.connections.rest_connection import RESTConnection
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.rest_post_processors import RESTPostProcessorBase
from hummingbot.core.web_assistant.rest_pre_processors import RESTPreProcessorBase
class RESTAssistantTest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
@aioresponses()
def test_rest_assistant_call_with_pre_and_post_processing(self, mocked_api):
url = "https://www.test.com/url"
resp = {"one": 1}
pre_processor_ran = False
post_processor_ran = False
mocked_api.get(url, body=json.dumps(resp).encode())
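# aioresponses intercepts the aiohttp GET to this URL and replies with the
# canned JSON body instead of touching the network.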
class PreProcessor(RESTPreProcessorBase):
async def pre_process(self, request: RESTRequest) -> RESTRequest:
nonlocal pre_processor_ran
pre_processor_ran = True
return request
class PostProcessor(RESTPostProcessorBase):
async def METHOD_NAME(self, response: RESTResponse) -> RESTResponse:
nonlocal post_processor_ran
post_processor_ran = True
return response
pre_processors = [PreProcessor()]
post_processors = [PostProcessor()]
connection = RESTConnection(aiohttp.ClientSession())
assistant = RESTAssistant(
connection=connection,
throttler=AsyncThrottler(rate_limits=[]),
rest_pre_processors=pre_processors,
rest_post_processors=post_processors)
req = RESTRequest(method=RESTMethod.GET, url=url)
ret = self.async_run_with_timeout(assistant.call(req))
ret_json = self.async_run_with_timeout(ret.json())
self.assertEqual(resp, ret_json)
self.assertTrue(pre_processor_ran)
self.assertTrue(post_processor_ran)
@patch("hummingbot.core.web_assistant.connections.rest_connection.RESTConnection.call")
def test_rest_assistant_authenticates(self, mocked_call):
url = "https://www.test.com/url"
resp = {"one": 1}
call_request: Optional[RESTRequest] = None
auth_header = {"authenticated": True}
async def register_request_and_return(request: RESTRequest):
nonlocal call_request
call_request = request
return resp
mocked_call.side_effect = register_request_and_return
class AuthDummy(AuthBase):
async def rest_authenticate(self, request: RESTRequest) -> RESTRequest:
request.headers = auth_header
return request
async def ws_authenticate(self, request: WSRequest) -> WSRequest:
pass
connection = RESTConnection(aiohttp.ClientSession())
assistant = RESTAssistant(connection, throttler=AsyncThrottler(rate_limits=[]), auth=AuthDummy())
req = RESTRequest(method=RESTMethod.GET, url=url)
auth_req = RESTRequest(method=RESTMethod.GET, url=url, is_auth_required=True)
self.async_run_with_timeout(assistant.call(req))
self.assertIsNotNone(call_request)
self.assertIsNone(call_request.headers)
self.async_run_with_timeout(assistant.call(auth_req))
self.assertIsNotNone(call_request)
self.assertIsNotNone(call_request.headers)
self.assertEqual(call_request.headers, auth_header) | null |
503 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os, sys, traceback
import inspect, string
import warnings
import IECore
## Set the environment variable and the current LevelFilteredMessageHandler.
# Parameters:
# level: a string with the name of the log level as defined in MessageHandler.Level.
#
# This function sets the $IECORE_LOG_LEVEL environment variable, so child processes will inherit the log level.
# If the current message handler is also a LevelFilteredMessageHandler, this function pops
# it from the stack and registers the new one.
#
## \ingroup python
def setLogLevelByName( levelName ):
IECore.setLogLevel( IECore.MessageHandler.stringAsLevel( levelName ) )
## Set the environment variable and the current LevelFilteredMessageHandler.
# Parameters:
# level: MessageHandler.Level value.
#
# This function sets the $IECORE_LOG_LEVEL environment variable, so child processes will inherit the log level.
# If the current message handler is also a LevelFilteredMessageHandler, this function pops
# it from the stack and registers the new one.
## \ingroup python
def setLogLevel( level ):
assert( isinstance( level, IECore.MessageHandler.Level ) and level!=IECore.MessageHandler.Level.Invalid )
os.environ["IECORE_LOG_LEVEL"] = IECore.MessageHandler.levelAsString( level )
current = IECore.MessageHandler.currentHandler()
if not isinstance( current, IECore.LevelFilteredMessageHandler ) :
IECore.msg( IECore.Msg.Level.Warning, "IECore.setLogLevel", "Failed to set log level - current handler is not a LevelFilteredMessageHandler" )
return
current.setLevel( level )
IECore.debug("setLogLevel(", level, ")")
def __getCallStr(frame):
return frame.f_globals.get("__name__", frame.f_globals.get("__file__", "N/A"))
def __getCallContext(frame = None, withLineNumber = False):
if frame is None:
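# Two frames back: skip this helper and the public logging function that
# called it, so the reported context is the caller's module.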
f = inspect.currentframe().f_back.f_back
else:
f = frame
callStr = __getCallStr(f)
if withLineNumber:
callStr += " #" + str(f.f_lineno)
return callStr
## Helper function to track difficult errors.
# It prints the call stack, giving the module name and the line number.
## \ingroup python
def showCallStack():
f = inspect.currentframe().f_back.f_back
index = 0
callstack = "Callstack:\n"
while f is not None:
callstack += "> " + str(index) + ": " + __getCallStr(f) + " #" + str(f.f_lineno) + "\n"
f = f.f_back
index += 1
IECore.Msg.output(IECore.Msg.Level.Debug, __getCallContext( withLineNumber = True ), callstack )
## Use this function to get information about the context where the exception happened.
# Returns a tuple of strings (location, stack trace) for the captured exception.
## \ingroup python
def exceptionInfo():
(exceptionType, exception, trace) = sys.exc_info()
etb = traceback.extract_tb(trace)
exceptionType = str(exceptionType.__name__) + ": " + str(exception)
exceptInfo = ""
for (module, line, function, location) in etb:
exceptInfo += " File " + str(module) + ", line " + str(line) + ", in " + str(function) + "\n> " + str(location) + "\n"
return ( __getCallContext( withLineNumber = True ), "Exception traceback:\n" + exceptInfo + exceptionType)
## Sends debug messages to the current message handler and appends a full description of the caught exception.
# Parameters:
# Any strings or objects; they are converted to strings and separated by spaces.
## \ingroup python
def METHOD_NAME(*args):
# same as debug
stdStr = " ".join(map(str, args))
(exceptionType, exception, trace) = sys.exc_info()
etb = traceback.extract_tb(trace)
exceptionType = "> " + str(exceptionType.__name__) + ": " + str(exception)
exceptInfo = ""
for (module, line, function, location) in etb:
exceptInfo += "> File " + str(module) + ", line " + str(line) + ", in " + str(function) + "\n> " + str(location) + "\n"
IECore.Msg.output(IECore.Msg.Level.Debug, __getCallContext( withLineNumber = True ), "[EXCEPTION CAPTURED] " + stdStr + "\n> Exception traceback:\n" + exceptInfo + exceptionType)
## Sends debug messages to the current message handler.
# Every message includes information about the module and line number from where this function was called.
# Parameters:
# Any strings or objects; they are converted to strings and separated by spaces.
## \ingroup python
def debug(*args):
stdStr = " ".join(map(str, args))
IECore.Msg.output(IECore.Msg.Level.Debug, __getCallContext( withLineNumber = True ), stdStr )
# Sends warning messages to the current message handler.
# Parameters:
# Any strings or objects; they are converted to strings and separated by spaces.
## \ingroup python
def warning(*args):
stdStr = " ".join(map(str, args))
IECore.Msg.output(IECore.Msg.Level.Warning, __getCallContext(), stdStr )
# Sends info messages to the current message handler.
# Parameters:
# Any strings or objects; they are converted to strings and separated by spaces.
## \ingroup python
def info(*args):
stdStr = " ".join(map(str, args))
IECore.Msg.output(IECore.Msg.Level.Info, __getCallContext(), stdStr )
# Sends error messages to the current message handler.
# Parameters:
# Any strings or objects; they are converted to strings and separated by spaces.
## \ingroup python
def error(*args):
stdStr = " ".join(map(str, args))
IECore.Msg.output(IECore.Msg.Level.Error, __getCallContext(), stdStr )
__all__ = [ "setLogLevelByName", "setLogLevel", "showCallStack",
"exceptionInfo", "debugException", "debug", "warning", "info", "error",
] | null |
504 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTrafficMarkingPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTrafficMarkingPolicy')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_TrafficMarkingPolicyDescription(self): # String
return self.get_query_params().get('TrafficMarkingPolicyDescription')
def set_TrafficMarkingPolicyDescription(self, TrafficMarkingPolicyDescription): # String
self.add_query_param('TrafficMarkingPolicyDescription', TrafficMarkingPolicyDescription)
def get_TrafficMarkingPolicyName(self): # String
return self.get_query_params().get('TrafficMarkingPolicyName')
def set_TrafficMarkingPolicyName(self, TrafficMarkingPolicyName): # String
self.add_query_param('TrafficMarkingPolicyName', TrafficMarkingPolicyName)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_TrafficMatchRuless(self): # RepeatList
return self.get_query_params().get('TrafficMatchRules')
def set_TrafficMatchRuless(self, TrafficMatchRules): # RepeatList
for depth1 in range(len(TrafficMatchRules)):
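# RepeatList parameters are flattened into 1-based indexed query keys,
# e.g. TrafficMatchRules.1.Protocol and TrafficMatchRules.1.DstPortRange.1.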
if TrafficMatchRules[depth1].get('DstPortRange') is not None:
for depth2 in range(len(TrafficMatchRules[depth1].get('DstPortRange'))):
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.DstPortRange.' + str(depth2 + 1), TrafficMatchRules[depth1].get('DstPortRange')[depth2])
if TrafficMatchRules[depth1].get('MatchDscp') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.MatchDscp', TrafficMatchRules[depth1].get('MatchDscp'))
if TrafficMatchRules[depth1].get('Protocol') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.Protocol', TrafficMatchRules[depth1].get('Protocol'))
if TrafficMatchRules[depth1].get('TrafficMatchRuleDescription') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleDescription', TrafficMatchRules[depth1].get('TrafficMatchRuleDescription'))
if TrafficMatchRules[depth1].get('SrcPortRange') is not None:
for depth2 in range(len(TrafficMatchRules[depth1].get('SrcPortRange'))):
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.SrcPortRange.' + str(depth2 + 1), TrafficMatchRules[depth1].get('SrcPortRange')[depth2])
if TrafficMatchRules[depth1].get('DstCidr') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.DstCidr', TrafficMatchRules[depth1].get('DstCidr'))
if TrafficMatchRules[depth1].get('TrafficMatchRuleName') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.TrafficMatchRuleName', TrafficMatchRules[depth1].get('TrafficMatchRuleName'))
if TrafficMatchRules[depth1].get('SrcCidr') is not None:
self.add_query_param('TrafficMatchRules.' + str(depth1 + 1) + '.SrcCidr', TrafficMatchRules[depth1].get('SrcCidr'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def METHOD_NAME(self): # String
return self.get_query_params().get('TransitRouterId')
def set_TransitRouterId(self, TransitRouterId): # String
self.add_query_param('TransitRouterId', TransitRouterId)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_MarkingDscp(self): # Integer
return self.get_query_params().get('MarkingDscp')
def set_MarkingDscp(self, MarkingDscp): # Integer
self.add_query_param('MarkingDscp', MarkingDscp) | null |
505 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknlb.endpoint import endpoint_data
class CreateServerGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Nlb', '2022-04-30', 'CreateServerGroup','nlb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ServerGroupName(self): # String
return self.get_body_params().get('ServerGroupName')
def set_ServerGroupName(self, ServerGroupName): # String
self.add_body_params('ServerGroupName', ServerGroupName)
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_PreserveClientIpEnabled(self): # Boolean
return self.get_body_params().get('PreserveClientIpEnabled')
def set_PreserveClientIpEnabled(self, PreserveClientIpEnabled): # Boolean
self.add_body_params('PreserveClientIpEnabled', PreserveClientIpEnabled)
def get_HealthCheckConfig(self): # Struct
return self.get_body_params().get('HealthCheckConfig')
def set_HealthCheckConfig(self, HealthCheckConfig): # Struct
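# Struct parameters are flattened into dotted body keys such as
# HealthCheckConfig.HealthCheckType; list-valued members get 1-based
# indices (e.g. HealthCheckConfig.HealthCheckHttpCode.1).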
if HealthCheckConfig.get('HealthCheckEnabled') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckEnabled', HealthCheckConfig.get('HealthCheckEnabled'))
if HealthCheckConfig.get('HealthCheckType') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckType', HealthCheckConfig.get('HealthCheckType'))
if HealthCheckConfig.get('HealthCheckConnectPort') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckConnectPort', HealthCheckConfig.get('HealthCheckConnectPort'))
if HealthCheckConfig.get('HealthyThreshold') is not None:
self.add_body_params('HealthCheckConfig.HealthyThreshold', HealthCheckConfig.get('HealthyThreshold'))
if HealthCheckConfig.get('UnhealthyThreshold') is not None:
self.add_body_params('HealthCheckConfig.UnhealthyThreshold', HealthCheckConfig.get('UnhealthyThreshold'))
if HealthCheckConfig.get('HealthCheckConnectTimeout') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckConnectTimeout', HealthCheckConfig.get('HealthCheckConnectTimeout'))
if HealthCheckConfig.get('HealthCheckInterval') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckInterval', HealthCheckConfig.get('HealthCheckInterval'))
if HealthCheckConfig.get('HealthCheckDomain') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckDomain', HealthCheckConfig.get('HealthCheckDomain'))
if HealthCheckConfig.get('HealthCheckUrl') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckUrl', HealthCheckConfig.get('HealthCheckUrl'))
if HealthCheckConfig.get('HealthCheckHttpCode') is not None:
for index1, value1 in enumerate(HealthCheckConfig.get('HealthCheckHttpCode')):
self.add_body_params('HealthCheckConfig.HealthCheckHttpCode.' + str(index1 + 1), value1)
if HealthCheckConfig.get('HttpCheckMethod') is not None:
self.add_body_params('HealthCheckConfig.HttpCheckMethod', HealthCheckConfig.get('HttpCheckMethod'))
def get_AddressIPVersion(self): # String
return self.get_body_params().get('AddressIPVersion')
def set_AddressIPVersion(self, AddressIPVersion): # String
self.add_body_params('AddressIPVersion', AddressIPVersion)
def get_Scheduler(self): # String
return self.get_body_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_body_params('Scheduler', Scheduler)
def get_ResourceGroupId(self): # String
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_Protocol(self): # String
return self.get_body_params().get('Protocol')
def set_Protocol(self, Protocol): # String
self.add_body_params('Protocol', Protocol)
def get_Tags(self): # RepeatList
return self.get_body_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_DryRun(self): # Boolean
return self.get_body_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_body_params('DryRun', DryRun)
def get_ConnectionDrainEnabled(self): # Boolean
return self.get_body_params().get('ConnectionDrainEnabled')
def set_ConnectionDrainEnabled(self, ConnectionDrainEnabled): # Boolean
self.add_body_params('ConnectionDrainEnabled', ConnectionDrainEnabled)
def METHOD_NAME(self): # Integer
return self.get_body_params().get('ConnectionDrainTimeout')
def set_ConnectionDrainTimeout(self, ConnectionDrainTimeout): # Integer
self.add_body_params('ConnectionDrainTimeout', ConnectionDrainTimeout)
def get_AnyPortEnabled(self): # Boolean
return self.get_body_params().get('AnyPortEnabled')
def set_AnyPortEnabled(self, AnyPortEnabled): # Boolean
self.add_body_params('AnyPortEnabled', AnyPortEnabled)
def get_ServerGroupType(self): # String
return self.get_body_params().get('ServerGroupType')
def set_ServerGroupType(self, ServerGroupType): # String
self.add_body_params('ServerGroupType', ServerGroupType)
def get_VpcId(self): # String
return self.get_body_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_body_params('VpcId', VpcId) | null |
506 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class CreateJobTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'CreateJobTemplate')
self.set_method('GET')
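# This API is invoked with GET, so every parameter below travels in the
# query string via add_query_param.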
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self): # String
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self, StderrRedirectPath): # String
self.add_query_param('StderrRedirectPath', StderrRedirectPath)
def get_ClockTime(self): # String
return self.get_query_params().get('ClockTime')
def set_ClockTime(self, ClockTime): # String
self.add_query_param('ClockTime', ClockTime)
def get_CommandLine(self): # String
return self.get_query_params().get('CommandLine')
def set_CommandLine(self, CommandLine): # String
self.add_query_param('CommandLine', CommandLine)
def get_ArrayRequest(self): # String
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self, ArrayRequest): # String
self.add_query_param('ArrayRequest', ArrayRequest)
def get_UnzipCmd(self): # String
return self.get_query_params().get('UnzipCmd')
def set_UnzipCmd(self, UnzipCmd): # String
self.add_query_param('UnzipCmd', UnzipCmd)
def get_PackagePath(self): # String
return self.get_query_params().get('PackagePath')
def set_PackagePath(self, PackagePath): # String
self.add_query_param('PackagePath', PackagePath)
def get_Mem(self): # String
return self.get_query_params().get('Mem')
def set_Mem(self, Mem): # String
self.add_query_param('Mem', Mem)
def get_StdoutRedirectPath(self): # String
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self, StdoutRedirectPath): # String
self.add_query_param('StdoutRedirectPath', StdoutRedirectPath)
def METHOD_NAME(self): # String
return self.get_query_params().get('Variables')
def set_Variables(self, Variables): # String
self.add_query_param('Variables', Variables)
def get_RunasUser(self): # String
return self.get_query_params().get('RunasUser')
def set_RunasUser(self, RunasUser): # String
self.add_query_param('RunasUser', RunasUser)
def get_ReRunable(self): # Boolean
return self.get_query_params().get('ReRunable')
def set_ReRunable(self, ReRunable): # Boolean
self.add_query_param('ReRunable', ReRunable)
def get_Thread(self): # Integer
return self.get_query_params().get('Thread')
def set_Thread(self, Thread): # Integer
self.add_query_param('Thread', Thread)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_Gpu(self): # Integer
return self.get_query_params().get('Gpu')
def set_Gpu(self, Gpu): # Integer
self.add_query_param('Gpu', Gpu)
def get_WithUnzipCmd(self): # Boolean
return self.get_query_params().get('WithUnzipCmd')
def set_WithUnzipCmd(self, WithUnzipCmd): # Boolean
self.add_query_param('WithUnzipCmd', WithUnzipCmd)
def get_Node(self): # Integer
return self.get_query_params().get('Node')
def set_Node(self, Node): # Integer
self.add_query_param('Node', Node)
def get_Task(self): # Integer
return self.get_query_params().get('Task')
def set_Task(self, Task): # Integer
self.add_query_param('Task', Task)
def get_InputFileUrl(self): # String
return self.get_query_params().get('InputFileUrl')
def set_InputFileUrl(self, InputFileUrl): # String
self.add_query_param('InputFileUrl', InputFileUrl)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Queue(self): # String
return self.get_query_params().get('Queue')
def set_Queue(self, Queue): # String
self.add_query_param('Queue', Queue) | null |
507 | from __future__ import annotations
import random
from typing import Literal, Optional
from htmltools import Tag, TagAttrs, TagAttrValue, TagChild, TagList, css, div
from htmltools import svg as svgtags
from htmltools import tags
# from ._color import get_color_contrast
from ._css_unit import CssUnit, as_css_padding, as_css_unit
from ._fill import as_fill_item, as_fillable_container
from ._htmldeps import sidebar_dependency
from ._utils import consolidate_attrs, trinary
class Sidebar:
def __init__(
self,
tag: Tag,
collapse_tag: Optional[Tag],
position: Literal["left", "right"],
open: Literal["desktop", "open", "closed", "always"],
width: CssUnit,
max_height_mobile: Optional[str | float],
color_fg: Optional[str],
color_bg: Optional[str],
):
self.tag = tag
self.collapse_tag = collapse_tag
self.position = position
self.open = open
self.width = width
self.max_height_mobile = max_height_mobile
self.color_fg = color_fg
self.color_bg = color_bg
# # This does not contain the `collapse_tag`
# # The `Sidebar` class should use its fields, not this method
# def tagify(self) -> Tag:
# return self.tag.tagify()
def sidebar(
*args: TagChild | TagAttrs,
width: CssUnit = 250,
position: Literal["left", "right"] = "left",
open: Literal["desktop", "open", "closed", "always"] = "desktop",
id: Optional[str] = None,
title: Optional[TagChild | str] = None,
bg: Optional[str] = None,
fg: Optional[str] = None,
class_: Optional[str] = None, # TODO-future; Consider using `**kwargs` instead
max_height_mobile: Optional[str | float] = None,
) -> Sidebar:
# See [this article](https://rstudio.github.io/bslib/articles/sidebars.html)
# to learn more.
# TODO-future; If color contrast is implemented. Docs for `bg` and `fg`:
# If only one of either is provided, an
# accessible contrasting color is provided for the opposite color, e.g. setting
# `bg` chooses an appropriate `fg` color.
# TODO-future; validate `open`, bg, fg, class_, max_height_mobile
if id is None and open != "always":
# but always provide id when collapsible for accessibility reasons
id = f"bslib-sidebar-{random.randint(1000, 10000)}"
# TODO-future; implement
# if fg is None and bg is not None:
# fg = get_color_contrast(bg)
# if bg is None and fg is not None:
# bg = get_color_contrast(fg)
if isinstance(title, (str, int, float)):
title = div(str(title), class_="sidebar-title")
collapse_tag = None
# Code
if open != "always":
collapse_tag = tags.button(
_collapse_icon(),
class_="collapse-toggle",
type="button",
title="Toggle sidebar",
aria_expanded=trinary(open in ["open", "desktop"]),
aria_controls=id,
)
tag = div(
div(
title,
{"class": "sidebar-content"},
*args,
),
{"class": "bslib-sidebar-input"} if id is not None else None,
{"class": "sidebar"},
id=id,
role="complementary",
class_=class_,
)
return Sidebar(
tag=tag,
collapse_tag=collapse_tag,
position=position,
open=open,
width=width,
max_height_mobile=max_height_mobile,
color_fg=fg,
color_bg=bg,
)
# TODO-maindocs; @add_example()
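# A usage sketch (argument values are illustrative, not from this module):
#   page = layout_sidebar(
#       sidebar("Filters", width=300, open="open"),
#       PanelMain(attrs={}, children=["Main content"]),
#   )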
def layout_sidebar(
sidebar: Sidebar,
content: PanelMain,
fillable: bool = True,
fill: bool = True,
bg: Optional[str] = None,
fg: Optional[str] = None,
border: Optional[bool] = None,
border_radius: Optional[bool] = None,
border_color: Optional[str] = None,
gap: Optional[CssUnit] = None,
padding: Optional[CssUnit | list[CssUnit]] = None,
height: Optional[CssUnit] = None,
**kwargs: TagAttrValue,
) -> Tag:
attrs, _ = consolidate_attrs(**content.attrs, **kwargs)
main = div(
{
"role": "main",
"class": f"main{' bslib-gap-spacing' if fillable else ''}",
""
"style": css(
background_color=bg,
color=fg,
gap=as_css_unit(gap),
padding=as_css_padding(padding),
),
},
attrs,
content,
)
if fillable:
main = as_fillable_container(main)
max_height_mobile = sidebar.max_height_mobile or (
"250px" if height is None else "50%"
)
res = div(
{"class": "bslib-sidebar-layout"},
{"class": "sidebar-right"} if sidebar.position == "right" else None,
{"class": "sidebar-collapsed"} if sidebar.open == "closed" else None,
main,
sidebar.tag,
sidebar.collapse_tag,
sidebar_dependency(),
_sidebar_init_js(),
data_bslib_sidebar_init="true" if sidebar.open != "always" else None,
data_bslib_sidebar_open=sidebar.open,
data_bslib_sidebar_border=trinary(border),
data_bslib_sidebar_border_radius=trinary(border_radius),
style=css(
__bslib_sidebar_width=as_css_unit(sidebar.width),
__bslib_sidebar_bg=as_css_unit(sidebar.color_bg),
__bslib_sidebar_fg=as_css_unit(sidebar.color_fg),
__bs_card_border_color=border_color,
height=as_css_unit(height),
__bslib_sidebar_max_height_mobile=as_css_unit(max_height_mobile),
),
)
if fill:
res = as_fill_item(res)
return res
# _sidebar_func = sidebar
def _collapse_icon() -> Tag:
return tags.svg(
svgtags.path(
fill_rule="evenodd",
d="M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z",
),
xmlns="http://www.w3.org/2000/svg",
viewBox="0 0 16 16",
class_="bi bi-chevron-down collapse-icon",
style="fill:currentColor;",
aria_hidden="true",
role="img",
)
def _sidebar_init_js() -> Tag:
# Note: if we want to avoid inline `<script>` tags in the future for
# initialization code, we might be able to do so by turning the sidebar layout
# container into a web component
return tags.script(
{"data-bslib-sidebar-init": True},
"bslib.Sidebar.initCollapsibleAll()",
)
###################################################################
class PanelSidebar:
# Store `attrs` for `layout_sidebar()` to retrieve
def __init__(
self, *args: TagChild | TagAttrs, width: int = 4, **kwargs: TagAttrValue
) -> None:
self.args = args
self.kwargs = kwargs
self.width = width
def METHOD_NAME(self, position: Literal["left", "right"] = "left") -> Sidebar:
return sidebar(
*self.args,
width=f"{int(self.width / 12 * 100)}%",
position=position,
open="always",
**self.kwargs,
)
# Hopefully this is never used. But it makes it Tagifiable to allow us to not expose
# `Sidebar` and `PanelSidebar` classes
def tagify(self) -> Tag:
return self.METHOD_NAME().tag.tagify()
class PanelMain:
# Store `attrs` for `layout_sidebar()` to retrieve
attrs: TagAttrs
# Return `children` in `layout_sidebar()` via `.tagify()` method
children: list[TagChild]
def __init__(self, *, attrs: TagAttrs, children: list[TagChild]) -> None:
self.attrs = attrs
self.children = children
def tagify(self) -> TagList:
return TagList(self.children).tagify() | null |
508 | from functools import partial
from pytest import raises
from ..argument import Argument
from ..field import Field
from ..scalars import String
from ..structures import NonNull
from .utils import MyLazyType
class MyInstance:
value = "value"
value_func = staticmethod(lambda: "value_func")
def value_method(self):
return "value_method"
def test_field_basic():
MyType = object()
args = {"my arg": Argument(True)}
def resolver():
return None
deprecation_reason = "Deprecated now"
description = "My Field"
my_default = "something"
field = Field(
MyType,
name="name",
args=args,
resolver=resolver,
description=description,
deprecation_reason=deprecation_reason,
default_value=my_default,
)
assert field.name == "name"
assert field.args == args
assert field.resolver == resolver
assert field.deprecation_reason == deprecation_reason
assert field.description == description
assert field.default_value == my_default
def test_field_required():
MyType = object()
field = Field(MyType, required=True)
assert isinstance(field.type, NonNull)
assert field.type.of_type == MyType
def test_field_default_value_not_callable():
MyType = object()
try:
Field(MyType, default_value=lambda: True)
except AssertionError as e:
# substring comparison for py 2/3 compatibility
assert "The default value can not be a function but received" in str(e)
def test_field_source():
MyType = object()
field = Field(MyType, source="value")
assert field.resolver(MyInstance(), None) == MyInstance.value
def test_field_source_dict_or_attr():
MyType = object()
field = Field(MyType, source="value")
assert field.resolver(MyInstance(), None) == MyInstance.value
assert field.resolver({"value": MyInstance.value}, None) == MyInstance.value
def test_field_with_lazy_type():
MyType = object()
field = Field(lambda: MyType)
assert field.type == MyType
def test_field_with_lazy_partial_type():
MyType = object()
field = Field(partial(lambda: MyType))
assert field.type == MyType
def METHOD_NAME():
field = Field("graphene.types.tests.utils.MyLazyType")
assert field.type == MyLazyType
def test_field_not_source_and_resolver():
MyType = object()
with raises(Exception) as exc_info:
Field(MyType, source="value", resolver=lambda: None)
assert (
str(exc_info.value)
== "A Field cannot have a source and a resolver in at the same time."
)
def test_field_source_func():
MyType = object()
field = Field(MyType, source="value_func")
assert field.resolver(MyInstance(), None) == MyInstance.value_func()
def test_field_source_method():
MyType = object()
field = Field(MyType, source="value_method")
assert field.resolver(MyInstance(), None) == MyInstance().value_method()
def test_field_source_as_argument():
MyType = object()
field = Field(MyType, source=String())
assert "source" in field.args
assert field.args["source"].type == String
def test_field_name_as_argument():
MyType = object()
field = Field(MyType, name=String())
assert "name" in field.args
assert field.args["name"].type == String
def test_field_source_argument_as_kw():
MyType = object()
deprecation_reason = "deprecated"
field = Field(
MyType,
b=NonNull(True),
c=Argument(None, deprecation_reason=deprecation_reason),
a=NonNull(False),
)
assert list(field.args) == ["b", "c", "a"]
assert isinstance(field.args["b"], Argument)
assert isinstance(field.args["b"].type, NonNull)
assert field.args["b"].type.of_type is True
assert isinstance(field.args["c"], Argument)
assert field.args["c"].type is None
assert field.args["c"].deprecation_reason == deprecation_reason
assert isinstance(field.args["a"], Argument)
assert isinstance(field.args["a"].type, NonNull)
assert field.args["a"].type.of_type is False | null |
509 | from __future__ import annotations
import logging
import os
from xia2.Decorators.DecoratorFactory import DecoratorFactory
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.bits import transpose_loggraph
from xia2.Wrappers.CCP4.Ctruncate import Ctruncate
from xia2.Wrappers.XIA.FrenchWilson import FrenchWilson
logger = logging.getLogger("xia2.Wrappers.CCP4.Truncate")
def Truncate(DriverType=None):
"""A factory for TruncateWrapper classes."""
if PhilIndex.params.ccp4.truncate.program == "ctruncate":
return Ctruncate(DriverType)
elif PhilIndex.params.ccp4.truncate.program == "cctbx":
return FrenchWilson(DriverType)
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, "ccp4")
class TruncateWrapper(CCP4DriverInstance.__class__):
"""A wrapper for Truncate, using the CCP4-ified Driver."""
def __init__(self):
# generic things
CCP4DriverInstance.__class__.__init__(self)
self.set_executable(os.path.join(os.environ.get("CBIN", ""), "truncate"))
self._anomalous = False
self._nres = 0
# should we do wilson scaling?
self._wilson = True
self._b_factor = 0.0
self._moments = None
self._wilson_fit_grad = 0.0
self._wilson_fit_grad_sd = 0.0
self._wilson_fit_m = 0.0
self._wilson_fit_m_sd = 0.0
self._wilson_fit_range = None
# numbers of reflections in and out, and number of absences
# counted
self._nref_in = 0
self._nref_out = 0
self._nabsent = 0
self._xmlout = None
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def set_wilson(self, wilson):
"""Set the use of Wilson scaling - if you set this to False
Wilson scaling will be switched off..."""
self._wilson = wilson
def get_xmlout(self):
return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
self.check_hklin()
self.check_hklout()
self.start()
if self._anomalous:
self.input("anomalous yes")
else:
self.input("anomalous no")
if self._nres:
self.input("nres %d" % self._nres)
if not self._wilson:
self.input("scale 1")
self.close_wait()
try:
self.check_for_errors()
self.check_ccp4_errors()
except RuntimeError:
try:
os.remove(self.get_hklout())
except Exception:
pass
raise RuntimeError("truncate failure")
# parse the output for interesting things, including the
# numbers of reflections in and out (isn't that a standard CCP4
# report?) and the number of absent reflections.
self._nref_in, self._nref_out = self.METHOD_NAME(
self.get_all_output()
)
# FIXME guess I should be reading this properly...
self._nabsent = self._nref_in - self._nref_out
for line in self.get_all_output():
if "Least squares straight line gives" in line:
list = line.replace("=", " ").split()
if "***" not in list[6]:
self._b_factor = float(list[6])
else:
logger.debug("no B factor available")
if "LSQ Line Gradient" in line:
self._wilson_fit_grad = float(line.split()[-1])
if self._wilson_fit_grad > 0:
logger.debug("Positive gradient but not much wilson plot")
if "Uncertainty in Gradient" in line:
self._wilson_fit_grad_sd = float(line.split()[-1])
if "X Intercept" in line:
self._wilson_fit_m = float(line.split()[-1])
if "Uncertainty in Intercept" in line:
self._wilson_fit_m_sd = float(line.split()[-1])
if "Resolution range" in line:
self._wilson_fit_range = list(map(float, line.split()[-2:]))
results = self.parse_ccp4_loggraph()
moments = transpose_loggraph(
results["Acentric Moments of E for k = 1,3,4,6,8"]
)
# keys we want in this are "Resln_Range" "1/resol^2" and
# MomentZ2. The last of these should be around two, but is
# likely to be a little different to this.
self._moments = moments
def get_b_factor(self):
return self._b_factor
def get_wilson_fit(self):
return (
self._wilson_fit_grad,
self._wilson_fit_grad_sd,
self._wilson_fit_m,
self._wilson_fit_m_sd,
)
def get_wilson_fit_range(self):
return self._wilson_fit_range
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
def METHOD_NAME(self, records):
"""Look to see how many reflections came in through HKLIN, and
how many went out again in HKLOUT."""
nref_in = 0
nref_out = 0
current_logical = None
for record in records:
if "Logical Name" in record:
current_logical = record.split()[2]
assert current_logical in ["HKLIN", "HKLOUT", "SYMINFO"]
if "Number of Reflections" in record:
if current_logical == "HKLIN":
nref_in = int(record.split()[-1])
elif current_logical == "HKLOUT":
nref_out = int(record.split()[-1])
return nref_in, nref_out
return TruncateWrapper() | null |
510 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateIPv6TranslatorEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateIPv6TranslatorEntry','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BackendIpv4Port(self): # Integer
return self.get_query_params().get('BackendIpv4Port')
def set_BackendIpv4Port(self, BackendIpv4Port): # Integer
self.add_query_param('BackendIpv4Port', BackendIpv4Port)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_EntryName(self): # String
return self.get_query_params().get('EntryName')
def set_EntryName(self, EntryName): # String
self.add_query_param('EntryName', EntryName)
def get_AclStatus(self): # String
return self.get_query_params().get('AclStatus')
def set_AclStatus(self, AclStatus): # String
self.add_query_param('AclStatus', AclStatus)
def get_EntryBandwidth(self): # Integer
return self.get_query_params().get('EntryBandwidth')
def set_EntryBandwidth(self, EntryBandwidth): # Integer
self.add_query_param('EntryBandwidth', EntryBandwidth)
def get_AclType(self): # String
return self.get_query_params().get('AclType')
def set_AclType(self, AclType): # String
self.add_query_param('AclType', AclType)
def get_AllocateIpv6Port(self): # Integer
return self.get_query_params().get('AllocateIpv6Port')
def set_AllocateIpv6Port(self, AllocateIpv6Port): # Integer
self.add_query_param('AllocateIpv6Port', AllocateIpv6Port)
def get_EntryDescription(self): # String
return self.get_query_params().get('EntryDescription')
def METHOD_NAME(self, EntryDescription): # String
self.add_query_param('EntryDescription', EntryDescription)
def get_BackendIpv4Addr(self): # String
return self.get_query_params().get('BackendIpv4Addr')
def set_BackendIpv4Addr(self, BackendIpv4Addr): # String
self.add_query_param('BackendIpv4Addr', BackendIpv4Addr)
def get_AclId(self): # String
return self.get_query_params().get('AclId')
def set_AclId(self, AclId): # String
self.add_query_param('AclId', AclId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransProtocol(self): # String
return self.get_query_params().get('TransProtocol')
def set_TransProtocol(self, TransProtocol): # String
self.add_query_param('TransProtocol', TransProtocol)
def get_Ipv6TranslatorId(self): # String
return self.get_query_params().get('Ipv6TranslatorId')
def set_Ipv6TranslatorId(self, Ipv6TranslatorId): # String
self.add_query_param('Ipv6TranslatorId', Ipv6TranslatorId) | null |
511 | # Copyright (c) 2004-2010 Mellanox Technologies LTD. All rights reserved.
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Author: Ahmed Awwad [email protected] Created: 2019-Jan
import binascii
class BinaryFile(object):
"""
Binary File class is dealing with r/w of binary files
"""
def __init__(self, file_path):
self._file_path = file_path
def read_byte(self, offset):
"""
Read 1B from the pci configuration
"""
return self.METHOD_NAME(1, offset)[0]
def read_word(self, offset):
"""
        Read 2B from the pci configuration
"""
bytes_list = self.METHOD_NAME(2, offset)
if bytes_list != []:
byte0 = "{0:x}".format(bytes_list[0]).zfill(2)
byte1 = "{0:x}".format(bytes_list[1]).zfill(2)
return int("{0}{1}".format(byte1, byte0), 16)
else:
return None
def read_long(self, offset):
"""
Reads 4B from the pci configuration
"""
bytes_list = self.METHOD_NAME(4, offset)
if bytes_list != []:
byte0 = "{0:x}".format(bytes_list[0]).zfill(2)
byte1 = "{0:x}".format(bytes_list[1]).zfill(2)
byte2 = "{0:x}".format(bytes_list[2]).zfill(2)
byte3 = "{0:x}".format(bytes_list[3]).zfill(2)
return int("{0}{1}{2}{3}".format(byte3, byte2, byte1, byte0), 16)
else:
return None
def METHOD_NAME(self, size, offset=0, skip_offset_list=None):
"""
        Read a given binary file and return the output as a list of byte values
        skip_offset_list is a list of addresses the function needs to skip; '00' is inserted for every skipped address
        Example: read(size=10, offset=0, skip_offset_list=[0x5]) reads from address 0x0 to address 0x9 and replaces the byte at address 0x5 with '00'
"""
try:
with open(self._file_path, "rb") as f:
if skip_offset_list:
data = binascii.unhexlify("") # Initialization
zero_binary = binascii.unhexlify("00") # Initialization
offset_interval = self._get_read_intervals(skip_offset_list, offset, size)
for interval_start, interval_size in offset_interval:
if interval_size: # read interval
f.seek(interval_start)
data += f.METHOD_NAME(interval_size)
else: # add '00' to data
data += zero_binary
else:
f.seek(offset)
data = f.METHOD_NAME(size)
except Exception as err:
raise RuntimeError("Failed to parse file: {0} for reading. ERROR: {1}".format(self._file_path, err))
data = binascii.hexlify(data)
bytes_as_string = self.chunkstring(data, 2)
bytes_list = []
for byte in bytes_as_string:
if byte != "":
bytes_list.append(int(byte, 16))
else:
bytes_list.append(None)
return bytes_list
@staticmethod
def _get_read_intervals(skip_offset_list, offset, size):
"""
        Prepares the skip list so that a range can be read while skipping some (dangerous) offsets
        Returns a list of (start, size) tuples; when size is 0 it means this offset is skipped
        Example: offset=0, size=10, skip list=[4, 5]
        returns [(0, 4), (4, 0), (5, 0), (6, 4)]
        """
        # remove out-of-range skip addresses (iterate over a copy so the list
        # is not mutated while being iterated)
        for skip_offset in list(skip_offset_list):
            if skip_offset not in range(offset, size + 1):
                skip_offset_list.remove(skip_offset)
read_interval_set = set(skip_offset_list)
# Add offset, offset+size to skip_list
read_interval_set.add(offset - 1)
read_interval_set.add(offset + size)
read_interval_list = sorted(list(read_interval_set))
result = []
for start, end in zip(read_interval_list[:-1], read_interval_list[1:]):
interval_start = start + 1
interval_end = end
result.append((interval_start, interval_end - interval_start)) # interval
result.append((interval_end, 0)) # skip interval
result = result[:-1] # remove the last tuple (offset+size, 0)
return sorted(set(result))
def write(self, bytes_list, size, offset=0):
"""
        Write data to a binary file
        bytes_list is a list of byte values; size caps the number of bytes written
"""
data_to_write = ""
for byte in bytes_list:
data_to_write += "{0:x}".format(byte).zfill(2)
if len(data_to_write) > size * 2:
raise RuntimeError("Failed to write data {0} invalid size".format(data_to_write))
bin_data = binascii.unhexlify(data_to_write) # Return the binary data represented by the hexadecimal string
try:
with open(self._file_path, "wb") as f:
f.seek(offset)
f.write(bin_data)
except Exception as e:
raise RuntimeError("Failed writing to a file: {0}. ERROR: {1}".format(self._file_path, e))
def chunkstring(self, string, length):
"""
Chunks the string to list of strings in the given length
"""
return (string[0 + i:length + i] for i in range(0, len(string), length)) | null |
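# --- Usage sketch (illustrative only; the path below is a hypothetical example) ---
# bf = BinaryFile("/sys/bus/pci/devices/0000:00:00.0/config")
# first_byte = bf.read_byte(0)   # 1B at offset 0
# vendor_id = bf.read_word(0)    # 2B, assembled little-endian
# dword = bf.read_long(0)        # 4B, assembled little-endian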
512 | # -*- coding: utf-8 -*-
"""Handle app url related tests.
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from secrets import token_hex
from django.urls import resolve, reverse
from test_plus.test import TestCase
class AppUrlsTestCase(TestCase):
"""Define tests for app urls."""
def setUp(self):
self.user = self.make_user()
def test_robotstxt_reverse(self):
"""Test the robotstxt url and check the reverse."""
self.assertEqual(reverse('robotstxt'), '/robots.txt')
def test_robotstxt_resolve(self):
"""Test the robotstxt url and check the resolution."""
self.assertEqual(resolve('/robots.txt').view_name, 'robotstxt')
self.assertEqual(resolve('/robots.txt/').view_name, 'robotstxt')
def test_sitemap_reverse(self):
"""Test the sitemap url and check the reverse."""
self.assertEqual(reverse('django.contrib.sitemaps.views.index'), '/sitemap.xml')
def test_sitemap_resolve(self):
"""Test the sitemap url and check the resolution."""
self.assertEqual(resolve('/sitemap.xml').view_name, 'django.contrib.sitemaps.views.index')
def test_email_settings_reverse(self):
"""Test the email_settings url and check the reverse."""
priv_key = token_hex(16)[:29]
self.assertEqual(reverse('email_settings', args=(priv_key, )), f'/settings/email/{priv_key}')
def test_email_settings_resolve(self):
"""Test the email_settings url and check the resolution."""
self.assertEqual(resolve('/settings/email/').view_name, 'email_settings')
def test_leaderboard_reverse(self):
"""Test the leaderboard url and check the reverse."""
self.assertEqual(reverse('leaderboard', args=('quarterly_earners', )), '/leaderboard/quarterly_earners')
def test_leaderboard_resolve(self):
"""Test the leaderboard url and check the resolution."""
self.assertEqual(resolve('/leaderboard/').view_name, 'leaderboard')
def test__leaderboard_reverse(self):
"""Test the _leaderboard url and check the reverse."""
self.assertEqual(reverse('_leaderboard'), '/leaderboard')
def test__leaderboard_resolve(self):
"""Test the _leaderboard url and check the resolution."""
self.assertEqual(resolve('/leaderboard').view_name, '_leaderboard')
def test_stats_reverse(self):
"""Test the stats url and check the reverse."""
self.assertEqual(reverse('stats'), '/_administration/stats/')
def test_stats_resolve(self):
"""Test the stats url and check the resolution."""
self.assertEqual(resolve('/_administration/stats/').view_name, 'stats')
def METHOD_NAME(self):
"""Test the explorer url and check the reverse."""
self.assertEqual(reverse('explorer'), '/explorer')
def test_explorer_resolve(self):
"""Test the explorer url and check the resolution."""
self.assertEqual(resolve('/explorer').view_name, 'explorer')
self.assertEqual(resolve('/explorer/').view_name, 'explorer')
def test_new_bounty_reverse(self):
"""Test the new_bounty url and check the reverse."""
self.assertEqual(reverse('new_bounty'), '/bounty/new')
def test_new_bounty_resolve(self):
"""Test the new_bounty url and check the resolution."""
self.assertEqual(resolve('/bounty/new').view_name, 'new_bounty')
self.assertEqual(resolve('/bounty/new/').view_name, 'new_bounty')
    def test_uninterested_reverse(self):
        """Test the uninterested url and check the reverse."""
        self.assertEqual(reverse('uninterested', args=[1, 2]), '/actions/bounty/1/interest/2/uninterested/')
    def test_uninterested_resolve(self):
        """Test the uninterested url and check the resolution."""
        self.assertEqual(resolve('/actions/bounty/1/interest/2/uninterested/').view_name, 'uninterested')
513 | #
# First-hop gateway transformation module
#
import typing
from box import Box
from . import _Module,get_effective_module_attribute
from ..utils import log, strings
from .. import data
from ..augment.nodes import reserve_id
from ..augment import devices
from ..data.validate import validate_attributes,must_be_string
def check_gw_protocol(gw: Box, path: str, topology: Box) -> typing.Any:
return must_be_string(
parent=gw,
key='protocol',
path=path,
module='gateway',
valid_values=topology.defaults.gateway.attributes.protocols
)
#
# Check whether a node supports all FHRP protocols configured on it
#
def check_protocol_support(node: Box, topology: Box) -> bool:
features = devices.get_device_features(node,topology.defaults)
proto_list = []
OK = True
for intf in node.interfaces: # Iterate over interfaces
if not 'gateway' in intf: # ... and skip interfaces without FHRP
continue
    gw_proto = intf.gateway.protocol              # We're assuming someone else did a sanity check on this value
if gw_proto in proto_list: # Already checked?
continue
proto_list.append(gw_proto)
if not gw_proto in features.gateway.protocol:
OK = False
log.error(
f'Node {node.name} ({node.device}) does not support gateway protocol {gw_proto}',
log.IncorrectValue,
'gateway')
return OK
#
# Remove unicast IPv4 addresses from interfaces that use 'anycast' gateway protocol
# and have 'gateway.anycast.unicast' set to False
#
def cleanup_unicast_ip(node: Box) -> None:
for intf in node.interfaces:
    if not intf.get('gateway',False):             # This interface is not using FHRP, or FHRP is disabled on it
continue
if intf.gateway.protocol != 'anycast': # Leave non-anycast FHRP implementations alone, they need node IP addresses
continue
if intf.get('gateway.anycast.unicast',None) is False: # Are we forbidden to use unicast IP addresses together with anycast ones?
intf.pop('ipv4',None) # No unicast with anycast ==> pop the address
#
# Default settings copied onto individual links have parameters for every known FHRP protocol.
# We don't need those parameters on every interface -- this function cleans up unused gateway protocol
# parameters from interfaces and returns a list of active protocols so we know what to clean on the
# node level.
def cleanup_protocol_parameters(node: Box,topology: Box) -> list:
active_proto: list = []
proto_list = topology.defaults.gateway.attributes.protocols # List of known FHRP protocols
for intf in node.interfaces: # Iterate over interfaces
if not 'gateway' in intf: # ... and skip interfaces without FHRP
continue
    gw_proto = intf.gateway.protocol              # We're assuming someone else did a sanity check on this value
if not gw_proto in active_proto: # Add the current protocol to the list of active protocols
active_proto.append(gw_proto)
for k in list(intf.gateway): # Now iterate over all keywords in interface gateway settings
if k != gw_proto and k in proto_list: # ... found FHRP protocol that is NOT the active protocol
intf.gateway.pop(k,None) # ... useless, pop it
return active_proto
class FHRP(_Module):
def module_init(self, topology: Box) -> None:
gw = data.get_global_settings(topology,'gateway')
    if not gw:
log.error(
f'Global/default gateway parameters are missing. We need at least a gateway ID',
log.IncorrectType,
'gateway')
return
check_gw_protocol(gw,'topology.gateway',topology)
if not data.is_true_int(gw.id):
log.error(
      f'Global/default gateway.id parameter is missing or not an integer',
log.IncorrectType,
'gateway')
return
if gw.id > 0:
reserve_id(gw.id)
def METHOD_NAME(self, link: Box, topology: Box) -> None:
if not 'gateway' in link:
return
global_gateway = data.get_global_settings(topology,'gateway')
if not global_gateway: # pragma: no cover
return # We know (from init) that we have global parameters. This is just to keep mypy happy
if link.gateway is True: # We just want to do FHRP on the link ==> take global parameters
link.gateway = global_gateway
elif link.gateway is False: # We DEFINITELY don't want FHRP on this link ==> remove it and move on
link.pop('gateway',None)
return
    else:                                         # Otherwise merge global defaults with link settings
check_gw_protocol(link.gateway,f'{link._linkname}',topology)
link.gateway = global_gateway + link.gateway
for k in ('id','protocol'):
if not k in link.gateway or not link.gateway[k]:
log.error(
f'Gateway attribute {k} is missing in {link._linkname}\n' + \
strings.extra_data_printout(strings.format_structured_dict(link)),
log.MissingValue,
'gateway')
return
if not data.is_true_int(link.gateway.id):
log.error(
f'Gateway.id parameter in {link._linkname} must be an integer\n' + \
strings.extra_data_printout(strings.format_structured_dict(link)),
log.IncorrectType,
'gateway')
return
if link.gateway.id == -1:
log.error(
f'Cannot use -1 as the gateway ID in {link._linkname} -- that would be the broadcast address\n' + \
strings.extra_data_printout(strings.format_structured_dict(link)),
log.IncorrectValue,
'gateway')
def node_post_transform(self, node: Box, topology: Box) -> None:
if not check_protocol_support(node,topology):
return
cleanup_unicast_ip(node)
active_proto = cleanup_protocol_parameters(node,topology) # Cleanup interface parameters and get a list of active protocols
if 'gateway' in node:
for k in list(node.gateway): # Iterate over node-level gateway parameters
if not k in active_proto: # Not a parameter for a FHRP active on this node?
node.gateway.pop(k,None) # ... zap it! | null |
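# --- Illustrative usage (assumed lab-topology snippet, shown as a comment) ---
# With a global "gateway.id: -2" and a link-level "gateway.protocol: vrrp",
# this module merges the two definitions (METHOD_NAME above), verifies that the
# node's device supports VRRP (check_protocol_support), and strips the unused
# anycast parameters from interfaces and node.gateway (node_post_transform).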
514 | # Copyright 2021 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields
from odoo.addons.shopinvader.tests.common import CommonCase
class TestShopinvaderPos(CommonCase):
"""
Tests for shopinvader.pos.service
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.PosOrder = cls.env["pos.order"]
cls.partner = cls.env.ref("base.res_partner_2")
cls.pricelist = cls.env.ref("product.list0")
cls.pick_type_out = cls.env["stock.picking.type"].search(
[("code", "=", "outgoing")], limit=1
)
cls.product1 = cls.env.ref("product.product_product_4")
cls.product2 = cls.env.ref("product.product_product_2")
cls.pos_config = cls.env["pos.config"].create(
{"name": "Test POS", "picking_type_id": cls.pick_type_out.id}
)
cls.pos_config.open_session_cb()
amount_base = 1 * 100 + 12 * 30
amount_tax = amount_base * 0.21
amount_total = amount_base + amount_tax
cls.pos_values = {
"partner_id": cls.partner.id,
"pricelist_id": cls.pricelist.id,
"session_id": cls.pos_config.current_session_id.id,
"amount_tax": amount_tax,
"amount_total": amount_total,
"amount_paid": 0,
"amount_return": 0,
"lines": [
(
0,
False,
{
"name": "Test line 1",
"qty": 1,
"price_unit": 100,
"product_id": cls.product1.id,
"price_subtotal": 1 * 100,
"price_subtotal_incl": 1 * 100 * 1.21,
},
),
(
0,
False,
{
"name": "Test line 2",
"qty": 12,
"price_unit": 30,
"product_id": cls.product2.id,
"price_subtotal": 12 * 30,
"price_subtotal_incl": 12 * 30 * 1.21,
},
),
],
}
cls.pos_order1 = cls.PosOrder.create(cls.pos_values)
cls.pos_order2 = cls.PosOrder.create(cls.pos_values)
cls.pos_order1.write({"state": "done"})
cls.pos_order2.write({"state": "done"})
def setUp(self):
super().setUp()
usage = "point_of_sale"
with self.work_on_services(
partner=self.partner, shopinvader_session=self.shopinvader_session
) as work:
self.service = work.component(usage=usage)
with self.work_on_services(
shopinvader_session=self.shopinvader_session
) as work:
self.service_guest = work.component(usage=usage)
def _build_json(self, pos_order):
result = {
"pos_id": pos_order.id,
"amount_untaxed": pos_order.amount_total - pos_order.amount_tax,
"name": pos_order.name,
"reference": pos_order.pos_reference or None,
"amount_tax": pos_order.amount_tax,
"date": fields.Datetime.to_string(pos_order.date_order),
"partner": {
"partner_id": pos_order.partner_id.id,
"name": pos_order.partner_id.name,
},
"amount_total": pos_order.amount_total,
}
return result
def METHOD_NAME(self):
result = self.service.dispatch("search")
result_data = result.get("data", {})
pos_orders = self.pos_order2 | self.pos_order1
expected_result = [self._build_json(pos_order) for pos_order in pos_orders]
for result, expected in zip(result_data, expected_result):
self.assertDictEqual(result, expected)
def test_get1(self):
pos_order = self.pos_order1
result = self.service.dispatch("get", pos_order.id)
result_data = result.get("data", {})
expected_result = self._build_json(pos_order)
self.assertDictEqual(result_data, expected_result) | null |
515 | import pytest
from api.base.settings import REST_FRAMEWORK, LATEST_VERSIONS
valid_url_path_version = '2.0'
valid_header_version = '2.1'
valid_query_parameter_version = '2.2'
invalid_url_path_version = '1.0'
invalid_header_version = '1.0.1'
invalid_query_parameter_version = '1.1'
invalid_url_path_version_url = '/v1/'
valid_url_path_version_url = '/v2/'
invalid_query_parameter_version_url = '/v2/?version={}'.format(
invalid_query_parameter_version
)
valid_query_parameter_version_url = '/v2/?version={}'.format(
valid_query_parameter_version
)
@pytest.mark.django_db
class TestBaseVersioning:
def test_url_path_version(self, app):
res = app.get(valid_url_path_version_url)
assert res.status_code == 200
assert res.json['meta']['version'] == valid_url_path_version
def test_header_version(self, app):
headers = {
'accept': 'application/vnd.api+json;version={}'.format(valid_header_version)
}
res = app.get(valid_url_path_version_url, headers=headers)
assert res.status_code == 200
assert res.json['meta']['version'] == valid_header_version
def test_query_param_version(self, app):
res = app.get(valid_query_parameter_version_url)
assert res.status_code == 200
assert res.json['meta']['version'] == valid_query_parameter_version
def test_url_path_version_not_in_allowed_versions(self, app):
res = app.get(invalid_url_path_version_url, expect_errors=True)
assert res.status_code == 404
def test_header_version_not_in_allowed_versions(self, app):
headers = {
'accept': 'application/vnd.api+json;version={}'.format(invalid_header_version)
}
res = app.get(
valid_url_path_version_url,
headers=headers,
expect_errors=True
)
assert res.status_code == 406
assert res.json['errors'][0]['detail'] == 'Invalid version in "Accept" header.'
def test_query_param_version_not_in_allowed_versions(self, app):
res = app.get(invalid_query_parameter_version_url, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Invalid version in query parameter.'
def test_header_version_and_query_parameter_version_match(self, app):
headers = {
'accept': 'application/vnd.api+json;version={}'.format(valid_header_version)
}
url = '/v2/?version={}'.format(valid_header_version)
res = app.get(url, headers=headers)
assert res.status_code == 200
assert res.json['meta']['version'] == valid_header_version
def test_header_version_and_query_parameter_version_mismatch(self, app):
headers = {
'accept': 'application/vnd.api+json;version={}'.format(valid_header_version)}
url = '/v2/?version={}'.format(valid_query_parameter_version)
res = app.get(url, headers=headers, expect_errors=True)
assert res.status_code == 409
assert res.json['errors'][0]['detail'] == 'Version {} specified in "Accept" header does not match version {} specified in query parameter'.format(
valid_header_version, valid_query_parameter_version
)
def test_header_version_bad_format(self, app):
headers = {
'accept': 'application/vnd.api+json;version=not_at_all_a_version'
}
res = app.get(
valid_url_path_version_url,
headers=headers,
expect_errors=True
)
assert res.status_code == 406
assert res.json['errors'][0]['detail'] == 'Invalid version in "Accept" header.'
def test_query_version_bad_format(self, app):
url = '/v2/?version=not_at_all_a_version'
res = app.get(url, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Invalid version in query parameter.'
def test_query_version_latest_v2(self, app):
url = '/v2/?version=latest'
res = app.get(url)
assert res.status_code == 200
assert res.json['meta']['version'] == LATEST_VERSIONS[2]
def test_header_version_latest(self, app):
headers = {'accept': 'application/vnd.api+json;version=latest'}
res = app.get(valid_url_path_version_url, headers=headers)
assert res.status_code == 200
assert res.json['meta']['version'] == LATEST_VERSIONS[2]
def test_browsable_api_defaults_to_latest(self, app):
url = '/v2/?format=api'
res = app.get(url)
assert res.status_code == 200
assert '"version": "{}"'.format(
LATEST_VERSIONS[2]
) in res.body.decode()
def test_browsable_api_query_version(self, app):
url = '/v2/?format=api&version=2.5'
res = app.get(url)
assert res.status_code == 200
assert b'"version": "2.5"' in res.body
def METHOD_NAME(self, app):
url = '/v2/?format=json'
res = app.get(url)
assert res.status_code == 200
assert res.json['meta']['version'] == REST_FRAMEWORK['DEFAULT_VERSION']
def test_json_api_defaults_to_default(self, app):
url = '/v2/?format=jsonapi'
res = app.get(url)
assert res.status_code == 200
assert res.json['meta']['version'] == REST_FRAMEWORK['DEFAULT_VERSION'] | null |
516 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class CreateFlowLogRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'CreateFlowLog','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_NetflowVersion(self): # String
return self.get_query_params().get('NetflowVersion')
def set_NetflowVersion(self, NetflowVersion): # String
self.add_query_param('NetflowVersion', NetflowVersion)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_InactiveAging(self): # Integer
return self.get_query_params().get('InactiveAging')
def set_InactiveAging(self, InactiveAging): # Integer
self.add_query_param('InactiveAging', InactiveAging)
def get_SlsRegionId(self): # String
return self.get_query_params().get('SlsRegionId')
def set_SlsRegionId(self, SlsRegionId): # String
self.add_query_param('SlsRegionId', SlsRegionId)
def get_ActiveAging(self): # Integer
return self.get_query_params().get('ActiveAging')
def set_ActiveAging(self, ActiveAging): # Integer
self.add_query_param('ActiveAging', ActiveAging)
def get_OutputType(self): # String
return self.get_query_params().get('OutputType')
def set_OutputType(self, OutputType): # String
self.add_query_param('OutputType', OutputType)
def get_ProjectName(self): # String
return self.get_query_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_query_param('ProjectName', ProjectName)
def get_LogstoreName(self): # String
return self.get_query_params().get('LogstoreName')
def set_LogstoreName(self, LogstoreName): # String
self.add_query_param('LogstoreName', LogstoreName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('NetflowServerPort')
def set_NetflowServerPort(self, NetflowServerPort): # Integer
self.add_query_param('NetflowServerPort', NetflowServerPort)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_NetflowServerIp(self): # String
return self.get_query_params().get('NetflowServerIp')
def set_NetflowServerIp(self, NetflowServerIp): # String
self.add_query_param('NetflowServerIp', NetflowServerIp)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |
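# --- Usage sketch (credentials and region are placeholders) ---
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
# request = CreateFlowLogRequest()
# request.set_Name('example-flow-log')
# request.set_OutputType('sls')
# response = client.do_action_with_exception(request)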
517 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudapi.endpoint import endpoint_data
class ModifyApiGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'ModifyApiGroup','apigateway')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DefaultDomain(self): # String
return self.get_query_params().get('DefaultDomain')
def set_DefaultDomain(self, DefaultDomain): # String
self.add_query_param('DefaultDomain', DefaultDomain)
def get_BasePath(self): # String
return self.get_query_params().get('BasePath')
def set_BasePath(self, BasePath): # String
self.add_query_param('BasePath', BasePath)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_RpcPattern(self): # String
return self.get_query_params().get('RpcPattern')
def set_RpcPattern(self, RpcPattern): # String
self.add_query_param('RpcPattern', RpcPattern)
def get_UserLogConfig(self): # String
return self.get_query_params().get('UserLogConfig')
def set_UserLogConfig(self, UserLogConfig): # String
self.add_query_param('UserLogConfig', UserLogConfig)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_CustomerConfigs(self): # String
return self.get_query_params().get('CustomerConfigs')
def set_CustomerConfigs(self, CustomerConfigs): # String
self.add_query_param('CustomerConfigs', CustomerConfigs)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_GroupName(self): # String
return self.get_query_params().get('GroupName')
def set_GroupName(self, GroupName): # String
self.add_query_param('GroupName', GroupName)
def get_PassthroughHeaders(self): # String
return self.get_query_params().get('PassthroughHeaders')
def set_PassthroughHeaders(self, PassthroughHeaders): # String
self.add_query_param('PassthroughHeaders', PassthroughHeaders)
def METHOD_NAME(self): # String
return self.get_query_params().get('CompatibleFlags')
def set_CompatibleFlags(self, CompatibleFlags): # String
self.add_query_param('CompatibleFlags', CompatibleFlags)
def get_CustomTraceConfig(self): # String
return self.get_query_params().get('CustomTraceConfig')
def set_CustomTraceConfig(self, CustomTraceConfig): # String
self.add_query_param('CustomTraceConfig', CustomTraceConfig) | null |
518 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdyplsapi.endpoint import endpoint_data
class BindAxnExtensionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dyplsapi', '2017-05-25', 'BindAxnExtension')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Extension(self): # String
return self.get_query_params().get('Extension')
def set_Extension(self, Extension): # String
self.add_query_param('Extension', Extension)
def get_CallDisplayType(self): # Integer
return self.get_query_params().get('CallDisplayType')
def set_CallDisplayType(self, CallDisplayType): # Integer
self.add_query_param('CallDisplayType', CallDisplayType)
def get_PhoneNoX(self): # String
return self.get_query_params().get('PhoneNoX')
def set_PhoneNoX(self, PhoneNoX): # String
self.add_query_param('PhoneNoX', PhoneNoX)
def get_RingConfig(self): # String
return self.get_query_params().get('RingConfig')
def set_RingConfig(self, RingConfig): # String
self.add_query_param('RingConfig', RingConfig)
def get_ASRStatus(self): # Boolean
return self.get_query_params().get('ASRStatus')
def METHOD_NAME(self, ASRStatus): # Boolean
self.add_query_param('ASRStatus', ASRStatus)
def get_PhoneNoB(self): # String
return self.get_query_params().get('PhoneNoB')
def set_PhoneNoB(self, PhoneNoB): # String
self.add_query_param('PhoneNoB', PhoneNoB)
def get_PhoneNoA(self): # String
return self.get_query_params().get('PhoneNoA')
def set_PhoneNoA(self, PhoneNoA): # String
self.add_query_param('PhoneNoA', PhoneNoA)
def get_ExpectCity(self): # String
return self.get_query_params().get('ExpectCity')
def set_ExpectCity(self, ExpectCity): # String
self.add_query_param('ExpectCity', ExpectCity)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OutOrderId(self): # String
return self.get_query_params().get('OutOrderId')
def set_OutOrderId(self, OutOrderId): # String
self.add_query_param('OutOrderId', OutOrderId)
def get_PoolKey(self): # String
return self.get_query_params().get('PoolKey')
def set_PoolKey(self, PoolKey): # String
self.add_query_param('PoolKey', PoolKey)
def get_Expiration(self): # String
return self.get_query_params().get('Expiration')
def set_Expiration(self, Expiration): # String
self.add_query_param('Expiration', Expiration)
def get_IsRecordingEnabled(self): # Boolean
return self.get_query_params().get('IsRecordingEnabled')
def set_IsRecordingEnabled(self, IsRecordingEnabled): # Boolean
self.add_query_param('IsRecordingEnabled', IsRecordingEnabled)
def get_OutId(self): # String
return self.get_query_params().get('OutId')
def set_OutId(self, OutId): # String
self.add_query_param('OutId', OutId)
def get_ASRModelId(self): # String
return self.get_query_params().get('ASRModelId')
def set_ASRModelId(self, ASRModelId): # String
self.add_query_param('ASRModelId', ASRModelId)
def get_CallRestrict(self): # String
return self.get_query_params().get('CallRestrict')
def set_CallRestrict(self, CallRestrict): # String
self.add_query_param('CallRestrict', CallRestrict) | null |
519 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeDedicatedHostClustersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDedicatedHostClusters','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # String
return self.get_query_params().get('DedicatedHostClusterName')
def set_DedicatedHostClusterName(self, DedicatedHostClusterName): # String
self.add_query_param('DedicatedHostClusterName', DedicatedHostClusterName)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DedicatedHostClusterIds(self): # String
return self.get_query_params().get('DedicatedHostClusterIds')
def set_DedicatedHostClusterIds(self, DedicatedHostClusterIds): # String
self.add_query_param('DedicatedHostClusterIds', DedicatedHostClusterIds)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_LockReason(self): # String
return self.get_query_params().get('LockReason')
def set_LockReason(self, LockReason): # String
self.add_query_param('LockReason', LockReason)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
520 | import logging
import os
import pickle
from functools import partial
from tqdm import tqdm
import meerkat as mk
from .data_utils import (
compute_file_tuples,
compute_slice_matrix,
compute_stanford_file_tuples,
get_sz_labels,
stanford_eeg_loader,
)
logger = logging.getLogger(__name__)
def METHOD_NAME(
dataset_dir: str,
raw_dataset_dir: str,
splits=["train", "dev"],
clip_len: int = 60,
step_size: int = 1,
stride: int = 60,
):
"""Builds a `DataFrame` for accessing EEG data.
Currently only supports TUH dataset for seq-seq prediction.
Future TODO: integrating stanford dataset with weak seq-seq labels
Args:
dataset_dir (str): A local directory where the preprocessed
(h5) EEG data are stored
raw_dataset_dir (str): A local directory where the original
(edf) EEG data are stored
        splits (list[str]): List of splits to load
        clip_len (int): Number of seconds in an EEG clip
step_size (int): Number of seconds in a single 'step'
stride (int): Number of seconds in the stride when extracting
clips from signals
"""
# retrieve paths of all edf files in the raw_dataset_dir
edf_files = []
for path, subdirs, files in os.walk(raw_dataset_dir):
for name in files:
if ".edf" in name:
edf_files.append(os.path.join(path, name))
data = []
for split in splits:
file_tuples = compute_file_tuples(
raw_dataset_dir, dataset_dir, split, clip_len, stride
)
for edf_fn, clip_idx, _ in tqdm(file_tuples, total=len(file_tuples)):
filepath = [file for file in edf_files if edf_fn in file]
filepath = filepath[0]
file_id = edf_fn.split(".edf")[0]
sequence_sz, binary_sz = get_sz_labels(
edf_fn=filepath,
clip_idx=int(clip_idx),
time_step_size=step_size,
clip_len=clip_len,
stride=stride,
)
row_df = {
"filepath": filepath,
"file_id": file_id,
"sequence_sz": sequence_sz,
"binary_sz": binary_sz,
"clip_idx": int(clip_idx),
"h5_fn": os.path.join(dataset_dir, edf_fn.split(".edf")[0] + ".h5"),
"split": split,
}
data.append(row_df)
df = mk.DataFrame(data)
eeg_loader = partial(
compute_slice_matrix, time_step_size=step_size, clip_len=clip_len, stride=stride
)
eeg_input_col = df[["clip_idx", "h5_fn"]].defer(fn=eeg_loader)
df.add_column(
"eeg_input",
eeg_input_col,
overwrite=True,
)
return df
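# --- Usage sketch (paths are hypothetical) ---
# df = METHOD_NAME(
#     dataset_dir="/data/tuh_eeg/processed",
#     raw_dataset_dir="/data/tuh_eeg/raw",
#     splits=["train", "dev"],
# )
# df["eeg_input"]  # clips are materialized lazily via compute_slice_matrix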
def download_tusz(download_dir, version="1.5.2"):
"""Downloads the EEG Seizure TUH dataset (TUSZ)
REQUIRED:
    1. First register at
    https://www.isip.piconepress.com/projects/tuh_eeg/html/downloads.shtml
    2. Run download_tusz from a python script, or simply run the provided rsync
    command below in your terminal
3. enter the provided password sent to your email after step (1)
Args:
download_dir (str): The directory path to save to.
version (str, optional): Which version to download
"""
src_pth = f"[email protected]:data/tuh_eeg_seizure/v{version}/"
rsync_command = f"rsync -auxvL {src_pth} {download_dir}"
print("Executing rsync command")
os.system(rsync_command)
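# Example (the download directory below is a placeholder):
#   download_tusz("/data/tuh_eeg_seizure", version="1.5.2")
# This shells out to the rsync command constructed above; rsync will prompt
# for the password sent to you after registration.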
def build_stanford_eeg_df(
stanford_dataset_dir: str,
lpch_dataset_dir: str,
file_marker_dir: str,
splits=["train", "dev"],
reports_pth=None,
clip_len: int = 60,
):
"""Builds a `DataFrame` for accessing EEG data.
This is for accessing private stanford data.
The stanford data is limited to specific researchers on IRB.
No public directions on how to download them yet.
Contact [email protected] for more information.
Args:
stanford_dataset_dir (str): A local dir where stanford EEG are stored
lpch_dataset_dir (str): A local dir where the lpch EEG are stored
file_marker_dir (str): A local dir where file markers are stored
splits (list[str]): List of splits to load
reports_pth (str): if not None, will load reports
clip_len (int): Number of seconds in an EEG clip
"""
# retrieve file tuples which is a list of
# (eeg filepath, location of sz or -1 if no sz, split)
file_tuples = compute_stanford_file_tuples(
stanford_dataset_dir, lpch_dataset_dir, file_marker_dir, splits
)
data = []
for filepath, sz_loc, split in file_tuples:
row_df = {
"filepath": filepath,
"file_id": filepath.split("/")[-1].split(".eeghdf")[0],
"binary_sz": sz_loc != -1,
"sz_start_index": sz_loc,
"split": split,
}
data.append(row_df)
df = mk.DataFrame(data)
eeg_input_col = df[["sz_start_index", "filepath", "split"]].defer(
fn=partial(stanford_eeg_loader, clip_len=clip_len)
)
df.add_column(
"eeg_input",
eeg_input_col,
overwrite=True,
)
if reports_pth:
raw_reports_pth = os.path.join(reports_pth, "reports_unique_for_hl_mm.txt")
raw_reports_df = mk.DataFrame.from_csv(raw_reports_pth, sep="\t")
parsed_reports_pth = os.path.join(reports_pth, "parsed_eeg_notes.dill")
with open(parsed_reports_pth, "rb") as dill_f:
parsed_reports = pickle.load(dill_f)
doc_data = []
for doc in parsed_reports:
uuid = doc.doc_id
mask_id = raw_reports_df["note_uuid"] == uuid
if mask_id.sum() == 1 and "findings" in doc.sections:
file_id = raw_reports_df[mask_id]["edf_file_name"][0].split(".edf")[0]
findings = doc.sections["findings"]["text"]
row_df = {"file_id": file_id, "findings": findings}
doc_data.append(row_df)
reports_df = mk.DataFrame(doc_data)
df = df.merge(reports_df, how="left", on="file_id")
return df | null |
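# --- Usage sketch (directories are placeholders; data access requires IRB approval) ---
# df = build_stanford_eeg_df(
#     stanford_dataset_dir="/data/stanford_eeg",
#     lpch_dataset_dir="/data/lpch_eeg",
#     file_marker_dir="/data/file_markers",
#     reports_pth=None,  # set to a reports directory to merge parsed findings
# )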
521 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class DescribeForwardTableEntriesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'DescribeForwardTableEntries','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ForwardTableId(self): # String
return self.get_query_params().get('ForwardTableId')
def set_ForwardTableId(self, ForwardTableId): # String
self.add_query_param('ForwardTableId', ForwardTableId)
def get_InternalIp(self): # String
return self.get_query_params().get('InternalIp')
def set_InternalIp(self, InternalIp): # String
self.add_query_param('InternalIp', InternalIp)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def METHOD_NAME(self): # String
return self.get_query_params().get('ForwardEntryId')
def set_ForwardEntryId(self, ForwardEntryId): # String
self.add_query_param('ForwardEntryId', ForwardEntryId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_NatGatewayId(self): # String
return self.get_query_params().get('NatGatewayId')
def set_NatGatewayId(self, NatGatewayId): # String
self.add_query_param('NatGatewayId', NatGatewayId)
def get_ExternalIp(self): # String
return self.get_query_params().get('ExternalIp')
def set_ExternalIp(self, ExternalIp): # String
self.add_query_param('ExternalIp', ExternalIp)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_ForwardEntryName(self): # String
return self.get_query_params().get('ForwardEntryName')
def set_ForwardEntryName(self, ForwardEntryName): # String
self.add_query_param('ForwardEntryName', ForwardEntryName)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InternalPort(self): # String
return self.get_query_params().get('InternalPort')
def set_InternalPort(self, InternalPort): # String
self.add_query_param('InternalPort', InternalPort)
def get_ExternalPort(self): # String
return self.get_query_params().get('ExternalPort')
def set_ExternalPort(self, ExternalPort): # String
self.add_query_param('ExternalPort', ExternalPort) | null |
522 | #!/usr/bin/env python
#
# Azure Linux extension
#
# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
import os
import datetime
import time
import string
import traceback
class Watcher:
"""
A class that handles periodic monitoring activities that are requested for LAD to perform.
The first such activity is to watch /etc/fstab and report (log to console) if there's anything
wrong with that. There might be other such monitoring activities that will be added later.
"""
def __init__(self, hutil_error, hutil_log, log_to_console=False):
"""
Constructor.
:param hutil_error: Error logging function (e.g., hutil.error). This is not a stream.
:param hutil_log: Normal logging function (e.g., hutil.log). This is not a stream.
:param log_to_console: Indicates whether to log any issues to /dev/console or not.
"""
# This is only for the /etc/fstab watcher feature.
self._fstab_last_mod_time = os.path.getmtime('/etc/fstab')
self._hutil_error = hutil_error
self._hutil_log = hutil_log
self._log_to_console = log_to_console
self._imds_logger = None
def METHOD_NAME(self, message):
"""
Write 'message' to console. Stolen from waagent LogToCon().
"""
if self._log_to_console:
try:
with open('/dev/console', 'w') as console:
                    # Keep only printable (ASCII) characters. Join the filter
                    # result so this also works on Python 3, where filter()
                    # returns an iterator; after the filter the message is pure
                    # ASCII, so no further encoding is needed.
                    message = ''.join(filter(lambda x: x in string.printable, message))
                    console.write(message + '\n')
except IOError as e:
self._hutil_error('Error writing to console. Exception={0}'.format(e))
def handle_fstab(self, ignore_time=False):
"""
Watches if /etc/fstab is modified and verifies if it's OK. Otherwise, report it in logs or to /dev/console.
:param ignore_time: Disable the default logic of delaying /etc/fstab verification by 1 minute.
This is to allow any test code to avoid waiting 1 minute unnecessarily.
:return: None
"""
try_mount = False
if ignore_time:
try_mount = True
else:
current_mod_time = os.path.getmtime('/etc/fstab')
current_mod_date_time = datetime.datetime.fromtimestamp(current_mod_time)
            # Only try to mount if it's been at least 1 minute since the
            # change to fstab, to avoid emitting spurious errors while the
            # file is still being edited
if (current_mod_time != self._fstab_last_mod_time and
datetime.datetime.now() > current_mod_date_time +
datetime.timedelta(minutes=1)):
try_mount = True
self._fstab_last_mod_time = current_mod_time
ret = 0
if try_mount:
ret = subprocess.call(['sudo', 'mount', '-a', '-vf'])
if ret != 0:
# There was an error running mount, so log
error_msg = 'fstab modification failed mount validation. Please correct before reboot.'
self._hutil_error(error_msg)
self.METHOD_NAME(error_msg)
else:
# No errors
self._hutil_log('fstab modification passed mount validation')
return ret
def set_imds_logger(self, imds_logger):
self._imds_logger = imds_logger
def watch(self):
"""
Main loop performing various monitoring activities periodically.
Currently iterates every 5 minutes, and other periodic activities might be
added in the loop later.
:return: None
"""
while True:
# /etc/fstab watcher
self.handle_fstab()
# IMDS probe (only sporadically, inside the function)
if self._imds_logger:
try:
self._imds_logger.log_imds_data_if_right_time()
except Exception as e:
self._hutil_error('ImdsLogger exception: {0}\nStacktrace: {1}'.format(e, traceback.format_exc()))
# Sleep 5 minutes
time.sleep(60 * 5)
pass | null |
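# --- Usage sketch (hutil is the handler utility the extension normally provides) ---
# watcher = Watcher(hutil.error, hutil.log, log_to_console=True)
# watcher.handle_fstab(ignore_time=True)  # one-shot fstab validation (e.g., in tests)
# watcher.watch()                         # blocking monitor loop, typically run in a thread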
523 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapters for file formats."""
from __future__ import annotations
import abc
import enum
import os
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Type, Union
from etils import epath
from tensorflow_datasets.core.utils import type_utils
from tensorflow_datasets.core.utils.lazy_imports_utils import array_record_module
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
ExamplePositions = List[Any]
class FileFormat(enum.Enum):
"""Format of the record files.
The values of the enumeration are used as filename endings/suffix.
"""
TFRECORD = 'tfrecord'
RIEGELI = 'riegeli'
ARRAY_RECORD = 'array_record'
@property
def file_suffix(self) -> str:
return ADAPTER_FOR_FORMAT[self].FILE_SUFFIX
@classmethod
def from_value(cls, file_format: Union[str, 'FileFormat']) -> 'FileFormat':
try:
return cls(file_format)
except ValueError as e:
all_values = [f.value for f in cls]
raise ValueError(
f'{file_format} is not a valid FileFormat! '
f'Valid file formats: {all_values}'
) from e
DEFAULT_FILE_FORMAT = FileFormat.TFRECORD
class FileAdapter(abc.ABC):
"""Interface for Adapter objects which read and write examples in a format."""
FILE_SUFFIX: ClassVar[str]
BUFFER_SIZE = 8 << 20 # 8 MiB per file.
@classmethod
@abc.abstractmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns TensorFlow Dataset comprising given record file."""
raise NotImplementedError()
@classmethod
@abc.abstractmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
List of record positions for each record in the given iterator. In case of
TFRecords, does not return anything.
"""
raise NotImplementedError()
class TfRecordFileAdapter(FileAdapter):
"""File adapter for TFRecord file format."""
FILE_SUFFIX = 'tfrecord'
@classmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns TensorFlow Dataset comprising given record file."""
buffer_size = buffer_size or cls.BUFFER_SIZE
return tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
@classmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
None
"""
with tf.io.TFRecordWriter(os.fspath(path)) as writer:
for _, serialized_example in iterator:
writer.write(serialized_example)
writer.flush()
class RiegeliFileAdapter(FileAdapter):
"""File adapter for Riegeli file format."""
FILE_SUFFIX = 'riegeli'
@classmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
buffer_size = buffer_size or cls.BUFFER_SIZE
from riegeli.tensorflow.ops import riegeli_dataset_ops as riegeli_tf # pylint: disable=g-import-not-at-top # pytype: disable=import-error
return riegeli_tf.RiegeliDataset(filename, buffer_size=buffer_size)
@classmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
List of record positions for each record in the given iterator.
"""
positions = []
import riegeli # pylint: disable=g-import-not-at-top
with tf.io.gfile.GFile(os.fspath(path), 'wb') as f:
with riegeli.RecordWriter(f, options='transpose') as writer:
for _, record in iterator:
writer.write_record(record)
positions.append(writer.last_pos)
return positions
class ArrayRecordFileAdapter(FileAdapter):
"""File adapter for ArrayRecord file format."""
FILE_SUFFIX = 'array_record'
@classmethod
def make_tf_data(
cls,
filename: epath.PathLike,
buffer_size: Optional[int] = None,
) -> tf.data.Dataset:
"""Returns TensorFlow Dataset comprising given array record file."""
raise NotImplementedError(
'`.as_dataset()` not implemented for ArrayRecord files. Please, use'
' `.as_data_source()`.'
)
@classmethod
def write_examples(
cls,
path: epath.PathLike,
iterator: Iterable[type_utils.KeySerializedExample],
) -> Optional[ExamplePositions]:
"""Write examples from given iterator in given path.
Args:
path: Path where to write the examples.
iterator: Iterable of examples.
Returns:
None
"""
writer = array_record_module.ArrayRecordWriter(
os.fspath(path), 'group_size:1'
)
for _, serialized_example in iterator:
writer.write(serialized_example)
writer.close()
def _to_bytes(key: type_utils.Key) -> bytes:
"""Convert the key to bytes."""
if isinstance(key, int):
    return key.to_bytes(128, byteorder='big')  # Use 128 as this matches md5
elif isinstance(key, bytes):
return key
elif isinstance(key, str):
return key.encode('utf-8')
else:
raise TypeError(f'Invalid key type: {type(key)}')
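# A quick illustration of `_to_bytes` (editorial example, not original code):
#
#   _to_bytes(1)     -> b'\x00' * 127 + b'\x01'  (128 big-endian bytes)
#   _to_bytes(b'k')  -> b'k'
#   _to_bytes('abc') -> b'abc'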
# Create a mapping from FileFormat -> FileAdapter.
ADAPTER_FOR_FORMAT: Dict[FileFormat, Type[FileAdapter]] = {
FileFormat.RIEGELI: RiegeliFileAdapter,
FileFormat.TFRECORD: TfRecordFileAdapter,
FileFormat.ARRAY_RECORD: ArrayRecordFileAdapter,
}
_FILE_SUFFIX_TO_FORMAT = {
adapter.FILE_SUFFIX: file_format
for file_format, adapter in ADAPTER_FOR_FORMAT.items()
}
def file_format_from_suffix(file_suffix: str) -> FileFormat:
"""Returns the file format associated with the file extension (`tfrecord`)."""
if file_suffix not in _FILE_SUFFIX_TO_FORMAT:
raise ValueError(
'Unrecognized file extension: Should be one of '
        f'{list(_FILE_SUFFIX_TO_FORMAT.keys())}'
)
return _FILE_SUFFIX_TO_FORMAT[file_suffix]
def METHOD_NAME(filename: str) -> bool:
"""Whether the given filename is a record file."""
return any(
f'.{adapter.FILE_SUFFIX}' in filename
for adapter in ADAPTER_FOR_FORMAT.values()
  )
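# Minimal usage sketch (illustrative only, not part of the original module):
# resolve an adapter from its file suffix and write two made-up records.
if __name__ == '__main__':
  _adapter = ADAPTER_FOR_FORMAT[file_format_from_suffix('tfrecord')]
  _adapter.write_examples(
      '/tmp/demo.tfrecord',
      [(b'key-0', b'value-0'), (b'key-1', b'value-1')],
  )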
524 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2009-2020 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from django.core.exceptions import PermissionDenied
from django.forms.utils import ValidationError
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from ..utils import entities_to_str
def validate_authenticated_user(user, message, code, **kwargs):
if user is None or not user.is_authenticated:
raise ValidationError(message.format(**kwargs), code=code)
# TODO: factorise
# VIEW ----------------------
def validate_viewable_entity(entity, user, code='viewnotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to view entities'),
code,
)
try:
user.has_perm_to_view_or_die(entity)
except PermissionDenied as e:
raise ValidationError(str(e), code=code) from e
return entity
def validate_viewable_entities(entities, user, code='viewnotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to view entities'),
code,
)
has_perm = user.has_perm_to_view
unviewable = entities_to_str((e for e in entities if not has_perm(e)), user)
if unviewable:
raise ValidationError(
_('Some entities are not viewable: {}').format(unviewable),
code=code,
)
return entities
# CHANGE ----------------------
def validate_editable_entity(entity, user, code='changenotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to edit entities'),
code,
)
try:
user.has_perm_to_change_or_die(entity)
except PermissionDenied as e:
raise ValidationError(str(e), code=code) from e
return entity
def validate_editable_entities(entities, user, code='changenotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to edit entities'),
code,
)
has_perm = user.has_perm_to_change
uneditable = entities_to_str((e for e in entities if not has_perm(e)), user)
if uneditable:
raise ValidationError(
_('Some entities are not editable: {}').format(uneditable),
code=code,
)
return entities
# LINK ----------------------
def validate_linkable_entity(entity, user, code='linknotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to link entities'),
code,
)
try:
user.has_perm_to_link_or_die(entity)
except PermissionDenied as e:
raise ValidationError(str(e), code=code) from e
return entity
def validate_linkable_entities(entities, user, code='linknotallowed'):
validate_authenticated_user(
user,
        gettext_lazy('An unauthenticated user is not allowed to link entities'),
code,
)
has_perm = user.has_perm_to_link
unlinkable = entities_to_str((e for e in entities if not has_perm(e)), user)
if unlinkable:
raise ValidationError(
_('Some entities are not linkable: {}').format(unlinkable),
code=code,
)
return entities
def METHOD_NAME(model, user, owner, code='linknotallowed'):
    validate_authenticated_user(
        user,
        gettext_lazy('An unauthenticated user is not allowed to link «{model}»'),
        code=code,
        model=model._meta.verbose_name_plural,
    )
if not user.has_perm_to_link(model, owner=owner):
raise ValidationError(
_('You are not allowed to link with the «{models}» of this user.').format(
models=model._meta.verbose_name_plural,
),
code=code,
)
    return owner
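# Illustrative sketch (not part of the original module): these validators are
# meant to be called from form cleaning code; the field name below is a
# made-up example.
#
#     def clean_entity(self):
#         return validate_editable_entity(self.cleaned_data['entity'], self.user)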
525 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateDhcpOptionsSetRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateDhcpOptionsSet', 'vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_LeaseTime(self): # String
return self.get_query_params().get('LeaseTime')
def set_LeaseTime(self, LeaseTime): # String
self.add_query_param('LeaseTime', LeaseTime)
def get_DomainNameServers(self): # String
return self.get_query_params().get('DomainNameServers')
def set_DomainNameServers(self, DomainNameServers): # String
self.add_query_param('DomainNameServers', DomainNameServers)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_DhcpOptionsSetDescription(self): # String
return self.get_query_params().get('DhcpOptionsSetDescription')
def set_DhcpOptionsSetDescription(self, DhcpOptionsSetDescription): # String
self.add_query_param('DhcpOptionsSetDescription', DhcpOptionsSetDescription)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DhcpOptionsSetName(self): # String
return self.get_query_params().get('DhcpOptionsSetName')
def set_DhcpOptionsSetName(self, DhcpOptionsSetName): # String
self.add_query_param('DhcpOptionsSetName', DhcpOptionsSetName)
def get_Ipv6LeaseTime(self): # String
return self.get_query_params().get('Ipv6LeaseTime')
def METHOD_NAME(self, Ipv6LeaseTime): # String
        self.add_query_param('Ipv6LeaseTime', Ipv6LeaseTime)
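# Illustrative sketch only (not part of the generated SDK file): building the
# request. The values are made-up examples; actually sending it would require
# an AcsClient configured with real credentials.
if __name__ == '__main__':
    _request = CreateDhcpOptionsSetRequest()
    _request.set_DhcpOptionsSetName('demo-dhcp-options-set')
    _request.set_DomainName('example.com')
    _request.set_DomainNameServers('100.100.2.136,100.100.2.138')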
526 | # **************************************************************************
# *
# * Authors: David Herreros Calero ([email protected])
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# **************************************************************************
import os
import pyworkflow.viewer as pwviewer
import pwem.viewers.views as vi
import pyworkflow.protocol.params as params
from pwem.viewers import ChimeraView
from pyworkflow.object import Set
from xmipp3.protocols.protocol_apply_zernike3d import XmippApplyZernike3D
class XmippPDBDeformViewer(pwviewer.ProtocolViewer):
""" Visualize the deformation applied to the PDB file """
_label = 'viewer pdb deform sph'
_targets = [XmippApplyZernike3D]
_environments = [pwviewer.DESKTOP_TKINTER, pwviewer.WEB_DJANGO]
OPEN_FILE = "open %s\n"
def _defineParams(self, form):
self.deformed = self.protocol.deformed.get()
self.have_set = isinstance(self.deformed, Set)
form.addSection(label='Show deformation')
form.addParam('pdbIdChoice', params.EnumParam,
condition='self.have_set==True',
choices=list(self.deformed.getIdSet()),
default=0,
label='Structure to display', display=params.EnumParam.DISPLAY_COMBO,
help='Select which structure to display from the IDs of the set')
form.addParam('doShowPDB', params.LabelParam,
label="Display original and deformed PDB or volume")
form.addParam('doShowMorph', params.LabelParam,
label="Display a morphing between the original and deformed PDB or volume")
def _getVisualizeDict(self):
        if self.have_set:
            self.chosen = self.deformed[list(self.deformed.getIdSet())[self.pdbIdChoice]]
        else:
            self.chosen = self.deformed
return {'doShowPDB': self.METHOD_NAME,
'doShowMorph': self._doShowMorph}
def METHOD_NAME(self, obj, **kwargs):
        if self.protocol.applyPDB.get():
scriptFile = self.protocol._getPath('pdb_deform_chimera.cxc')
fhCmd = open(scriptFile, 'w')
inputFile = os.path.abspath(self.protocol.inputPDB.get().getFileName())
outputFile = os.path.abspath(self.chosen.getFileName())
fhCmd.write(self.OPEN_FILE % inputFile)
fhCmd.write(self.OPEN_FILE % outputFile)
# fhCmd.write("start Model Panel\n")
fhCmd.write("show cartoons\n")
fhCmd.write("cartoon style width 1.5 thick 1.5\n")
fhCmd.write("style stick\n")
fhCmd.write("color bymodel\n")
fhCmd.close()
view = ChimeraView(scriptFile)
return [view]
else:
raise ValueError("This viewer is only for atomic structures")
def _doShowMorph(self, obj, **kwargs):
        if self.protocol.applyPDB.get():
scriptFile = self.protocol._getPath('pdb_deform_chimera.cxc')
fhCmd = open(scriptFile, 'w')
inputFile = os.path.abspath(self.protocol.inputPDB.get().getFileName())
            outputFile = os.path.abspath(self.chosen.getFileName())
fhCmd.write(self.OPEN_FILE % inputFile)
fhCmd.write(self.OPEN_FILE % outputFile)
fhCmd.write("hide models\n")
fhCmd.write("morph #1,2 frames 50 play false\n")
fhCmd.write("coordset #3 1,\n")
fhCmd.write("wait 50\n")
fhCmd.write("coordset #3 50,1\n")
fhCmd.close()
view = ChimeraView(scriptFile)
return [view]
else:
raise ValueError("This viewer is only for atomic structures")
527 | # Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
import re
from guild import click_util
from guild import cmd_impl_support
from guild import remote as remotelib
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild")
class MetaSyncRemote(remotelib.Remote):
def __init__(self, runs_dir, deleted_runs_dir=None):
self._runs_dir = runs_dir
self._deleted_runs_dir = deleted_runs_dir
def list_runs(self, **opts):
deleted = opts.pop("deleted", False)
if deleted and not self._deleted_runs_dir:
raise remotelib.OperationNotSupported(
f"remote '{self.name}' does not support '--deleted' option"
)
self._sync_runs_meta()
runs_dir = self._deleted_runs_dir if deleted else self._runs_dir
if not os.path.exists(runs_dir):
return
assert not opts.get("archive"), opts
assert not opts.get("remote"), opts
args = click_util.Args(
deleted=False, archive=runs_dir, remote=None, json=False, **opts
)
try:
runs_impl.list_runs(args)
except SystemExit as e:
self._system_exit_for_remote(e, ["runs list", "runs"])
def _sync_runs_meta(self, force=False):
raise NotImplementedError()
def filtered_runs(self, **filters):
self._sync_runs_meta()
args = click_util.Args(archive=self._runs_dir, remote=None, runs=[], **filters)
return runs_impl.runs_for_args(args)
def delete_runs(self, **opts):
if not self._deleted_runs_dir and not opts.get("permanent"):
raise remotelib.OperationNotSupported(
f"remote '{self.name}' does not support non permanent deletes\n"
"Use the '--permanent' with this command and try again."
)
args = click_util.Args(archive=self._runs_dir, remote=None, **opts)
self._sync_runs_meta()
if args.permanent:
preview = cmd_impl_support.format_warn(
"WARNING: You are about to permanently delete "
f"the following runs on {self.name}:"
)
confirm = "Permanently delete these runs?"
else:
preview = f"You are about to delete the following runs on {self.name}:"
confirm = "Delete these runs?"
no_runs_help = "Nothing to delete."
def delete_f(selected):
self._delete_runs(selected, args.permanent)
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
preview,
confirm,
no_runs_help,
delete_f,
confirm_default=not args.permanent,
)
except SystemExit as e:
self._system_exit_for_remote(e, ["runs rm", "runs delete"])
def _delete_runs(self, runs, permanent):
raise NotImplementedError()
def _system_exit_for_remote(self, e, cmds):
from guild import main
assert isinstance(e, SystemExit), e
msg, code = main.system_exit_params(e)
if not msg:
raise SystemExit(code)
for cmd in cmds:
maybe_changed = msg.replace(
f"guild {self.name}", f"guild {self.name} -r {cmd}"
)
if maybe_changed != msg:
msg = maybe_changed
break
raise SystemExit(msg, code)
def restore_runs(self, **opts):
if not self._deleted_runs_dir:
raise remotelib.OperationNotSupported()
self._sync_runs_meta()
args = click_util.Args(archive=self._deleted_runs_dir, remote=None, **opts)
preview = f"You are about to restore the following runs on {self.name}:"
confirm = "Restore these runs?"
no_runs_help = "Nothing to restore."
def restore_f(selected):
self._restore_runs(selected)
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
preview,
confirm,
no_runs_help,
restore_f,
confirm_default=True,
)
except SystemExit as e:
self._system_exit_for_remote(e, ["runs restore"])
def _restore_runs(self, runs):
raise NotImplementedError()
def purge_runs(self, **opts):
if not self._deleted_runs_dir:
raise remotelib.OperationNotSupported()
self._sync_runs_meta()
args = click_util.Args(archive=self._deleted_runs_dir, remote=None, **opts)
preview = (
"WARNING: You are about to permanently delete "
f"the following runs on {self.name}:"
)
confirm = "Permanently delete these runs?"
no_runs_help = "Nothing to purge."
def purge_f(selected):
self._purge_runs(selected)
self._sync_runs_meta(force=True)
try:
runs_impl.runs_op(
args,
None,
preview,
confirm,
no_runs_help,
purge_f,
confirm_default=False,
)
except SystemExit as e:
self._system_exit_for_remote(e, ["runs purge"])
def _purge_runs(self, runs):
raise NotImplementedError()
def run_info(self, **opts):
self._sync_runs_meta()
args = click_util.Args(**opts)
args.archive = self._runs_dir
args.remote = None
args.private_attrs = False
runs_impl.run_info(args, None)
def local_meta_dir(remote_name, key):
base_dir = var.remote_dir(_safe_filename(remote_name))
key_hash = hashlib.md5(key.encode()).hexdigest()
return os.path.join(base_dir, "meta", key_hash)
def _safe_filename(s):
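    # Collapse runs of non-word characters to single dashes; fall back to "-".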
if not s:
return s
return re.sub(r"\W+", "-", s).strip("-") or "-"
def local_meta_id(local_sync_dir):
id_path = os.path.join(local_sync_dir, "meta-id")
return util.try_read(id_path, apply=str.strip)
def clear_local_meta_id(local_sync_dir):
id_path = os.path.join(local_sync_dir, "meta-id")
util.ensure_deleted(id_path)
def write_local_meta_id(meta_id, local_sync_dir):
assert meta_id is not None, "meta_id cannot be None"
id_path = os.path.join(local_sync_dir, "meta-id")
with open(id_path, "w") as f:
f.write(meta_id)
def meta_current(local_sync_dir, remote_meta_id_cb):
local_id = local_meta_id(local_sync_dir)
if local_id is None:
log.debug("local meta-id not found, meta not current")
return False
remote_id = remote_meta_id_cb()
log.debug("local meta-id: %s", local_id)
log.debug("remote meta-id: %s", remote_id)
return local_id == remote_id
def METHOD_NAME(name):
name = name.replace("\\", "/")
    return (
        name.endswith(".guild/opref")
        or "/.guild/attrs/" in name
        or "/.guild/LOCK" in name
    )
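# Illustrative sketch only (not part of the original module): how a remote
# implementation could drive the meta-id helpers above. The remote name, key
# and meta id are made-up examples.
if __name__ == "__main__":
    _sync_dir = local_meta_dir("my-remote", "s3://my-bucket/runs")
    if not meta_current(_sync_dir, lambda: "remote-meta-id"):
        # A real remote would re-sync run metadata here, then record the new
        # id with write_local_meta_id("remote-meta-id", _sync_dir).
        pass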
528 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoceanbasepro.endpoint import endpoint_data
class DescribeOasAnomalySQLListRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeOasAnomalySQLList', 'oceanbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self): # String
return self.get_body_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_body_params('StartTime', StartTime)
def get_SearchRule(self): # String
return self.get_body_params().get('SearchRule')
def set_SearchRule(self, SearchRule): # String
self.add_body_params('SearchRule', SearchRule)
def get_MergeDynamicSql(self): # Boolean
return self.get_body_params().get('MergeDynamicSql')
def set_MergeDynamicSql(self, MergeDynamicSql): # Boolean
self.add_body_params('MergeDynamicSql', MergeDynamicSql)
def get_Current(self): # Long
return self.get_body_params().get('Current')
def set_Current(self, Current): # Long
self.add_body_params('Current', Current)
def get_DynamicSql(self): # Boolean
return self.get_body_params().get('DynamicSql')
def set_DynamicSql(self, DynamicSql): # Boolean
self.add_body_params('DynamicSql', DynamicSql)
def get_SqlTextLength(self): # Long
return self.get_body_params().get('SqlTextLength')
def set_SqlTextLength(self, SqlTextLength): # Long
self.add_body_params('SqlTextLength', SqlTextLength)
def get_TenantId(self): # String
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_body_params('TenantId', TenantId)
def get_PageSize(self): # Long
return self.get_body_params().get('PageSize')
def set_PageSize(self, PageSize): # Long
self.add_body_params('PageSize', PageSize)
def get_SearchValue(self): # String
return self.get_body_params().get('SearchValue')
def set_SearchValue(self, SearchValue): # String
self.add_body_params('SearchValue', SearchValue)
def get_SqlId(self): # String
return self.get_body_params().get('SqlId')
def set_SqlId(self, SqlId): # String
self.add_body_params('SqlId', SqlId)
def get_FilterCondition(self): # String
return self.get_body_params().get('FilterCondition')
def set_FilterCondition(self, FilterCondition): # String
self.add_body_params('FilterCondition', FilterCondition)
def get_SearchParam(self): # String
return self.get_body_params().get('SearchParam')
def set_SearchParam(self, SearchParam): # String
self.add_body_params('SearchParam', SearchParam)
def get_EndTime(self): # String
return self.get_body_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_body_params('EndTime', EndTime)
def get_NodeIp(self): # String
return self.get_body_params().get('NodeIp')
def set_NodeIp(self, NodeIp): # String
self.add_body_params('NodeIp', NodeIp)
def get_InstanceId(self): # String
return self.get_body_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_body_params('InstanceId', InstanceId)
def get_DbName(self): # String
return self.get_body_params().get('DbName')
def set_DbName(self, DbName): # String
self.add_body_params('DbName', DbName)
def get_SearchKeyWord(self): # String
return self.get_body_params().get('SearchKeyWord')
def set_SearchKeyWord(self, SearchKeyWord): # String
self.add_body_params('SearchKeyWord', SearchKeyWord)
def get_AcceptLanguage(self): # String
return self.get_body_params().get('AcceptLanguage')
def METHOD_NAME(self, AcceptLanguage): # String
        self.add_body_params('AcceptLanguage', AcceptLanguage)
529 | #!/usr/bin/python3 -u
# Copyright 2022 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import argparse
import atexit
import fcntl
import os
import subprocess
import sys
import tempfile
import time
from typing import List
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", ".."))
def wait_for_server(port: int, delay: float = 0.1) -> None:
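    # Poll with netcat until something accepts TCP connections on the port.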
cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)]
while subprocess.call(cmd) != 0:
time.sleep(0.01)
time.sleep(delay)
def execute_tester(
binary,
queries,
should_fail=False,
failure_message="",
username="",
password="",
check_failure=True,
connection_should_fail=False,
):
args = [binary, "--username", username, "--password", password]
if should_fail:
args.append("--should-fail")
if failure_message:
args.extend(["--failure-message", failure_message])
if check_failure:
args.append("--check-failure")
if connection_should_fail:
args.append("--connection-should-fail")
args.extend(queries)
subprocess.run(args).check_returncode()
def METHOD_NAME(binary: str, queries: List[str], username: str = "", password: str = "") -> None:
args = [binary, "--username", username, "--password", password]
args.extend(queries)
subprocess.run(args).check_returncode()
def make_non_blocking(fd):
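    # Set O_NONBLOCK so reads from the pipe return immediately instead of blocking.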
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def start_memgraph(memgraph_args: List[str]) -> subprocess.Popen:
memgraph = subprocess.Popen(
list(map(str, memgraph_args)), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
time.sleep(0.1)
assert memgraph.poll() is None, "Memgraph process died prematurely!"
wait_for_server(7687)
# Make the stdout and stderr pipes non-blocking
make_non_blocking(memgraph.stdout.fileno())
make_non_blocking(memgraph.stderr.fileno())
return memgraph
def check_flag(tester_binary: str, flag: str, value: str) -> None:
args = [tester_binary, "--field", flag, "--value", value]
subprocess.run(args).check_returncode()
def cleanup(memgraph: subprocess.Popen):
if memgraph.poll() is None:
memgraph.terminate()
assert memgraph.wait() == 0, "Memgraph process didn't exit cleanly!"
def run_test(tester_binary: str, memgraph_args: List[str], server_name: str, query_tx: str):
memgraph = start_memgraph(memgraph_args)
atexit.register(cleanup, memgraph)
check_flag(tester_binary, "server.name", server_name)
check_flag(tester_binary, "query.timeout", query_tx)
cleanup(memgraph)
atexit.unregister(cleanup)
def run_test_w_query(tester_binary: str, memgraph_args: List[str], executor_binary: str):
memgraph = start_memgraph(memgraph_args)
atexit.register(cleanup, memgraph)
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'server.name' TO 'New Name';"])
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'query.timeout' TO '123';"])
check_flag(tester_binary, "server.name", "New Name")
check_flag(tester_binary, "query.timeout", "123")
cleanup(memgraph)
atexit.unregister(cleanup)
def consume(stream):
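    # Drain whatever lines are currently buffered on the (non-blocking) stream.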
res = []
while True:
line = stream.readline()
if not line:
break
res.append(line.strip())
return res
def run_log_test(tester_binary: str, memgraph_args: List[str], executor_binary: str):
# Test if command line parameters work
memgraph = start_memgraph(memgraph_args + ["--log-level", "TRACE", "--also-log-to-stderr"])
atexit.register(cleanup, memgraph)
std_err = consume(memgraph.stderr)
assert len(std_err) > 5, "Failed to log to stderr"
# Test if run-time setting log.to_stderr works
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'log.to_stderr' TO 'false';"])
consume(memgraph.stderr)
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'query.timeout' TO '123';"])
std_err = consume(memgraph.stderr)
assert len(std_err) == 0, "Still writing to stderr even after disabling it"
# Test if run-time setting log.level works
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'log.to_stderr' TO 'true';"])
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'log.level' TO 'CRITICAL';"])
consume(memgraph.stderr)
METHOD_NAME(executor_binary, ["SET DATABASE SETTING 'query.timeout' TO '123';"])
std_err = consume(memgraph.stderr)
assert len(std_err) == 0, "Log level not updated"
    # Test that unsupported values cause an exception
execute_tester(
tester_binary,
["SET DATABASE SETTING 'log.to_stderr' TO 'something'"],
should_fail=True,
failure_message="'something' not valid for 'log.to_stderr'",
)
execute_tester(
tester_binary,
["SET DATABASE SETTING 'log.level' TO 'something'"],
should_fail=True,
failure_message="'something' not valid for 'log.level'",
)
cleanup(memgraph)
atexit.unregister(cleanup)
def execute_test(memgraph_binary: str, tester_binary: str, flag_tester_binary: str, executor_binary: str) -> None:
storage_directory = tempfile.TemporaryDirectory()
memgraph_args = [memgraph_binary, "--data-directory", storage_directory.name]
print("\033[1;36m~~ Starting run-time settings check test ~~\033[0m")
print("\033[1;34m~~ server.name and query.timeout ~~\033[0m")
# Check default flags
run_test(flag_tester_binary, memgraph_args, "Neo4j/v5.11.0 compatible graph database server - Memgraph", "600")
# Check changing flags via command-line arguments
run_test(
flag_tester_binary,
memgraph_args + ["--bolt-server-name-for-init", "Memgraph", "--query-execution-timeout-sec", "1000"],
"Memgraph",
"1000",
)
# Check changing flags via query
run_test_w_query(flag_tester_binary, memgraph_args, executor_binary)
print("\033[1;34m~~ log.level and log.to_stderr ~~\033[0m")
# Check log settings
run_log_test(tester_binary, memgraph_args, executor_binary)
print("\033[1;36m~~ Finished run-time settings check test ~~\033[0m")
if __name__ == "__main__":
memgraph_binary = os.path.join(PROJECT_DIR, "build", "memgraph")
tester_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "run_time_settings", "tester")
flag_tester_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "run_time_settings", "flag_tester")
executor_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "run_time_settings", "executor")
parser = argparse.ArgumentParser()
parser.add_argument("--memgraph", default=memgraph_binary)
parser.add_argument("--tester", default=tester_binary)
parser.add_argument("--flag_tester", default=flag_tester_binary)
parser.add_argument("--executor", default=executor_binary)
args = parser.parse_args()
execute_test(args.memgraph, args.tester, args.flag_tester, args.executor)
    sys.exit(0)
530 | import json
import os
from datetime import datetime
import openai
import meerkat as mk
CHATBOT = "chatbot"
USER = "user"
class ConversationHistory:
"""Stores the full conversation history, and keeps track of the agent's
memory to use for prompting."""
def __init__(
self,
greeting: str = "Hi! Welcome to Meerkat!",
chatbot_name: str = "Meerkat",
savepath: str = "history.jsonl",
):
        # Create a data frame with a single message from the chatbot.
df = mk.DataFrame(
{
"message": [greeting],
"sender": [CHATBOT],
"name": [chatbot_name],
"time": [self.timestamp(datetime.now())],
},
)
self.df = df
# Store the history in a jsonl file.
self.savepath = savepath
if os.path.exists(savepath):
self.df = mk.DataFrame.from_json(savepath, lines=True)
else:
self.write_last_message()
@staticmethod
def timestamp(time: datetime):
# Formats as 04:20 PM / Jan 01, 2020
return time.strftime("%I:%M %p / %b %d, %Y")
def METHOD_NAME(self, message: str, sender: str, name: str, send_time: datetime):
df = mk.DataFrame(
{
"message": [message],
"sender": [sender],
"name": [name],
"time": [self.timestamp(send_time)],
},
)
# THIS IS ESSENTIAL!
# Running a df.set will automatically trigger a re-render on the
# frontend, AS LONG AS this method is called inside an `mk.endpoint`!
# Otherwise, this will behave like a normal Python method.
self.df.set(self.df.append(df))
self.write_last_message()
def write_last_message(self):
# Write the last message.
with open(self.savepath, "a") as f:
f.write(json.dumps(self.df[-1]) + "\n")
def get_chatbot_response(
history: ConversationHistory,
lookback: int,
instructions: str = "You are a helpful assistant.",
) -> str:
"""Run the OpenAI chat completion, using a subset of the chat history."""
assert lookback > 0, "Lookback must be greater than 0."
# Lookback, and rename columns to align with OpenAI's API.
messages = history.df[-lookback:].rename({"sender": "role", "message": "content"})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": instructions}]
+ [
message if message["role"] == USER else {**message, **{"role": "assistant"}}
for message in messages["role", "content"]
],
)
return response["choices"][0]["message"]["content"]
@mk.endpoint()
def run_chatbot(
history: ConversationHistory,
message: str,
lookback: int = 10,
instructions: str = "You are a helpful assistant.",
chatbot_name: str = "Meerkat",
user_name: str = "Me",
):
"""Update the conversation history and get a response from the chatbot."""
history.METHOD_NAME(message, USER, user_name, datetime.now())
response = get_chatbot_response(history, lookback, instructions)
history.METHOD_NAME(response, CHATBOT, chatbot_name, datetime.now())
# A pure Python component in Meerkat just needs to subclass `div`!
class ChatGPT(mk.gui.html.div):
"""A basic chatbot component."""
# Expose the chat Component that is used internally
chat: mk.gui.Chat = None
# Expose the chat history
history: ConversationHistory = None
# Expose the `on_send` endpoint of the `Chat` component.
on_send: mk.gui.Endpoint = None
def __init__(
self,
lookback: int = 10,
instructions: str = "You are a helpful assistant.",
chatbot_name: str = "🤖 Meerkat",
user_name: str = "Me",
img_chatbot: str = "http://meerkat.wiki/favicon.png",
img_user: str = "http://placekitten.com/200/200",
savepath: str = "history.jsonl",
classes: str = "h-full flex flex-col pb-8",
):
# Keep track of the conversation history.
# Also contains a memory that is used for prompting.
history = ConversationHistory(chatbot_name=chatbot_name, savepath=savepath)
# This endpoint takes in one remaining argument, `message`, which
# is the message sent by the user. It then updates the conversation
# history, and gets a response from the chatbot.
on_send: mk.gui.Endpoint = run_chatbot.partial(
history=history,
lookback=lookback,
instructions=instructions,
chatbot_name=chatbot_name,
user_name=user_name,
)
# Create the chat component
chat = mk.gui.Chat(
df=history.df,
img_chatbot=img_chatbot,
img_user=img_user,
on_send=on_send,
)
# Make a little header on top of the chat component.
header = mk.gui.Caption(
"ChatGPT, built using 🔮 [Meerkat](http://meerkat.wiki).",
classes="self-center max-w-full bg-gray-50 dark:bg-slate-300 mx-8 px-2"
" w-fit text-center rounded-t-lg",
)
# Call the constructor for `div`, which we wrap around
# any components made here.
super().__init__([header, chat], classes=classes)
# Store stuff
self.chat = chat
self.history = history
self.on_send = on_send
def test_chatgpt():
"""Test the chatgpt component by simulating a conversation and checking
that the memory is updated."""
# Create chatgpt
chatgpt = ChatGPT(savepath="temp.history.jsonl")
# Simulate a conversation and check the history.
chatgpt.on_send.run(message="Who are you?")
chatgpt.on_send.run(message="How are you?")
chatgpt.on_send.run(message="What is your name?")
    # Remove the temp file (rmtree is for directories; this is a regular file).
    if os.path.exists("temp.history.jsonl"):
        os.remove("temp.history.jsonl")
531 | def init_input_node(pet_nii):
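    # Fail fast with a readable error if the PET volume cannot be loaded.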
from clinica.utils.filemanip import get_subject_id, load_volume
from clinica.utils.stream import cprint
from clinica.utils.ux import print_begin_image
image_id = get_subject_id(pet_nii)
try:
load_volume(pet_nii)
except ValueError as e:
error_msg = f"Clinica could not load volumes for {image_id.replace('_', ' | ')}. {str(e)}"
cprint(error_msg, lvl="error")
raise ValueError(error_msg)
print_begin_image(image_id)
return pet_nii
def _check_non_empty_tissue_list(tissues: list) -> None:
"""Check that provided list is non-empty."""
if len(tissues) == 0:
raise RuntimeError(
"The length of the list of tissues must be greater than zero."
)
def _load_tissues(tissues: list):
"""Aggregates the image data contained in the tissue images provided.
Parameters
----------
tissues : list
List of tissue images to aggregate.
Returns
-------
data : np.ndarray
Aggregated data.
affine : np.ndarray
Affine of the first image, acting as the affine
of the aggregated image under the assumption that
all images have the same affine.
header : Nifti1Header
Header of the aggregated image.
"""
import nibabel as nib
import numpy as np
from clinica.pipelines.pet_volume.pet_volume_utils import ( # noqa
_check_non_empty_tissue_list,
)
_check_non_empty_tissue_list(tissues)
img_0 = nib.load(tissues[0])
shape = list(img_0.get_fdata(dtype="float32").shape)
data = np.zeros(shape=shape)
for image in tissues:
data += nib.load(image).get_fdata(dtype="float32")
return data, img_0.affine, img_0.header
def create_binary_mask(
tissues: list,
threshold: float = 0.3,
) -> str:
"""Create a binary mask Nifti1Image from the list of tissues.
Tissue images are summed and the result is thresholded with the
provided `threshold` input.
Parameters
----------
tissues : list
List of paths to tissue Nifti1Images. Must be non-empty.
threshold : float, optional
Threshold to apply when binarizing the Nifti1Image.
Default=0.3.
Returns
-------
out_mask : str
Path to the binary mask Nifti1Image as a string.
"""
from os import getcwd
from os.path import basename, join
import nibabel as nib
from clinica.pipelines.pet_volume.pet_volume_utils import _load_tissues # noqa
data, affine, header = _load_tissues(tissues)
data = (data > threshold) * 1.0
out_mask = join(getcwd(), basename(tissues[0]) + "_brainmask.nii")
mask = nib.Nifti1Image(data, affine, header=header)
nib.save(mask, out_mask)
return out_mask
def apply_binary_mask(image: str, binary_mask: str) -> str:
"""Apply the provided `binary_mask` to the provided `image`.
Parameters
----------
image : str
Path to the Nifti1Image to apply the mask on.
binary_mask : str
Path to the Nifti1Image containing the mask.
Returns
-------
masked_image_path : str
Path to the masked Nifti1Image.
"""
from os import getcwd
from os.path import basename, join
import nibabel as nib
original_image = nib.load(image)
mask = nib.load(binary_mask)
data = original_image.get_fdata(dtype="float32") * mask.get_fdata(dtype="float32")
masked_image_path = join(getcwd(), "masked_" + basename(image))
masked_image = nib.Nifti1Image(
data, original_image.affine, header=original_image.header
)
nib.save(masked_image, masked_image_path)
return masked_image_path
def METHOD_NAME(tissues: list) -> str:
"""Create a pvc mask from tissue list.
Parameters
----------
tissues : list
List of paths to tissue Nifti1Images. Must be non-empty.
Returns
-------
out_mask : str
Path to the resulting mask Nifti1Image.
"""
from os import getcwd
from os.path import join
import nibabel as nib
import numpy as np
from clinica.pipelines.pet_volume.pet_volume_utils import _load_tissues # noqa
background, affine, header = _load_tissues(tissues)
shape = background.shape
shape += tuple([len(tissues) + 1])
data = np.empty(shape=shape, dtype=np.float64)
for i, tissue in enumerate(tissues):
image = nib.load(tissue)
data[..., i] = np.array(image.get_fdata(dtype="float32"))
background = 1.0 - background
data[..., len(tissues)] = np.array(background)
out_mask = join(getcwd(), "pvc_mask.nii")
mask = nib.Nifti1Image(data, affine, header=header)
nib.save(mask, out_mask)
return out_mask
def pet_pvc_name(pet_image: str, pvc_method: str) -> str:
"""Build the name for the PET PVC interface.
Parameters
----------
pet_image : str
Path to the PET scan file.
pvc_method : str
Name of the PVC method. This will be concatenated
with the `pet_image` filename.
Returns
-------
pet_pvc_path : str
Name for the PET PVC interface.
Examples
--------
>>> pet_pvc_name(
... "/home/bids/sub-01/ses-M00/pet/sub-01_ses-M00_task-rest_trc-av45_pet.nii.gz",
... "RBV"
...)
'pvc-rbv_sub-01_ses-M00_task-rest_trc-av45_pet.nii.gz'
"""
from os.path import basename
return "pvc-" + pvc_method.lower() + "_" + basename(pet_image)
def normalize_to_reference(pet_image: str, region_mask: str) -> str:
"""Normalize the provided `pet_image` by dividing by the mean
value of the region defined by the provided `region_mask`.
Parameters
----------
pet_image : str
Path to the Nifti1Image which should be normalized.
region_mask : str
Path to the mask to be used to define the region.
Returns
-------
suvr_pet_path : str
Path to the normalized Nifti1Image.
"""
from os import getcwd
from os.path import basename, join
import nibabel as nib
import numpy as np
pet = nib.load(pet_image)
ref = nib.load(region_mask)
region = pet.get_fdata(dtype="float32") * ref.get_fdata(dtype="float32")
region_mean = np.nanmean(np.where(region != 0, region, np.nan))
data = pet.get_fdata(dtype="float32") / region_mean
suvr_pet_path = join(getcwd(), "suvr_" + basename(pet_image))
suvr_pet = nib.Nifti1Image(data, pet.affine, header=pet.header)
nib.save(suvr_pet, suvr_pet_path)
return suvr_pet_path
def atlas_statistics(in_image: str, in_atlas_list: list) -> list:
"""Generate regional measure from atlas_list in TSV files.
For each atlas name provided it calculates for the input image the mean
for each region in the atlas and saves it to a TSV file.
Parameters
----------
in_image : str
Path to the Nifti image.
in_atlas_list : List
List of names of atlas to be applied.
Returns
-------
atlas_statistics : List
List of paths to TSV files.
"""
from os import getcwd
from os.path import abspath, join
from nipype.utils.filemanip import split_filename
from clinica.utils.atlas import AtlasAbstract
from clinica.utils.statistics import statistics_on_atlas
orig_dir, base, ext = split_filename(str(in_image))
atlas_classes = AtlasAbstract.__subclasses__()
atlas_statistics_list = []
for atlas in in_atlas_list:
for atlas_class in atlas_classes:
if atlas_class.get_name_atlas() == atlas:
out_atlas_statistics = abspath(
join(getcwd(), base + "_space-" + atlas + "_statistics.tsv")
)
statistics_on_atlas(str(in_image), atlas_class(), out_atlas_statistics)
atlas_statistics_list.append(out_atlas_statistics)
break
return atlas_statistics_list
def get_from_list(in_list, index):
    return in_list[index]
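# Illustrative sketch only (not part of the original module): chaining the
# helpers above. The file names are made-up examples and must exist on disk.
if __name__ == "__main__":
    _mask = create_binary_mask(["gm.nii", "wm.nii", "csf.nii"], threshold=0.3)
    _masked_pet = apply_binary_mask("pet.nii", _mask)
    _suvr_pet = normalize_to_reference(_masked_pet, _mask)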
532 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class QueryIncomeTrendRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'QueryIncomeTrend')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AdSlotType(self): # String
return self.get_query_params().get('AdSlotType')
def set_AdSlotType(self, AdSlotType): # String
self.add_query_param('AdSlotType', AdSlotType)
def get_StartTime(self): # Long
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # Long
self.add_query_param('StartTime', StartTime)
def get_Slot(self): # Long
return self.get_query_params().get('Slot')
def set_Slot(self, Slot): # Long
self.add_query_param('Slot', Slot)
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_MediaName(self): # String
return self.get_query_params().get('MediaName')
def set_MediaName(self, MediaName): # String
self.add_query_param('MediaName', MediaName)
def get_SlotDimension(self): # String
return self.get_query_params().get('SlotDimension')
def set_SlotDimension(self, SlotDimension): # String
self.add_query_param('SlotDimension', SlotDimension)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_AdSlotId(self): # String
return self.get_query_params().get('AdSlotId')
def set_AdSlotId(self, AdSlotId): # String
self.add_query_param('AdSlotId', AdSlotId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Dimension(self): # String
return self.get_query_params().get('Dimension')
def set_Dimension(self, Dimension): # String
self.add_query_param('Dimension', Dimension)
def get_QueryType(self): # String
return self.get_query_params().get('QueryType')
def set_QueryType(self, QueryType): # String
self.add_query_param('QueryType', QueryType)
def METHOD_NAME(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_EndTime(self): # Long
return self.get_query_params().get('EndTime')
def set_EndTime(self, EndTime): # Long
self.add_query_param('EndTime', EndTime)
def get_MediaId(self): # String
return self.get_query_params().get('MediaId')
def set_MediaId(self, MediaId): # String
self.add_query_param('MediaId', MediaId)
def get_Environment(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_AdSlotName(self): # String
return self.get_query_params().get('AdSlotName')
def set_AdSlotName(self, AdSlotName): # String
        self.add_query_param('AdSlotName', AdSlotName)
533 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeScalingInstancesRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeScalingInstances', 'ess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ScalingGroupId(self): # String
return self.get_query_params().get('ScalingGroupId')
def set_ScalingGroupId(self, ScalingGroupId): # String
self.add_query_param('ScalingGroupId', ScalingGroupId)
def get_LifecycleState(self): # String
return self.get_query_params().get('LifecycleState')
def METHOD_NAME(self, LifecycleState): # String
self.add_query_param('LifecycleState', LifecycleState)
def get_CreationType(self): # String
return self.get_query_params().get('CreationType')
def set_CreationType(self, CreationType): # String
self.add_query_param('CreationType', CreationType)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ScalingActivityId(self): # String
return self.get_query_params().get('ScalingActivityId')
def set_ScalingActivityId(self, ScalingActivityId): # String
self.add_query_param('ScalingActivityId', ScalingActivityId)
def get_CreationTypess(self): # RepeatList
return self.get_query_params().get('CreationTypes')
def set_CreationTypess(self, CreationTypes): # RepeatList
for depth1 in range(len(CreationTypes)):
self.add_query_param('CreationTypes.' + str(depth1 + 1), CreationTypes[depth1])
def get_ScalingConfigurationId(self): # String
return self.get_query_params().get('ScalingConfigurationId')
def set_ScalingConfigurationId(self, ScalingConfigurationId): # String
self.add_query_param('ScalingConfigurationId', ScalingConfigurationId)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_HealthStatus(self): # String
return self.get_query_params().get('HealthStatus')
def set_HealthStatus(self, HealthStatus): # String
        self.add_query_param('HealthStatus', HealthStatus)
534 | import datetime
from graphql import GraphQLError
from pytest import fixture
from ..datetime import Date, DateTime, Time
from ..objecttype import ObjectType
from ..schema import Schema
class Query(ObjectType):
datetime = DateTime(_in=DateTime(name="in"))
date = Date(_in=Date(name="in"))
time = Time(_at=Time(name="at"))
def resolve_datetime(self, info, _in=None):
return _in
def resolve_date(self, info, _in=None):
return _in
def METHOD_NAME(self, info, _at=None):
return _at
schema = Schema(query=Query)
@fixture
def sample_datetime():
utc_datetime = datetime.datetime(2019, 5, 25, 5, 30, 15, 10, datetime.timezone.utc)
return utc_datetime
@fixture
def sample_time(sample_datetime):
time = datetime.time(
sample_datetime.hour,
sample_datetime.minute,
sample_datetime.second,
sample_datetime.microsecond,
sample_datetime.tzinfo,
)
return time
@fixture
def sample_date(sample_datetime):
date = sample_datetime.date()
return date
def test_datetime_query(sample_datetime):
isoformat = sample_datetime.isoformat()
result = schema.execute("""{ datetime(in: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"datetime": isoformat}
def test_datetime_query_with_variables(sample_datetime):
isoformat = sample_datetime.isoformat()
result = schema.execute(
"""
query GetDate($datetime: DateTime) {
literal: datetime(in: "%s")
value: datetime(in: $datetime)
}
"""
% isoformat,
variable_values={"datetime": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_date_query(sample_date):
isoformat = sample_date.isoformat()
result = schema.execute("""{ date(in: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"date": isoformat}
def test_date_query_with_variables(sample_date):
isoformat = sample_date.isoformat()
result = schema.execute(
"""
query GetDate($date: Date) {
literal: date(in: "%s")
value: date(in: $date)
}
"""
% isoformat,
variable_values={"date": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_time_query(sample_time):
isoformat = sample_time.isoformat()
result = schema.execute("""{ time(at: "%s") }""" % isoformat)
assert not result.errors
assert result.data == {"time": isoformat}
def test_time_query_with_variables(sample_time):
isoformat = sample_time.isoformat()
result = schema.execute(
"""
query GetTime($time: Time) {
literal: time(at: "%s")
value: time(at: $time)
}
"""
% isoformat,
variable_values={"time": isoformat},
)
assert not result.errors
assert result.data == {"literal": isoformat, "value": isoformat}
def test_bad_datetime_query():
not_a_date = "Some string that's not a datetime"
result = schema.execute("""{ datetime(in: "%s") }""" % not_a_date)
assert result.errors and len(result.errors) == 1
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "DateTime cannot represent value:"
' "Some string that\'s not a datetime"'
)
assert result.data is None
def test_bad_date_query():
not_a_date = "Some string that's not a date"
result = schema.execute("""{ date(in: "%s") }""" % not_a_date)
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "Date cannot represent value:"
' "Some string that\'s not a date"'
)
assert result.data is None
def test_bad_time_query():
not_a_date = "Some string that's not a time"
result = schema.execute("""{ time(at: "%s") }""" % not_a_date)
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert (
error.message == "Time cannot represent value:"
' "Some string that\'s not a time"'
)
assert result.data is None
def test_datetime_query_variable(sample_datetime):
isoformat = sample_datetime.isoformat()
# test datetime variable provided as Python datetime
result = schema.execute(
"""query Test($date: DateTime){ datetime(in: $date) }""",
variables={"date": sample_datetime},
)
assert not result.errors
assert result.data == {"datetime": isoformat}
# test datetime variable in string representation
result = schema.execute(
"""query Test($date: DateTime){ datetime(in: $date) }""",
variables={"date": isoformat},
)
assert not result.errors
assert result.data == {"datetime": isoformat}
def test_date_query_variable(sample_date):
isoformat = sample_date.isoformat()
# test date variable provided as Python date
result = schema.execute(
"""query Test($date: Date){ date(in: $date) }""",
variables={"date": sample_date},
)
assert not result.errors
assert result.data == {"date": isoformat}
# test date variable in string representation
result = schema.execute(
"""query Test($date: Date){ date(in: $date) }""", variables={"date": isoformat}
)
assert not result.errors
assert result.data == {"date": isoformat}
def test_time_query_variable(sample_time):
isoformat = sample_time.isoformat()
# test time variable provided as Python time
result = schema.execute(
"""query Test($time: Time){ time(at: $time) }""",
variables={"time": sample_time},
)
assert not result.errors
assert result.data == {"time": isoformat}
# test time variable in string representation
result = schema.execute(
"""query Test($time: Time){ time(at: $time) }""", variables={"time": isoformat}
)
assert not result.errors
assert result.data == {"time": isoformat}
def test_bad_variables(sample_date, sample_datetime, sample_time):
def _test_bad_variables(type_, input_):
result = schema.execute(
f"""query Test($input: {type_}){{ {type_.lower()}(in: $input) }}""",
variables={"input": input_},
)
assert isinstance(result.errors, list)
assert len(result.errors) == 1
assert isinstance(result.errors[0], GraphQLError)
assert result.data is None
not_a_date = dict()
not_a_date_str = "Some string that's not a date"
today = sample_date
now = sample_datetime
time = sample_time
bad_pairs = [
("DateTime", not_a_date),
("DateTime", not_a_date_str),
("DateTime", today),
("DateTime", time),
("Date", not_a_date),
("Date", not_a_date_str),
("Date", time),
("Time", not_a_date),
("Time", not_a_date_str),
("Time", now),
("Time", today),
]
for type_, input_ in bad_pairs:
_test_bad_variables(type_, input_) | null |
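# ---------------------------------------------------------------------------
# For reference, the `schema` fixture used throughout the tests above is
# defined earlier in this module. A minimal sketch of such a schema follows
# (hypothetical resolver bodies; `name="in"` maps the keyword-safe `_in`
# kwarg onto the GraphQL argument `in`):
#
#     import graphene
#
#     class Query(graphene.ObjectType):
#         datetime = graphene.DateTime(_in=graphene.DateTime(name="in"))
#         time = graphene.Time(at=graphene.Time(name="at"))
#
#         def resolve_datetime(self, info, _in=None):
#             return _in
#
#         def resolve_time(self, info, at=None):
#             return at
#
#     schema = graphene.Schema(query=Query)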
535 | import json
from pathlib import Path
import pytest
import requests
from click.testing import CliRunner
from pytest_mock import MockerFixture
from ggshield.__main__ import cli
from ggshield.core.errors import ExitCode
from tests.conftest import _IAC_SINGLE_VULNERABILITY
from tests.repository import Repository
from tests.unit.conftest import my_vcr
from tests.unit.request_mock import create_json_response
# `iac scan` is set for deprecation, but should behave exactly as `iac scan all` in the meantime
pytestmark = pytest.mark.parametrize(
"cli_command", [["iac", "scan", "all"], ["iac", "scan"]]
)
def setup_single_iac_vuln_repo(tmp_path: Path) -> str:
"""
Sets up a local repo with a single vulnerable IaC file from a given tmp_path.
:returns: a string representing the path to the file
"""
repo = Repository.create(tmp_path)
iac_file_name = "iac_file_single_vulnerability.tf"
tracked_file = tmp_path / iac_file_name
tracked_file.write_text(_IAC_SINGLE_VULNERABILITY)
repo.add(tracked_file)
repo.create_commit()
return str(tracked_file)
@my_vcr.use_cassette("test_iac_scan_no_argument")
def METHOD_NAME(cli_fs_runner: CliRunner, cli_command) -> None:
"""
GIVEN -
WHEN running the iac scan command with no argument
THEN the return code is 0
"""
result = cli_fs_runner.invoke(
cli,
cli_command,
)
assert result.exit_code == ExitCode.SUCCESS
@my_vcr.use_cassette("test_iac_scan_empty_directory")
def test_scan_all_valid_args(cli_fs_runner: CliRunner, cli_command) -> None:
"""
GIVEN valid arguments to the iac scan command
WHEN running the iac scan command with those arguments
THEN the return code is 0
"""
result = cli_fs_runner.invoke(
cli,
cli_command
+ [
"--minimum-severity",
"MEDIUM",
"--ignore-policy",
"GG_IAC_0001",
"--ignore-policy",
"GG_IAC_0002",
"--ignore-path",
"**",
".",
],
)
assert result.exit_code == ExitCode.SUCCESS
def test_invalid_policy_id(cli_fs_runner: CliRunner, cli_command) -> None:
"""
GIVEN arguments to the iac scan command with non-correct policy id to ignore
WHEN running the iac scan command with those arguments
THEN the return code is 1
"""
result = cli_fs_runner.invoke(
cli,
cli_command
+ [
"--ignore-policy",
"GG_IAC_0001",
"--ignore-policy",
"GG_IAC_002",
".",
],
)
assert result.exit_code == ExitCode.SCAN_FOUND_PROBLEMS
assert (
"The policies ['GG_IAC_002'] do not match the pattern 'GG_IAC_[0-9]{4}'"
in str(result.exception)
)
def test_iac_scan_all_file_error_response(
cli_fs_runner: CliRunner, cli_command
) -> None:
with cli_fs_runner.isolated_filesystem():
iac_file_path = setup_single_iac_vuln_repo(Path("."))
result = cli_fs_runner.invoke(
cli,
cli_command
+ [
iac_file_path,
],
)
assert result.exit_code == ExitCode.USAGE_ERROR
assert "Error: Invalid value for '[DIRECTORY]'" in result.stdout
def test_iac_scan_all_error_response(
cli_fs_runner: CliRunner, mocker: MockerFixture, cli_command
) -> None:
mocker.patch(
"ggshield.core.client.GGClient.request",
return_value=create_json_response({"detail": "Not found (404)"}, 404),
)
with cli_fs_runner.isolated_filesystem():
setup_single_iac_vuln_repo(Path("."))
result = cli_fs_runner.invoke(
cli,
cli_command
+ [
".",
],
)
assert "Error scanning." in result.stdout
assert "The following chunk is affected" not in result.stdout
assert "404:Not found (404)" in result.stdout
def test_iac_scan_all_json_error_response(
cli_fs_runner: CliRunner, mocker: MockerFixture, cli_command
) -> None:
mocker.patch(
"ggshield.core.client.GGClient.request",
return_value=create_json_response({"detail": "Not found (404)"}, 404),
)
cli_fs_runner.mix_stderr = False
with cli_fs_runner.isolated_filesystem():
setup_single_iac_vuln_repo(Path("."))
result = cli_fs_runner.invoke(
cli,
cli_command
+ [
"--json",
".",
],
)
assert "Error scanning." in result.stderr
assert "404:Not found (404)" in result.stderr
assert json.loads(result.stdout) == {
"entities_with_incidents": [],
"iac_engine_version": "",
"id": ".",
"total_incidents": 0,
"type": "path_scan",
}
def test_iac_scan_all_unknown_error_response(
cli_fs_runner: CliRunner, mocker: MockerFixture, cli_command
) -> None:
mocker.patch(
"ggshield.core.client.GGClient.request",
return_value=create_json_response({"detail": "Not found (404)"}, 404),
)
with cli_fs_runner.isolated_filesystem():
setup_single_iac_vuln_repo(Path("."))
result = cli_fs_runner.invoke(
cli,
cli_command + ["."],
)
assert "Error scanning." in result.stdout
assert "404:Not found (404)" in result.stdout
def test_iac_scan_all_error_response_read_timeout(
cli_fs_runner: CliRunner, mocker: MockerFixture, cli_command
) -> None:
mocker.patch(
"ggshield.core.client.GGClient.request",
side_effect=requests.exceptions.ReadTimeout("Timeout error"),
)
with cli_fs_runner.isolated_filesystem():
setup_single_iac_vuln_repo(Path("."))
result = cli_fs_runner.invoke(
cli,
cli_command + ["."],
)
assert "Error scanning." in result.stdout
assert "504:The request timed out." in result.stdout
def test_iac_scan_all_verbose(cli_fs_runner: CliRunner, cli_command) -> None:
with cli_fs_runner.isolated_filesystem():
# GIVEN a repository with one IaC file and one non-IaC file
path = Path(".")
repo = Repository.create(path)
iac_file_name = "iac_file.tf"
non_iac_file_name = "non_iac_file.txt"
tracked_iac_file = path / iac_file_name
tracked_iac_file.write_text(_IAC_SINGLE_VULNERABILITY)
repo.add(tracked_iac_file)
tracked_non_iac_file = path / non_iac_file_name
tracked_non_iac_file.write_text(_IAC_SINGLE_VULNERABILITY)
repo.add(tracked_non_iac_file)
repo.create_commit()
# WHEN performing a scan all with the verbose option
result = cli_fs_runner.invoke(
cli,
cli_command + [str(path), "-v"],
)
# THEN the IaC file appears in the output
assert iac_file_name in result.stdout
# AND the non-IaC file does not
assert non_iac_file_name not in result.stdout | null |
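# For reference, the parametrized commands exercised above correspond to CLI
# invocations such as the following (flags taken from the tests themselves;
# the target path is illustrative):
#
#   ggshield iac scan all --minimum-severity MEDIUM --ignore-policy GG_IAC_0001 .
#   ggshield iac scan all --json .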
536 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import six
import argparse
import torch
from onmt.utils.logging import init_logger, logger
from onmt.inputters.inputter import dict_to_vocabs
def get_vocabs(dict_path):
model = torch.load(dict_path, map_location=torch.device("cpu"))
vocabs = dict_to_vocabs(model["vocab"])
enc_vocab = vocabs["src"]
dec_vocab = vocabs["tgt"]
logger.info("From: %s" % dict_path)
logger.info("\t* source vocab: %d words" % len(enc_vocab))
logger.info("\t* target vocab: %d words" % len(dec_vocab))
return enc_vocab, dec_vocab
def METHOD_NAME(file_enc, skip_lines=0, filter_set=None):
embs = dict()
total_vectors_in_file = 0
with open(file_enc, "rb") as f:
for i, line in enumerate(f):
if i < skip_lines:
continue
if not line:
break
            if len(line) == 0:
                # defensive only: lines read from a file keep their trailing
                # newline, so a fully empty line should not occur here
                continue
l_split = line.decode("utf8").strip().split(" ")
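            # a line with exactly two tokens is a word2vec-style header
            # ("<vocab_size> <dimension>"); skip it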
if len(l_split) == 2:
continue
total_vectors_in_file += 1
if filter_set is not None and l_split[0] not in filter_set:
continue
embs[l_split[0]] = [float(em) for em in l_split[1:]]
return embs, total_vectors_in_file
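# For reference, METHOD_NAME expects whitespace-separated embedding lines of
# the form shown below (values are illustrative):
#
#   the 0.418 0.24968 -0.41242 ...
#
# word2vec files additionally begin with a "<vocab_size> <dimension>" header
# line, handled via skip_lines or the two-token check above.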
def convert_to_torch_tensor(word_to_float_list_dict, vocab):
dim = len(six.next(six.itervalues(word_to_float_list_dict)))
tensor = torch.zeros((len(vocab), dim))
for word, values in word_to_float_list_dict.items():
tensor[vocab[word]] = torch.Tensor(values)
return tensor
def calc_vocab_load_stats(vocab, loaded_embed_dict):
    # intersect on tokens (not ids): loaded_embed_dict is keyed by word
    matching_count = len(
        set(vocab.tokens_to_ids.keys()) & set(loaded_embed_dict.keys())
    )
missing_count = len(vocab) - matching_count
percent_matching = matching_count / len(vocab) * 100
return matching_count, missing_count, percent_matching
def main():
parser = argparse.ArgumentParser(description="embeddings_to_torch.py")
parser.add_argument(
"-emb_file_both",
required=False,
help="loads Embeddings for both source and target " "from this file.",
)
parser.add_argument(
"-emb_file_enc", required=False, help="source Embeddings from this file"
)
parser.add_argument(
"-emb_file_dec", required=False, help="target Embeddings from this file"
)
parser.add_argument(
"-output_file", required=True, help="Output file for the prepared data"
)
parser.add_argument("-dict_file", required=True, help="Dictionary file")
parser.add_argument("-verbose", action="store_true", default=False)
parser.add_argument(
"-skip_lines",
type=int,
default=0,
help="Skip first lines of the embedding file",
)
parser.add_argument("-type", choices=["GloVe", "word2vec"], default="GloVe")
opt = parser.parse_args()
enc_vocab, dec_vocab = get_vocabs(opt.dict_file)
# Read in embeddings
skip_lines = 1 if opt.type == "word2vec" else opt.skip_lines
    if opt.emb_file_both is not None:
        if opt.emb_file_enc is not None:
            raise ValueError(
                "If -emb_file_both is passed in, you should not "
                "set -emb_file_enc."
            )
        if opt.emb_file_dec is not None:
            raise ValueError(
                "If -emb_file_both is passed in, you should not "
                "set -emb_file_dec."
            )
set_of_src_and_tgt_vocab = set(enc_vocab.ids_to_tokens) | set(
dec_vocab.ids_to_tokens
)
logger.info(
"Reading encoder and decoder embeddings from {}".format(opt.emb_file_both)
)
src_vectors, total_vec_count = METHOD_NAME(
opt.emb_file_both, skip_lines, set_of_src_and_tgt_vocab
)
tgt_vectors = src_vectors
logger.info("\tFound {} total vectors in file".format(total_vec_count))
else:
        if opt.emb_file_enc is None:
            raise ValueError(
                "-emb_file_enc not provided. Please specify "
                "the file with encoder embeddings, or pass in "
                "-emb_file_both."
            )
        if opt.emb_file_dec is None:
            raise ValueError(
                "-emb_file_dec not provided. Please specify "
                "the file with decoder embeddings, or pass in "
                "-emb_file_both."
            )
logger.info("Reading encoder embeddings from {}".format(opt.emb_file_enc))
src_vectors, total_vec_count = METHOD_NAME(
opt.emb_file_enc, skip_lines, filter_set=set(enc_vocab.ids_to_tokens)
)
logger.info("\tFound {} total vectors in file.".format(total_vec_count))
logger.info("Reading decoder embeddings from {}".format(opt.emb_file_dec))
tgt_vectors, total_vec_count = METHOD_NAME(
opt.emb_file_dec, skip_lines, filter_set=set(dec_vocab.ids_to_tokens)
)
logger.info("\tFound {} total vectors in file".format(total_vec_count))
logger.info("After filtering to vectors in vocab:")
logger.info(
"\t* enc: %d match, %d missing, (%.2f%%)"
% calc_vocab_load_stats(enc_vocab, src_vectors)
)
logger.info(
"\t* dec: %d match, %d missing, (%.2f%%)"
% calc_vocab_load_stats(dec_vocab, tgt_vectors)
)
# Write to file
enc_output_file = opt.output_file + ".enc.pt"
dec_output_file = opt.output_file + ".dec.pt"
logger.info(
"\nSaving embedding as:\n\t* enc: %s\n\t* dec: %s"
% (enc_output_file, dec_output_file)
)
torch.save(convert_to_torch_tensor(src_vectors, enc_vocab), enc_output_file)
torch.save(convert_to_torch_tensor(tgt_vectors, dec_vocab), dec_output_file)
logger.info("\nDone.")
if __name__ == "__main__":
init_logger("embeddings_to_torch.log")
main() | null |
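# Example invocation (file names are illustrative):
#
#   python embeddings_to_torch.py -emb_file_both glove.6B.300d.txt \
#       -dict_file model.pt -output_file embeddings -type GloVe
#
# which writes embeddings.enc.pt and embeddings.dec.pt.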
537 | __author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision$"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from .base import validatorBase
from .logging import *
from .validators import rdfAbout, noduplicates, text, eater
from .root import rss11_namespace as rss11_ns
from .extension import extension_everywhere
rdfNS = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
#
# rdf:RDF element. The valid children include "channel", "item", "textinput", "image"
#
class rdf(validatorBase,object):
def do_rss090_channel(self):
from .channel import channel
self.dispatcher.defaultNamespaces.append("http://my.netscape.com/rdf/simple/0.9/")
return channel(), noduplicates()
def do_channel(self):
from .channel import rss10Channel
return rdfAbout(), rss10Channel(), noduplicates()
def _is_090(self):
return "http://my.netscape.com/rdf/simple/0.9/" in self.dispatcher.defaultNamespaces
def _withAbout(self,v):
if self._is_090():
return v
else:
return v, rdfAbout()
def do_item(self):
from .item import rss10Item
return self._withAbout(rss10Item())
def do_textinput(self):
from .textInput import textInput
return self._withAbout(textInput())
def do_image(self):
return self._withAbout(rss10Image())
def do_cc_License(self):
return eater()
def do_taxo_topic(self):
return eater()
def do_rdf_Description(self):
return eater()
def prevalidate(self):
self.setFeedType(TYPE_RSS1)
def validate(self):
if not "channel" in self.children and not "rss090_channel" in self.children:
self.log(MissingElement({"parent":self.name.replace('_',':'), "element":"channel"}))
from .validators import rfc2396_full
class rss10Image(validatorBase, extension_everywhere):
def validate(self):
if not "title" in self.children:
self.log(MissingTitle({"parent":self.name, "element":"title"}))
if not "link" in self.children:
self.log(MissingLink({"parent":self.name, "element":"link"}))
if not "url" in self.children:
self.log(MissingElement({"parent":self.name, "element":"url"}))
def do_title(self):
from .image import title
return title(), noduplicates()
def do_link(self):
return rfc2396_full(), noduplicates()
def do_url(self):
return rfc2396_full(), noduplicates()
def do_dc_creator(self):
return text()
def do_dc_subject(self):
return text() # duplicates allowed
def do_dc_date(self):
from .validators import w3cdtf
return w3cdtf(), noduplicates()
def METHOD_NAME(self):
return eater()
#
# This class performs RSS 1.x specific validations on extensions.
#
class rdfExtension(validatorBase):
def __init__(self, qname, literal=False):
validatorBase.__init__(self)
self.qname=qname
self.literal=literal
def textOK(self):
pass
def setElement(self, name, attrs, parent):
validatorBase.setElement(self, name, attrs, parent)
if (rdfNS,"parseType") in attrs:
if attrs[(rdfNS,"parseType")] == "Literal": self.literal=True
if not self.literal:
# ensure no rss11 children
if self.qname==rss11_ns:
from .logging import UndefinedElement
self.log(UndefinedElement({"parent":parent.name, "element":name}))
# no duplicate rdf:abouts
if (rdfNS,"about") in attrs:
about = attrs[(rdfNS,"about")]
if not "abouts" in self.dispatcher.__dict__:
self.dispatcher.__dict__["abouts"] = []
if about in self.dispatcher.__dict__["abouts"]:
self.log(DuplicateValue(
{"parent":parent.name, "element":"rdf:about", "value":about}))
else:
self.dispatcher.__dict__["abouts"].append(about)
def getExpectedAttrNames(self):
# no rss11 attributes
if self.literal or not self.attrs: return list(self.attrs.keys())
return [(ns,n) for ns,n in list(self.attrs.keys()) if ns!=rss11_ns]
def validate(self):
# rdflib 2.0.5 does not catch mixed content errors
if self.value.strip() and self.children and not self.literal:
self.log(InvalidRDF({"message":"mixed content"}))
def startElementNS(self, name, qname, attrs):
# ensure element is "namespace well formed"
if name.find(':') != -1:
from .logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":name}))
# ensure all attribute namespaces are properly defined
for (namespace,attr) in list(attrs.keys()):
if ':' in attr and not namespace:
from .logging import MissingNamespace
self.log(MissingNamespace({"parent":self.name, "element":attr}))
# eat children
self.children.append((qname,name))
self.push(rdfExtension(qname, self.literal), name, attrs)
def characters(self, string):
if not self.literal: validatorBase.characters(self, string) | null |
538 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2023 Valory AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Tests for the `autonomy analyse logs` command"""
import contextlib
import os
from typing import Tuple
from autonomy.analyse.logs.base import LOGS_DB
from autonomy.deploy.constants import WARNING
from tests.conftest import DATA_DIR
from tests.test_autonomy.test_cli.base import BaseCliTest
LOGS_DIR = DATA_DIR / "logs"
LOGS_DB_FILE = LOGS_DIR / LOGS_DB
AVAILABLE_ROUNDS = (
"registration_startup",
"new_tokens",
"leaderboard_observation",
"image_code_calculation",
"image_generation",
"db_update",
"reset_and_pause",
)
class BaseLogAnalyserTest(BaseCliTest):
"""Base test class for the log analyser."""
cli_options: Tuple[str, ...] = ("analyse", "logs")
@classmethod
def teardown_class(cls) -> None:
"""Teardown class."""
with contextlib.suppress(PermissionError):
if LOGS_DB_FILE.exists():
os.remove(LOGS_DB_FILE)
class TestAnalyseLogs(BaseLogAnalyserTest):
"""Test `autonomy analyse logs`"""
def test_agent_options(self) -> None:
"""Test print agent options."""
result = self.run_cli(commands=("--from-dir", str(LOGS_DIR)))
assert result.exit_code == 1, result.stdout
assert "Available agents: ['aea_0']" in result.output
def test_logs_table(self) -> None:
"""Test print agent options."""
result = self.run_cli(commands=("--from-dir", str(LOGS_DIR), "-a", "aea_0"))
assert result.exit_code == 0, result.stdout
assert "Entered in the 'registration_startup' behaviour" in result.output
def test_log_level_filter(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--log-level",
WARNING,
)
)
assert result.exit_code == 0, result.stdout
assert "[WARNING]" in result.output
assert "[INFO]" not in result.output
def test_start_time_filter(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--start-time",
"2023-01-23 15:39:20,424", # actual timestamp from the logs file
)
)
assert result.exit_code == 0, result.stdout
# actual timestamps from the logs file
assert "2023-01-23 15:39:21.441000" in result.output
assert "2023-01-23 15:39:19.686000" not in result.output
def test_end_time_filter(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--end-time",
"2023-01-23 15:39:20,424", # actual timestamp from the logs file
)
)
assert result.exit_code == 0, result.stdout
# actual timestamps from the logs file
assert "2023-01-23 15:39:21.441000" not in result.output
assert "2023-01-23 15:39:19.686000" in result.output
def test_period_filter(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--period",
"1",
)
)
assert result.exit_code == 0, result.stdout
assert "period 0" not in result.output
assert "period 1" in result.output
assert "period 2" not in result.output
def test_round_filter(self) -> None:
"""Test print agent options."""
round_name = "new_tokens"
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--round",
round_name,
)
)
assert result.exit_code == 0, result.stdout
assert round_name in result.output
assert all([_round not in result.output for _round in AVAILABLE_ROUNDS[2:]])
def METHOD_NAME(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"-ir",
".*Entered in the.*",
)
)
assert result.exit_code == 0, result.stdout
assert "Entered in the" in result.output
assert "arrived block with timestamp" not in result.output
def test_exclude_regex_filter(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"-er",
".*Entered in the.*",
)
)
assert result.exit_code == 0, result.stdout
assert "Entered in the" not in result.output
assert "arrived block with timestamp" in result.output
def test_fsm_path(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR),
"-a",
"aea_0",
"--fsm",
)
)
assert result.exit_code == 0, result.stdout
assert all(
[
f"|_ {_round} | Event.DONE" in result.output
for _round in AVAILABLE_ROUNDS
]
)
def test_empty_logs_dir(self) -> None:
"""Test print agent options."""
result = self.run_cli(
commands=(
"--from-dir",
str(LOGS_DIR.parent),
"-a",
"aea_0",
"--fsm",
)
)
assert result.exit_code == 1, result.stdout
assert "Cannot find agent log data in" in result.output | null |
539 | from __future__ import annotations
import os
import sys
from array import array
import pytest
from dask.multiprocessing import get_context
from dask.sizeof import sizeof
from dask.utils import funcname
try:
import pandas as pd
except ImportError:
pd = None
requires_pandas = pytest.mark.skipif(pd is None, reason="requires pandas")
def METHOD_NAME():
assert sizeof(1) == sys.getsizeof(1)
def test_name():
assert funcname(sizeof) == "sizeof"
def test_containers():
assert sizeof([1, 2, [3]]) > (sys.getsizeof(3) * 3 + sys.getsizeof([]))
def test_bytes_like():
assert 1000 <= sizeof(bytes(1000)) <= 2000
assert 1000 <= sizeof(bytearray(1000)) <= 2000
assert 1000 <= sizeof(memoryview(bytes(1000))) <= 2000
assert 8000 <= sizeof(array("d", range(1000))) <= 9000
def test_numpy():
np = pytest.importorskip("numpy")
assert 8000 <= sizeof(np.empty(1000, dtype="f8")) <= 9000
dt = np.dtype("f8")
assert sizeof(dt) == sys.getsizeof(dt)
def test_numpy_0_strided():
np = pytest.importorskip("numpy")
x = np.broadcast_to(1, (100, 100, 100))
assert sizeof(x) <= 8
@requires_pandas
def test_pandas():
df = pd.DataFrame(
{"x": [1, 2, 3], "y": ["a" * 100, "b" * 100, "c" * 100]}, index=[10, 20, 30]
)
assert sizeof(df) >= sizeof(df.x) + sizeof(df.y) - sizeof(df.index)
assert sizeof(df.x) >= sizeof(df.index)
assert sizeof(df.y) >= 100 * 3
assert sizeof(df.index) >= 20
assert isinstance(sizeof(df), int)
assert isinstance(sizeof(df.x), int)
assert isinstance(sizeof(df.index), int)
@requires_pandas
def test_pandas_contiguous_dtypes():
"""2+ contiguous columns of the same dtype in the same DataFrame share the same
surface thus have lower overhead
"""
df1 = pd.DataFrame([[1, 2.2], [3, 4.4]])
df2 = pd.DataFrame([[1.1, 2.2], [3.3, 4.4]])
assert sizeof(df2) < sizeof(df1)
@requires_pandas
def test_pandas_multiindex():
index = pd.MultiIndex.from_product([range(5), ["a", "b", "c", "d", "e"]])
actual_size = sys.getsizeof(index)
assert 0.5 * actual_size < sizeof(index) < 2 * actual_size
assert isinstance(sizeof(index), int)
@requires_pandas
def test_pandas_repeated_column():
df = pd.DataFrame({"x": list(range(10_000))})
df2 = df[["x", "x", "x"]]
df3 = pd.DataFrame({"x": list(range(10_000)), "y": list(range(10_000))})
assert 80_000 < sizeof(df) < 85_000
assert 80_000 < sizeof(df2) < 85_000
assert 160_000 < sizeof(df3) < 165_000
def test_sparse_matrix():
sparse = pytest.importorskip("scipy.sparse")
sp = sparse.eye(10)
    # Lower bounds measured on 32-bit Python 2.7; current platforms report at
    # least these sizes.
assert sizeof(sp.todia()) >= 152
assert sizeof(sp.tobsr()) >= 232
assert sizeof(sp.tocoo()) >= 240
assert sizeof(sp.tocsc()) >= 232
assert sizeof(sp.tocsr()) >= 232
assert sizeof(sp.todok()) >= 188
assert sizeof(sp.tolil()) >= 204
@requires_pandas
@pytest.mark.parametrize("cls_name", ["Series", "DataFrame", "Index"])
@pytest.mark.parametrize("dtype", [object, "string[python]"])
def test_pandas_object_dtype(dtype, cls_name):
cls = getattr(pd, cls_name)
s1 = cls([f"x{i:3d}" for i in range(1000)], dtype=dtype)
assert sizeof("x000") * 1000 < sizeof(s1) < 2 * sizeof("x000") * 1000
x = "x" * 100_000
y = "y" * 100_000
z = "z" * 100_000
w = "w" * 100_000
# High duplication of references to the same object
s2 = cls([x, y, z, w] * 1000, dtype=dtype)
assert 400_000 < sizeof(s2) < 500_000
# Low duplication of references to the same object
s3 = cls([x, y, z, w], dtype=dtype)
s4 = cls([x, y, z, x], dtype=dtype)
s5 = cls([x, x, x, x], dtype=dtype)
assert sizeof(s5) < sizeof(s4) < sizeof(s3)
@requires_pandas
@pytest.mark.parametrize("dtype", [object, "string[python]"])
def test_dataframe_object_dtype(dtype):
x = "x" * 100_000
y = "y" * 100_000
z = "z" * 100_000
w = "w" * 100_000
# High duplication of references to the same object, across different columns
objs = [x, y, z, w]
df1 = pd.DataFrame([objs * 3] * 1000, dtype=dtype)
assert 400_000 < sizeof(df1) < 550_000
# Low duplication of references to the same object, across different columns
df2 = pd.DataFrame([[x, y], [z, w]], dtype=dtype)
df3 = pd.DataFrame([[x, y], [z, x]], dtype=dtype)
df4 = pd.DataFrame([[x, x], [x, x]], dtype=dtype)
assert sizeof(df4) < sizeof(df3) < sizeof(df2)
@pytest.mark.parametrize("cls_name", ["Series", "DataFrame", "Index"])
def test_pandas_string_arrow_dtype(cls_name):
pytest.importorskip("pyarrow")
cls = getattr(pd, cls_name)
s = cls(["x" * 100_000, "y" * 50_000], dtype="string[pyarrow]")
assert 150_000 < sizeof(s) < 155_000
@requires_pandas
def test_pandas_empty():
df = pd.DataFrame(
{"x": [1, 2, 3], "y": ["a" * 100, "b" * 100, "c" * 100]}, index=[10, 20, 30]
)
empty = df.head(0)
assert sizeof(empty) > 0
assert sizeof(empty.x) > 0
assert sizeof(empty.y) > 0
assert sizeof(empty.index) > 0
@requires_pandas
def test_pyarrow_table():
pa = pytest.importorskip("pyarrow")
df = pd.DataFrame(
{"x": [1, 2, 3], "y": ["a" * 100, "b" * 100, "c" * 100]}, index=[10, 20, 30]
)
table = pa.Table.from_pandas(df)
assert sizeof(table) > sizeof(table.schema.metadata)
assert isinstance(sizeof(table), int)
assert isinstance(sizeof(table.columns[0]), int)
assert isinstance(sizeof(table.columns[1]), int)
assert isinstance(sizeof(table.columns[2]), int)
empty = pa.Table.from_pandas(df.head(0))
assert sizeof(empty) > sizeof(empty.schema.metadata)
assert sizeof(empty.columns[0]) > 0
assert sizeof(empty.columns[1]) > 0
assert sizeof(empty.columns[2]) > 0
def test_dict():
np = pytest.importorskip("numpy")
x = np.ones(10000)
assert sizeof({"x": x}) > x.nbytes
assert sizeof({"x": [x]}) > x.nbytes
assert sizeof({"x": [{"y": x}]}) > x.nbytes
d = {i: x for i in range(100)}
assert sizeof(d) > x.nbytes * 100
assert isinstance(sizeof(d), int)
def _get_sizeof_on_path(path, size):
sys.path.append(os.fsdecode(path))
# Dask will have already called _register_entry_point_plugins
# before we can modify sys.path, so we re-register here.
import dask.sizeof
dask.sizeof._register_entry_point_plugins()
import class_impl
cls = class_impl.Impl(size)
return sizeof(cls)
def test_register_backend_entrypoint(tmp_path):
# Create special sizeof implementation for a dummy class
(tmp_path / "impl_sizeof.py").write_bytes(
b"def sizeof_plugin(sizeof):\n"
b' print("REG")\n'
b' @sizeof.register_lazy("class_impl")\n'
b" def register_impl():\n"
b" import class_impl\n"
b" @sizeof.register(class_impl.Impl)\n"
b" def sizeof_impl(obj):\n"
b" return obj.size \n"
)
# Define dummy class that possesses a size attribute
(tmp_path / "class_impl.py").write_bytes(
b"class Impl:\n def __init__(self, size):\n self.size = size"
)
dist_info = tmp_path / "impl_sizeof-0.0.0.dist-info"
dist_info.mkdir()
(dist_info / "entry_points.txt").write_bytes(
b"[dask.sizeof]\nimpl = impl_sizeof:sizeof_plugin\n"
)
with get_context().Pool(1) as pool:
assert (
pool.apply(_get_sizeof_on_path, args=(tmp_path, 3_14159265)) == 3_14159265
)
pool.join() | null |
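# For comparison with the entry-point mechanism tested above, a sizeof
# implementation can also be registered directly at runtime. A minimal
# sketch (`MyType` is a placeholder class assumed to expose `nbytes`):
#
#     from dask.sizeof import sizeof
#
#     @sizeof.register(MyType)
#     def sizeof_mytype(obj):
#         return obj.nbytes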
540 | import shutil
from dataclasses import replace
import pytest
from pharmpy.deps import numpy as np
from pharmpy.deps import pandas as pd
from pharmpy.deps import sympy
from pharmpy.internals.fs.cwd import chdir
from pharmpy.modeling import (
add_iiv,
add_population_parameter,
create_joint_distribution,
fix_or_unfix_parameters,
fix_parameters,
fix_parameters_to,
get_omegas,
get_sigmas,
get_thetas,
set_lower_bounds,
set_upper_bounds,
unconstrain_parameters,
unfix_parameters,
unfix_parameters_to,
update_inits,
)
from pharmpy.tools import read_modelfit_results
def test_get_thetas(pheno):
thetas = get_thetas(pheno)
assert len(thetas) == 3
assert thetas['PTVCL'].init == 0.00469307
def test_get_omegas(pheno):
omegas = get_omegas(pheno)
assert len(omegas) == 2
assert omegas['IVCL'].init == 0.0309626
def test_get_sigmas(pheno):
sigmas = get_sigmas(pheno)
assert len(sigmas) == 1
assert sigmas['SIGMA_1_1'].init == 0.013241
def test_fix_parameters(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
assert not model.parameters['THETA_1'].fix
model = fix_parameters(model, ['THETA_1'])
assert model.parameters['THETA_1'].fix
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
assert not model.parameters['THETA_1'].fix
model = fix_parameters(model, 'THETA_1')
assert model.parameters['THETA_1'].fix
def test_unfix_parameters(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters(model, ['THETA_1'])
assert model.parameters['THETA_1'].fix
model = unfix_parameters(model, ['THETA_1'])
assert not model.parameters['THETA_1'].fix
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters(model, 'THETA_1')
assert model.parameters['THETA_1'].fix
model = unfix_parameters(model, 'THETA_1')
assert not model.parameters['THETA_1'].fix
def test_fix_or_unfix_parameters(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_or_unfix_parameters(model, {'THETA_1': True})
assert model.parameters['THETA_1'].fix
model = fix_or_unfix_parameters(model, {'THETA_1': False})
assert not model.parameters['THETA_1'].fix
def test_unconstrain_parameters(load_model_for_test, testdata):
    model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
    model = fix_or_unfix_parameters(model, {'THETA_1': True})
    model = unconstrain_parameters(model, ['THETA_1'])
    assert not model.parameters['THETA_1'].fix
    model = fix_or_unfix_parameters(model, {'THETA_1': True})
    model = unconstrain_parameters(model, 'THETA_1')
    assert not model.parameters['THETA_1'].fix
def test_fix_parameters_to(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters_to(model, {'THETA_1': 0})
assert model.parameters['THETA_1'].fix
assert model.parameters['THETA_1'].init == 0
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters_to(model, {'THETA_1': 0, 'OMEGA_1_1': 0})
assert model.parameters['THETA_1'].fix
assert model.parameters['THETA_1'].init == 0
assert model.parameters['THETA_1'].fix
assert model.parameters['OMEGA_1_1'].init == 0
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters_to(model, {'THETA_1': 0, 'OMEGA_1_1': 1})
assert model.parameters['THETA_1'].init == 0
assert model.parameters['OMEGA_1_1'].init == 1
def test_unfix_parameters_to(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters(model, ['THETA_1'])
assert model.parameters['THETA_1'].fix
model = unfix_parameters_to(model, {'THETA_1': 0})
assert not model.parameters['THETA_1'].fix
assert model.parameters['THETA_1'].init == 0
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = fix_parameters(model, ['THETA_1', 'OMEGA_1_1'])
assert model.parameters['THETA_1'].fix
assert model.parameters['OMEGA_1_1'].fix
model = unfix_parameters_to(model, {'THETA_1': 0, 'OMEGA_1_1': 0})
assert not model.parameters['THETA_1'].fix
assert not model.parameters['OMEGA_1_1'].fix
assert model.parameters['THETA_1'].init == 0
assert model.parameters['OMEGA_1_1'].init == 0
def METHOD_NAME(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = set_upper_bounds(model, {'THETA_1': 100})
assert model.parameters['THETA_1'].upper == 100
assert model.parameters['OMEGA_1_1'].upper == sympy.oo
model = set_lower_bounds(model, {'THETA_1': -100})
assert model.parameters['THETA_1'].lower == -100
assert model.parameters['OMEGA_1_1'].lower == 0
def test_add_population_parameter(load_model_for_test, testdata):
model = load_model_for_test(testdata / 'nonmem' / 'minimal.mod')
model = add_population_parameter(model, 'NEWPARAM', 23)
assert len(model.parameters) == 4
assert model.parameters['NEWPARAM'].init == 23
def test_update_inits_move_est(load_model_for_test, pheno_path):
model = load_model_for_test(pheno_path)
res = read_modelfit_results(pheno_path)
model = create_joint_distribution(model, individual_estimates=res.individual_estimates)
model = add_iiv(model, 'S1', 'add')
param_est = res.parameter_estimates.copy()
param_est['IIV_CL_IIV_V'] = 0.0285 # Correlation > 0.99
param_est['IIV_S1'] = 0.0005
model = update_inits(model, param_est, move_est_close_to_bounds=True)
assert model.parameters['IVCL'].init == param_est['IVCL']
assert model.parameters['IIV_S1'].init == 0.01
assert round(model.parameters['IIV_CL_IIV_V'].init, 6) == 0.025757
def test_update_inits_zero_fix(load_model_for_test, pheno_path):
model = load_model_for_test(pheno_path)
d = {name: 0 for name in model.random_variables.iiv.parameter_names}
model = fix_parameters_to(model, d)
res = read_modelfit_results(pheno_path)
param_est = res.parameter_estimates.drop(index=['IVCL'])
model = update_inits(model, param_est)
assert model.parameters['IVCL'].init == 0
assert model.parameters['IVCL'].fix
model = load_model_for_test(pheno_path)
d = {name: 0 for name in model.random_variables.iiv.parameter_names}
model = fix_parameters_to(model, d)
param_est = res.parameter_estimates.drop(index=['IVCL'])
model = update_inits(model, param_est, move_est_close_to_bounds=True)
assert model.parameters['IVCL'].init == 0
assert model.parameters['IVCL'].fix
def test_update_inits_no_res(load_model_for_test, testdata, tmp_path):
shutil.copy(testdata / 'nonmem/pheno.mod', tmp_path / 'run1.mod')
shutil.copy(testdata / 'nonmem/pheno.dta', tmp_path / 'pheno.dta')
with chdir(tmp_path):
shutil.copy(testdata / 'nonmem/pheno.ext', tmp_path / 'run1.ext')
shutil.copy(testdata / 'nonmem/pheno.lst', tmp_path / 'run1.lst')
model = load_model_for_test('run1.mod')
res = read_modelfit_results('run1.mod')
modelfit_results = replace(
res,
parameter_estimates=pd.Series(
np.nan, name='estimates', index=list(model.parameters.nonfixed.inits.keys())
),
)
with pytest.raises(ValueError):
update_inits(model, modelfit_results.parameter_estimates) | null |
541 | import logging
from os.path import dirname
from galaxy.queue_worker import job_rule_modules
from galaxy.structured_app import StructuredApp
from galaxy.tool_util.toolbox.watcher import (
get_tool_conf_watcher,
get_tool_watcher,
)
from galaxy.util.watcher import get_watcher
log = logging.getLogger(__name__)
class ConfigWatchers:
"""Contains ToolConfWatcher, ToolWatcher and ToolDataWatcher objects."""
def __init__(self, app: StructuredApp):
self.app = app
self.active = False
# ToolConfWatcher objects will watch the tool_cache if the tool_cache is passed into get_tool_conf_watcher.
# Watching the tool_cache means removing outdated items from the tool_cache.
# Only the reload_toolbox callback will re-populate the cache, so we pass the tool_cache only to the ToolConfWatcher that
# watches regular tools.
# If there are multiple ToolConfWatcher objects for the same handler or web process a race condition occurs between the two cache_cleanup functions.
# If the reload_data_managers callback wins, the cache will miss the tools that had been removed from the cache
# and will be blind to further changes in these tools.
        def reload_toolbox():
            save_integrated_tool_panel = False
            try:
                # Run and wait for toolbox reload on the process that watches the config files.
                # The toolbox reload will update the integrated_tool_panel_file
                self.app.queue_worker.send_local_control_task("reload_toolbox", get_response=True)
            except Exception:
                save_integrated_tool_panel = True
                log.exception("Exception occurred while reloading toolbox")
            self.app.queue_worker.send_control_task(
                "reload_toolbox", noop_self=True, kwargs={"save_integrated_tool_panel": save_integrated_tool_panel}
            )
self.tool_config_watcher = get_tool_conf_watcher(
reload_callback=reload_toolbox,
tool_cache=self.app.tool_cache,
)
self.data_manager_config_watcher = get_tool_conf_watcher(
reload_callback=lambda: self.app.queue_worker.send_control_task("reload_data_managers"),
)
self.tool_data_watcher = get_watcher(self.app.config, "watch_tool_data_dir", monitor_what_str="data tables")
self.tool_watcher = get_tool_watcher(self, app.config)
if getattr(self.app, "is_job_handler", False):
self.job_rule_watcher = get_watcher(app.config, "watch_job_rules", monitor_what_str="job rules")
else:
self.job_rule_watcher = get_watcher(app.config, "__invalid__")
self.core_config_watcher = get_watcher(app.config, "watch_core_config", monitor_what_str="core config file")
self.tour_watcher = get_watcher(app.config, "watch_tours", monitor_what_str="tours")
@property
def watchers(self):
        return (
            self.tool_watcher,
            self.tool_config_watcher,
            self.data_manager_config_watcher,
            self.tool_data_watcher,
            self.job_rule_watcher,
            self.core_config_watcher,
            self.tour_watcher,
        )
def change_state(self, active):
if active:
self.start()
elif self.active:
self.METHOD_NAME()
def start(self):
for watcher in self.watchers:
watcher.start()
        for config in self.tool_config_paths:
            self.tool_config_watcher.watch_file(config)
        for config in self.data_manager_configs:
            self.data_manager_config_watcher.watch_file(config)
for tool_data_path in self.tool_data_paths:
self.tool_data_watcher.watch_directory(
tool_data_path,
callback=lambda path: self.app.queue_worker.send_control_task(
"reload_tool_data_tables", kwargs={"path": path}
),
require_extensions=(".loc",),
recursive=True,
)
for job_rules_directory in self.job_rules_paths:
self.job_rule_watcher.watch_directory(
job_rules_directory,
callback=lambda: self.app.queue_worker.send_control_task("reload_job_rules"),
recursive=True,
ignore_extensions=(".pyc", ".pyo", ".pyd"),
)
if self.app.config.config_file:
self.core_config_watcher.watch_file(
self.app.config.config_file,
callback=lambda path: self.app.queue_worker.send_control_task("reload_core_config"),
)
self.tour_watcher.watch_directory(
self.app.config.tour_config_dir,
callback=lambda path: self.app.queue_worker.send_control_task("reload_tour", kwargs={"path": path}),
)
self.active = True
def METHOD_NAME(self):
for watcher in self.watchers:
watcher.METHOD_NAME()
self.active = False
def update_watch_data_table_paths(self):
if hasattr(self.tool_data_watcher, "monitored_dirs"):
for tool_data_table_path in self.tool_data_paths:
if tool_data_table_path not in self.tool_data_watcher.monitored_dirs:
self.tool_data_watcher.watch_directory(tool_data_table_path)
@property
def data_manager_configs(self):
data_manager_configs = []
if hasattr(self.app.config, "data_manager_config_file"):
data_manager_configs.append(self.app.config.data_manager_config_file)
if hasattr(self.app.config, "shed_data_manager_config_file"):
data_manager_configs.append(self.app.config.shed_data_manager_config_file)
return data_manager_configs
@property
def tool_data_paths(self):
tool_data_paths = []
if hasattr(self.app.config, "tool_data_path"):
tool_data_paths.append(self.app.config.tool_data_path)
if hasattr(self.app.config, "shed_tool_data_path"):
tool_data_paths.append(self.app.config.shed_tool_data_path)
return tool_data_paths
@property
def tool_config_paths(self):
tool_config_paths = []
if hasattr(self.app.config, "tool_configs"):
tool_config_paths = self.app.config.tool_configs
return tool_config_paths
@property
def job_rules_paths(self):
job_rules_paths = []
for rules_module in job_rule_modules(self.app):
job_rules_dir = dirname(rules_module.__file__)
job_rules_paths.append(job_rules_dir)
return job_rules_paths | null |
542 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data
class UpdateConfigDeliveryChannelRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Config', '2020-09-07', 'UpdateConfigDeliveryChannel')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NonCompliantNotification(self): # Boolean
return self.get_query_params().get('NonCompliantNotification')
def set_NonCompliantNotification(self, NonCompliantNotification): # Boolean
self.add_query_param('NonCompliantNotification', NonCompliantNotification)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ConfigurationSnapshot(self): # Boolean
return self.get_query_params().get('ConfigurationSnapshot')
def set_ConfigurationSnapshot(self, ConfigurationSnapshot): # Boolean
self.add_query_param('ConfigurationSnapshot', ConfigurationSnapshot)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_DeliveryChannelTargetArn(self): # String
return self.get_query_params().get('DeliveryChannelTargetArn')
def set_DeliveryChannelTargetArn(self, DeliveryChannelTargetArn): # String
self.add_query_param('DeliveryChannelTargetArn', DeliveryChannelTargetArn)
def get_DeliveryChannelCondition(self): # String
return self.get_query_params().get('DeliveryChannelCondition')
def set_DeliveryChannelCondition(self, DeliveryChannelCondition): # String
self.add_query_param('DeliveryChannelCondition', DeliveryChannelCondition)
def get_ConfigurationItemChangeNotification(self): # Boolean
return self.get_query_params().get('ConfigurationItemChangeNotification')
def set_ConfigurationItemChangeNotification(self, ConfigurationItemChangeNotification): # Boolean
self.add_query_param('ConfigurationItemChangeNotification', ConfigurationItemChangeNotification)
def get_DeliveryChannelName(self): # String
return self.get_query_params().get('DeliveryChannelName')
def set_DeliveryChannelName(self, DeliveryChannelName): # String
self.add_query_param('DeliveryChannelName', DeliveryChannelName)
def get_DeliverySnapshotTime(self): # String
return self.get_query_params().get('DeliverySnapshotTime')
def set_DeliverySnapshotTime(self, DeliverySnapshotTime): # String
self.add_query_param('DeliverySnapshotTime', DeliverySnapshotTime)
def get_DeliveryChannelId(self): # String
return self.get_query_params().get('DeliveryChannelId')
def set_DeliveryChannelId(self, DeliveryChannelId): # String
self.add_query_param('DeliveryChannelId', DeliveryChannelId)
def METHOD_NAME(self): # String
return self.get_query_params().get('OversizedDataOSSTargetArn')
def set_OversizedDataOSSTargetArn(self, OversizedDataOSSTargetArn): # String
self.add_query_param('OversizedDataOSSTargetArn', OversizedDataOSSTargetArn)
def get_Status(self): # Long
return self.get_query_params().get('Status')
def set_Status(self, Status): # Long
self.add_query_param('Status', Status) | null |
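# Typical usage with the core SDK client (credentials, region and ids are
# placeholders):
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = UpdateConfigDeliveryChannelRequest()
#     request.set_DeliveryChannelId('<delivery-channel-id>')
#     response = client.do_action_with_exception(request)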
543 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class ModifyBackupPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyBackupPolicy')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_LocalLogRetentionHours(self): # String
return self.get_query_params().get('LocalLogRetentionHours')
def set_LocalLogRetentionHours(self, LocalLogRetentionHours): # String
self.add_query_param('LocalLogRetentionHours', LocalLogRetentionHours)
def get_BackupPriority(self): # Integer
return self.get_query_params().get('BackupPriority')
def set_BackupPriority(self, BackupPriority): # Integer
self.add_query_param('BackupPriority', BackupPriority)
def get_LogBackupFrequency(self): # String
return self.get_query_params().get('LogBackupFrequency')
def set_LogBackupFrequency(self, LogBackupFrequency): # String
self.add_query_param('LogBackupFrequency', LogBackupFrequency)
def get_ArchiveBackupKeepCount(self): # Integer
return self.get_query_params().get('ArchiveBackupKeepCount')
def set_ArchiveBackupKeepCount(self, ArchiveBackupKeepCount): # Integer
self.add_query_param('ArchiveBackupKeepCount', ArchiveBackupKeepCount)
def get_BackupLog(self): # String
return self.get_query_params().get('BackupLog')
def set_BackupLog(self, BackupLog): # String
self.add_query_param('BackupLog', BackupLog)
def get_BackupInterval(self): # String
return self.get_query_params().get('BackupInterval')
def set_BackupInterval(self, BackupInterval): # String
self.add_query_param('BackupInterval', BackupInterval)
def get_HighSpaceUsageProtection(self): # String
return self.get_query_params().get('HighSpaceUsageProtection')
def set_HighSpaceUsageProtection(self, HighSpaceUsageProtection): # String
self.add_query_param('HighSpaceUsageProtection', HighSpaceUsageProtection)
def get_LogBackupLocalRetentionNumber(self): # Integer
return self.get_query_params().get('LogBackupLocalRetentionNumber')
def set_LogBackupLocalRetentionNumber(self, LogBackupLocalRetentionNumber): # Integer
self.add_query_param('LogBackupLocalRetentionNumber', LogBackupLocalRetentionNumber)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_EnableBackupLog(self): # String
return self.get_query_params().get('EnableBackupLog')
def set_EnableBackupLog(self, EnableBackupLog): # String
self.add_query_param('EnableBackupLog', EnableBackupLog)
def get_BackupPolicyMode(self): # String
return self.get_query_params().get('BackupPolicyMode')
def set_BackupPolicyMode(self, BackupPolicyMode): # String
self.add_query_param('BackupPolicyMode', BackupPolicyMode)
def get_PreferredBackupPeriod(self): # String
return self.get_query_params().get('PreferredBackupPeriod')
def set_PreferredBackupPeriod(self, PreferredBackupPeriod): # String
self.add_query_param('PreferredBackupPeriod', PreferredBackupPeriod)
def get_EnableIncrementDataBackup(self): # Boolean
return self.get_query_params().get('EnableIncrementDataBackup')
def set_EnableIncrementDataBackup(self, EnableIncrementDataBackup): # Boolean
self.add_query_param('EnableIncrementDataBackup', EnableIncrementDataBackup)
def get_ReleasedKeepPolicy(self): # String
return self.get_query_params().get('ReleasedKeepPolicy')
def set_ReleasedKeepPolicy(self, ReleasedKeepPolicy): # String
self.add_query_param('ReleasedKeepPolicy', ReleasedKeepPolicy)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_CompressType(self): # String
return self.get_query_params().get('CompressType')
def set_CompressType(self, CompressType): # String
self.add_query_param('CompressType', CompressType)
def get_LocalLogRetentionSpace(self): # String
return self.get_query_params().get('LocalLogRetentionSpace')
def set_LocalLogRetentionSpace(self, LocalLogRetentionSpace): # String
self.add_query_param('LocalLogRetentionSpace', LocalLogRetentionSpace)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ArchiveBackupKeepPolicy(self): # String
return self.get_query_params().get('ArchiveBackupKeepPolicy')
def set_ArchiveBackupKeepPolicy(self, ArchiveBackupKeepPolicy): # String
self.add_query_param('ArchiveBackupKeepPolicy', ArchiveBackupKeepPolicy)
def get_PreferredBackupTime(self): # String
return self.get_query_params().get('PreferredBackupTime')
def METHOD_NAME(self, PreferredBackupTime): # String
self.add_query_param('PreferredBackupTime', PreferredBackupTime)
def get_BackupRetentionPeriod(self): # String
return self.get_query_params().get('BackupRetentionPeriod')
def set_BackupRetentionPeriod(self, BackupRetentionPeriod): # String
self.add_query_param('BackupRetentionPeriod', BackupRetentionPeriod)
def get_BackupMethod(self): # String
return self.get_query_params().get('BackupMethod')
def set_BackupMethod(self, BackupMethod): # String
self.add_query_param('BackupMethod', BackupMethod)
def get_ArchiveBackupRetentionPeriod(self): # String
return self.get_query_params().get('ArchiveBackupRetentionPeriod')
def set_ArchiveBackupRetentionPeriod(self, ArchiveBackupRetentionPeriod): # String
self.add_query_param('ArchiveBackupRetentionPeriod', ArchiveBackupRetentionPeriod)
def get_Category(self): # String
return self.get_query_params().get('Category')
def set_Category(self, Category): # String
self.add_query_param('Category', Category)
def get_LogBackupRetentionPeriod(self): # String
return self.get_query_params().get('LogBackupRetentionPeriod')
def set_LogBackupRetentionPeriod(self, LogBackupRetentionPeriod): # String
self.add_query_param('LogBackupRetentionPeriod', LogBackupRetentionPeriod) | null |
544 | #
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This is the base Formatter class. Its purpose is to convert a content
# processor's data into specific documents (i.e., table of contents, global
# index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example, the
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
# to output HTML.
#
from sources import *
from content import *
from utils import *
################################################################
##
## FORMATTER CLASS
##
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
        self.block_index = sorted( self.identifiers.keys(), key = index_key )
# also add section names to dictionary (without making them appear
# in the index)
for section in self.sections:
self.add_identifier( section.name, section )
def add_identifier( self, name, block ):
if name in self.identifiers:
# duplicate name!
sys.stderr.write( "WARNING: duplicate definition for"
+ " '" + name + "' "
+ "in " + block.location() + ", "
+ "previous definition in "
+ self.identifiers[name].location()
+ "\n" )
else:
self.identifiers[name] = block
#
# formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
close_output( output )
#
# formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def METHOD_NAME( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
skip_entry = 0
try:
block = self.identifiers[name]
# `block_names' can contain field names also,
# which we filter out
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name == name:
skip_entry = 1
except:
skip_entry = 1 # this happens e.g. for `/empty/' entries
if skip_entry:
continue
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.METHOD_NAME( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
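#
# A minimal sketch of a concrete sub-class (hypothetical), mirroring how
# `tohtml.py' specializes this base class:
#
#     class TextFormatter( Formatter ):
#
#         def block_enter( self, block ):
#             print( "** " + block.name )
#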
# eof | null |
545 | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Attribute data transfer testing module
Covers all data read/write and type-conversion operations for attributes.
"""
import numpy as np
from .common import TestCase, ut
import h5py
from h5py import h5a, h5s, h5t
from h5py import File
from h5py._hl.base import is_empty_dataspace
class BaseAttrs(TestCase):
def setUp(self):
self.f = File(self.mktemp(), 'w')
def tearDown(self):
if self.f:
self.f.close()
class TestScalar(BaseAttrs):
"""
Feature: Scalar types map correctly to array scalars
"""
def test_int(self):
""" Integers are read as correct NumPy type """
self.f.attrs['x'] = np.array(1, dtype=np.int8)
out = self.f.attrs['x']
self.assertIsInstance(out, np.int8)
def test_compound(self):
""" Compound scalars are read as numpy.void """
dt = np.dtype([('a', 'i'), ('b', 'f')])
data = np.array((1, 4.2), dtype=dt)
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.void)
self.assertEqual(out, data)
self.assertEqual(out['b'], data['b'])
class TestArray(BaseAttrs):
"""
Feature: Non-scalar types are correctly retrieved as ndarrays
"""
def test_single(self):
""" Single-element arrays are correctly recovered """
data = np.ndarray((1,), dtype='f')
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, (1,))
def test_multi(self):
""" Rank-1 arrays are correctly recovered """
data = np.ndarray((42,), dtype='f')
data[:] = 42.0
data[10:35] = -47.0
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertIsInstance(out, np.ndarray)
self.assertEqual(out.shape, (42,))
self.assertArrayEqual(out, data)
class TestTypes(BaseAttrs):
"""
Feature: All supported types can be stored in attributes
"""
def test_int(self):
""" Storage of integer types """
dtypes = (np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64)
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 42
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def test_float(self):
""" Storage of floating point types """
dtypes = tuple(np.dtype(x) for x in ('<f4', '>f4', '>f8', '<f8'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 42.3
self.f.attrs['x'] = data
out = self.f.attrs['x']
# TODO: Clean up after issue addressed !
print("dtype: ", out.dtype, dt)
print("value: ", out, data)
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def METHOD_NAME(self):
""" Storage of complex types """
dtypes = tuple(np.dtype(x) for x in ('<c8', '>c8', '<c16', '>c16'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = -4.2j + 35.9
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertArrayEqual(out, data)
def test_string(self):
""" Storage of fixed-length strings """
dtypes = tuple(np.dtype(x) for x in ('|S1', '|S10'))
for dt in dtypes:
data = np.ndarray((1,), dtype=dt)
data[...] = 'h'
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertEqual(out[0], data[0])
def test_bool(self):
""" Storage of NumPy booleans """
data = np.ndarray((2,), dtype=np.bool_)
data[...] = True, False
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, data.dtype)
self.assertEqual(out[0], data[0])
self.assertEqual(out[1], data[1])
def test_vlen_string_array(self):
""" Storage of vlen byte string arrays"""
dt = h5py.string_dtype(encoding='ascii')
data = np.ndarray((2,), dtype=dt)
data[...] = "Hello", "Hi there! This is HDF5!"
self.f.attrs['x'] = data
out = self.f.attrs['x']
self.assertEqual(out.dtype, dt)
self.assertEqual(out[0], data[0])
self.assertEqual(out[1], data[1])
def test_string_scalar(self):
""" Storage of variable-length byte string scalars (auto-creation) """
self.f.attrs['x'] = b'Hello'
out = self.f.attrs['x']
self.assertEqual(out, 'Hello')
self.assertEqual(type(out), str)
aid = h5py.h5a.open(self.f.id, b"x")
tid = aid.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_ASCII)
self.assertTrue(tid.is_variable_str())
def test_unicode_scalar(self):
""" Storage of variable-length unicode strings (auto-creation) """
self.f.attrs['x'] = u"Hello" + chr(0x2340) + u"!!"
out = self.f.attrs['x']
self.assertEqual(out, u"Hello" + chr(0x2340) + u"!!")
self.assertEqual(type(out), str)
aid = h5py.h5a.open(self.f.id, b"x")
tid = aid.get_type()
self.assertEqual(type(tid), h5py.h5t.TypeStringID)
self.assertEqual(tid.get_cset(), h5py.h5t.CSET_UTF8)
self.assertTrue(tid.is_variable_str())
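# A hedged standalone sketch of the round-trip exercised above (file name is
# illustrative, not part of the suite): attribute reads hand back the stored
# dtype unchanged, including byte order.
#
#     with File("demo.h5", "w") as f:
#         f.attrs["x"] = np.arange(3, dtype="<f4")
#         assert f.attrs["x"].dtype == np.dtype("<f4")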
class TestEmpty(BaseAttrs):
def setUp(self):
BaseAttrs.setUp(self)
sid = h5s.create(h5s.NULL)
tid = h5t.C_S1.copy()
tid.set_size(10)
aid = h5a.create(self.f.id, b'x', tid, sid)
self.empty_obj = h5py.Empty(np.dtype("S10"))
def test_read(self):
self.assertEqual(
self.empty_obj, self.f.attrs['x']
)
def test_write(self):
self.f.attrs["y"] = self.empty_obj
self.assertTrue(is_empty_dataspace(h5a.open(self.f.id, b'y')))
def test_modify(self):
with self.assertRaises(IOError):
self.f.attrs.modify('x', 1)
def test_values(self):
# list() is for Py3 where these are iterators
values = list(self.f.attrs.values())
self.assertEqual(
[self.empty_obj], values
)
def test_items(self):
items = list(self.f.attrs.items())
self.assertEqual(
[(u"x", self.empty_obj)], items
)
def test_itervalues(self):
values = list(self.f.attrs.values())
self.assertEqual(
[self.empty_obj], values
)
def test_iteritems(self):
items = list(self.f.attrs.items())
self.assertEqual(
[(u"x", self.empty_obj)], items
)
class TestWriteException(BaseAttrs):
"""
Ensure failed attribute writes don't leave garbage behind.
"""
def test_write(self):
""" ValueError on string write wipes out attribute """
s = b"Hello\x00Hello"
try:
self.f.attrs['x'] = s
except ValueError:
pass
with self.assertRaises(KeyError):
self.f.attrs['x'] | null |
546 | import os
from functools import (
lru_cache,
wraps,
)
from multiprocessing import get_context
from threading import local
from typing import (
Any,
Callable,
Dict,
)
import pebble
from celery import (
Celery,
shared_task,
Task,
)
from celery.signals import (
worker_init,
worker_shutting_down,
)
from kombu import serialization
from galaxy.celery.base_task import GalaxyTaskBeforeStart
from galaxy.config import Configuration
from galaxy.main_config import find_config
from galaxy.util import ExecutionTimer
from galaxy.util.custom_logging import get_logger
from galaxy.util.properties import load_app_properties
from ._serialization import (
schema_dumps,
schema_loads,
)
log = get_logger(__name__)
MAIN_TASK_MODULE = "galaxy.celery.tasks"
DEFAULT_TASK_QUEUE = "galaxy.internal"
TASKS_MODULES = [MAIN_TASK_MODULE]
PYDANTIC_AWARE_SERIALIZER_NAME = "pydantic-aware-json"
APP_LOCAL = local()
serialization.register(
PYDANTIC_AWARE_SERIALIZER_NAME, encoder=schema_dumps, decoder=schema_loads, content_type="application/json"
)
class GalaxyCelery(Celery):
fork_pool: pebble.ProcessPool
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def gen_task_name(self, name, module):
module = self.trim_module_name(module)
return super().gen_task_name(name, module)
def trim_module_name(self, module):
"""
Drop "celery.tasks" infix for less verbose task names:
- galaxy.celery.tasks.do_foo >> galaxy.do_foo
- galaxy.celery.tasks.subtasks.do_fuz >> galaxy.subtasks.do_fuz
"""
if module.startswith("galaxy.celery.tasks"):
module = f"galaxy{module[19:]}"
return module
class GalaxyTask(Task):
"""
Custom celery task used to limit number of tasks executions per user
per second.
"""
def METHOD_NAME(self, task_id, args, kwargs):
"""
Set appropriate before start object from DI container.
"""
app = get_galaxy_app()
assert app
app[GalaxyTaskBeforeStart](self, task_id, args, kwargs)
def set_thread_app(app):
APP_LOCAL.app = app
def get_galaxy_app():
try:
return APP_LOCAL.app
except AttributeError:
import galaxy.app
if galaxy.app.app:
return galaxy.app.app
return build_app()
@lru_cache(maxsize=1)
def build_app():
kwargs = get_app_properties()
if kwargs:
kwargs["check_migrate_databases"] = False
kwargs["use_display_applications"] = False
kwargs["use_converters"] = False
import galaxy.app
galaxy_app = galaxy.app.GalaxyManagerApplication(configure_logging=False, **kwargs)
return galaxy_app
@lru_cache(maxsize=1)
def get_app_properties():
config_file = os.environ.get("GALAXY_CONFIG_FILE")
galaxy_root_dir = os.environ.get("GALAXY_ROOT_DIR")
if not config_file and galaxy_root_dir:
config_file = find_config(config_file, galaxy_root_dir)
if config_file:
properties = load_app_properties(
config_file=os.path.abspath(config_file),
config_section="galaxy",
)
if galaxy_root_dir:
properties["root_dir"] = galaxy_root_dir
return properties
@lru_cache(maxsize=1)
def get_config():
kwargs = get_app_properties() or {}
kwargs["override_tempdir"] = False
return Configuration(**kwargs)
def init_fork_pool():
# Do slow imports when workers boot.
from galaxy.datatypes import registry # noqa: F401
from galaxy.metadata import set_metadata # noqa: F401
@worker_init.connect
def setup_worker_pool(sender=None, conf=None, instance=None, **kwargs):
context = get_context("forkserver")
celery_app.fork_pool = pebble.ProcessPool(
max_workers=sender.concurrency, max_tasks=100, initializer=init_fork_pool, context=context
)
@worker_shutting_down.connect
def tear_down_pool(sig, how, exitcode, **kwargs):
log.debug("shutting down forkserver pool")
celery_app.fork_pool.stop()
celery_app.fork_pool.join(timeout=5)
def galaxy_task(*args, action=None, **celery_task_kwd):
if "serializer" not in celery_task_kwd:
celery_task_kwd["serializer"] = PYDANTIC_AWARE_SERIALIZER_NAME
def decorate(func: Callable):
@shared_task(base=GalaxyTask, **celery_task_kwd)
@wraps(func)
def wrapper(*args, **kwds):
app = get_galaxy_app()
assert app
desc = func.__name__
if action is not None:
desc += f" to {action}"
try:
                timer = app.execution_timer_factory.get_timer(f"internals.tasks.{func.__name__}", desc)
except AttributeError:
timer = ExecutionTimer()
try:
rval = app.magic_partial(func)(*args, **kwds)
message = f"Successfully executed Celery task {desc} {timer}"
log.info(message)
return rval
except Exception:
log.warning(f"Celery task execution failed for {desc} {timer}")
raise
return wrapper
if len(args) == 1 and callable(args[0]):
return decorate(args[0])
else:
return decorate
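# A hedged usage sketch (the task name and body are illustrative, not part of
# this module): `galaxy_task` works both bare and with keyword arguments, and
# the wrapped function runs through `app.magic_partial` with timing and
# logging around it.
#
#     @galaxy_task(action="prune stale records")
#     def purge_stale_records(batch_size: int = 100):
#         ...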
def init_celery_app():
celery_app_kwd: Dict[str, Any] = {
"include": TASKS_MODULES,
"task_default_queue": DEFAULT_TASK_QUEUE,
"task_create_missing_queues": True,
"timezone": "UTC",
}
celery_app = GalaxyCelery("galaxy", **celery_app_kwd)
celery_app.set_default()
config = get_config()
config_celery_app(config, celery_app)
setup_periodic_tasks(config, celery_app)
return celery_app
def config_celery_app(config, celery_app):
# Apply settings from galaxy's config
if config.celery_conf:
celery_app.conf.update(config.celery_conf)
# Handle special cases
if not celery_app.conf.broker_url:
celery_app.conf.broker_url = config.amqp_internal_connection
def setup_periodic_tasks(config, celery_app):
def schedule_task(task, interval):
if interval > 0:
task_key = task.replace("_", "-")
module_name = celery_app.trim_module_name(MAIN_TASK_MODULE)
task_name = f"{module_name}.{task}"
beat_schedule[task_key] = {
"task": task_name,
"schedule": interval,
}
beat_schedule: Dict[str, Dict[str, Any]] = {}
schedule_task("prune_history_audit_table", config.history_audit_table_prune_interval)
schedule_task("cleanup_short_term_storage", config.short_term_storage_cleanup_interval)
schedule_task("cleanup_expired_notifications", config.expired_notifications_cleanup_interval)
if config.object_store_cache_monitor_driver in ["auto", "celery"]:
schedule_task("clean_object_store_caches", config.object_store_cache_monitor_interval)
if beat_schedule:
celery_app.conf.beat_schedule = beat_schedule
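# For illustration, with the audit-table task enabled the resulting schedule
# looks like the following (derived from schedule_task above; the interval
# value is a placeholder):
#
#     {
#         "prune-history-audit-table": {
#             "task": "galaxy.prune_history_audit_table",
#             "schedule": 3600,
#         },
#     }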
celery_app = init_celery_app() | null |
547 | import os.path
from pcs import settings
from pcs.common import reports
from pcs.common.str_tools import join_multilines
from pcs.common.types import StringCollection
from pcs.lib.env import LibraryEnvironment
from pcs.lib.errors import LibraryError
def _unfence_node_devices(
env: LibraryEnvironment,
plug: str,
original_devices: StringCollection,
updated_devices: StringCollection,
fence_agent: str,
):
"""
Unfence shared devices by calling fence agent script. Only newly added
devices will be unfenced (set(updated_devices) - set(original_devices)).
Before unfencing, original devices are checked if any of them are not
fenced. If there is a fenced device, unfencing will be skipped.
env -- provides communication with externals
plug -- an information used for unfencing (a node name for fence_scsi,
registration key for fence_mpath)
original_devices -- list of devices defined before update
updated_devices -- list of devices defined after update
    fence_agent -- fence agent name
"""
devices_to_unfence = set(updated_devices) - set(original_devices)
if not devices_to_unfence:
return
fence_agent_bin = os.path.join(settings.fence_agent_execs, fence_agent)
fenced_devices = []
# do not check devices being removed
for device in sorted(set(original_devices) & set(updated_devices)):
stdout, stderr, return_code = env.cmd_runner().run(
[
fence_agent_bin,
"--action=status",
f"--devices={device}",
f"--plug={plug}",
]
)
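        # fence agent convention: the "status" action returns 0 when the
        # device is on (unfenced) and 2 when it reports "off", i.e. fenced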
if return_code == 2:
fenced_devices.append(device)
elif return_code != 0:
raise LibraryError(
reports.ReportItem.error(
reports.messages.StonithUnfencingDeviceStatusFailed(
device, join_multilines([stderr, stdout])
)
)
)
if fenced_devices:
# At least one of existing devices is off, which means the node has
# been fenced and new devices should not be unfenced.
env.report_processor.report(
reports.ReportItem.info(
reports.messages.StonithUnfencingSkippedDevicesFenced(
fenced_devices
)
)
)
return
stdout, stderr, return_code = env.cmd_runner().run(
[
fence_agent_bin,
"--action=on",
"--devices",
",".join(sorted(devices_to_unfence)),
f"--plug={plug}",
],
)
if return_code != 0:
raise LibraryError(
reports.ReportItem.error(
reports.messages.StonithUnfencingFailed(
join_multilines([stderr, stdout])
)
)
)
def unfence_node(
env: LibraryEnvironment,
node: str,
original_devices: StringCollection,
updated_devices: StringCollection,
) -> None:
"""
Unfence scsi devices on a node by calling fence_scsi agent script. Only
newly added devices will be unfenced (set(updated_devices) -
set(original_devices)). Before unfencing, original devices are checked
if any of them are not fenced. If there is a fenced device, unfencing will
be skipped.
env -- provides communication with externals
node -- node name on which unfencing is performed
original_devices -- list of devices defined before update
updated_devices -- list of devices defined after update
"""
_unfence_node_devices(
env, node, original_devices, updated_devices, "fence_scsi"
)
def METHOD_NAME(
env: LibraryEnvironment,
key: str,
original_devices: StringCollection,
updated_devices: StringCollection,
) -> None:
"""
Unfence mpath devices on a node by calling fence_mpath agent script. Only
newly added devices will be unfenced (set(updated_devices) -
set(original_devices)). Before unfencing, original devices are checked
if any of them are not fenced. If there is a fenced device, unfencing will
be skipped.
env -- provides communication with externals
key -- registration key of the node for unfencing
original_devices -- list of devices defined before update
updated_devices -- list of devices defined after update
"""
_unfence_node_devices(
env, key, original_devices, updated_devices, "fence_mpath"
) | null |
548 | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from tempfile import TemporaryDirectory
from typing import Optional, Type
from tests.unit.test_general import _test_materializer
from zenml.materializers.base_materializer import BaseMaterializer
from zenml.materializers.built_in_materializer import (
BuiltInContainerMaterializer,
)
def test_basic_type_materialization():
"""Test materialization for `bool`, `float`, `int`, `str` objects."""
for type_, example in [
(bool, True),
(float, 0.0),
(int, 0),
(str, ""),
]:
result = _test_materializer(
step_output_type=type_,
step_output=example,
expected_metadata_size=1 if type_ == str else 2,
)
assert result == example
def test_bytes_materialization():
"""Test materialization for `bytes` objects.
This is a separate test since `bytes` is not JSON serializable.
"""
example = b""
result = _test_materializer(
step_output_type=bytes, step_output=example, expected_metadata_size=1
)
assert result == example
def test_empty_dict_list_tuple_materialization():
"""Test materialization for empty `dict`, `list`, `tuple` objects."""
for type_, example in [
(dict, {}),
(list, []),
(tuple, ()),
]:
result = _test_materializer(
step_output_type=type_,
step_output=example,
expected_metadata_size=2,
)
assert result == example
def test_simple_dict_list_tuple_materialization(tmp_path):
"""Test materialization for `dict`, `list`, `tuple` with data."""
def _validate_single_file(artifact_uri: str) -> None:
files = os.listdir(artifact_uri)
assert len(files) == 1
for type_, example in [
(dict, {"a": 0, "b": 1, "c": 2}),
(list, [0, 1, 2]),
(tuple, (0, 1, 2)),
]:
result = _test_materializer(
step_output_type=type_,
step_output=example,
validation_function=_validate_single_file,
expected_metadata_size=2,
)
assert result == example
def test_list_of_bytes_materialization():
"""Test materialization for lists of bytes."""
example = [b"0", b"1", b"2"]
result = _test_materializer(
step_output_type=list, step_output=example, expected_metadata_size=2
)
assert result == example
def test_dict_of_bytes_materialization():
"""Test materialization for dicts of bytes."""
example = {"a": b"0", "b": b"1", "c": b"2"}
result = _test_materializer(
step_output_type=dict, step_output=example, expected_metadata_size=2
)
assert result == example
def test_tuple_of_bytes_materialization():
"""Test materialization for tuples of bytes."""
example = (b"0", b"1", b"2")
result = _test_materializer(
step_output_type=tuple, step_output=example, expected_metadata_size=2
)
assert result == example
def test_set_materialization():
"""Test materialization for `set` objects."""
for example in [set(), {1, 2, 3}, {b"0", b"1", b"2"}]:
result = _test_materializer(
step_output_type=set, step_output=example, expected_metadata_size=2
)
assert result == example
def test_mixture_of_all_builtin_types():
"""Test a mixture of built-in types as the ultimate stress test."""
example = [
{
"a": (42, 1.0, "aa", True), # tuple of serializable basic types
"b": {
"ba": ["baa", "bab"],
"bb": [3.7, 1.8],
}, # dict of lists of serializable basic types
"c": b"ca", # bytes (non-serializable)
}, # non-serializable dict
{1.0, 2.0, 4, 4}, # set of serializable types
] # non-serializable list
result = _test_materializer(
step_output_type=list, step_output=example, expected_metadata_size=2
)
assert result == example
def test_none_values():
"""Tests serialization of `None` values in container types."""
for type_, example in [
(list, [1, "a", None]),
(tuple, (1, "a", None)),
(dict, {"key": None}),
]:
result = _test_materializer(
step_output_type=type_,
step_output=example,
expected_metadata_size=2,
)
assert result == example
class CustomType:
"""Custom type used for testing the container materializer below."""
myname = "aria"
def __eq__(self, __value: object) -> bool:
if isinstance(__value, CustomType):
return self.myname == __value.myname
return False
class CustomSubType(CustomType):
"""Subtype of CustomType."""
myname = "axl"
class CustomTypeMaterializer(BaseMaterializer):
"""Mock materializer for custom types.
Does not actually save anything to disk, just initializes the type.
"""
ASSOCIATED_TYPES = (CustomType,)
def save(self, data: CustomType) -> None:
"""Save the data (not)."""
pass
def METHOD_NAME(self, data_type: Type[CustomType]) -> Optional[CustomType]:
"""Load the data."""
return data_type()
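# A hedged sketch of the association rule exercised below (the class name is
# illustrative): a BaseMaterializer subclass advertises the types it handles
# via ASSOCIATED_TYPES, and subtypes resolve to their parent's materializer
# unless one like this is registered for them explicitly.
#
#     class CustomSubTypeMaterializer(CustomTypeMaterializer):
#         ASSOCIATED_TYPES = (CustomSubType,)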
def test_container_materializer_for_custom_types(mocker):
"""Test container materializer for custom types.
This ensures that:
- The container materializer can handle custom types.
- Custom types are loaded as the correct type.
- The materializer of the subtype does not need to be registered in the
materializer registry when the container is loaded.
"""
from zenml.materializers.materializer_registry import materializer_registry
example = [CustomType(), CustomSubType()]
with TemporaryDirectory() as artifact_uri:
materializer = BuiltInContainerMaterializer(uri=artifact_uri)
# Container materializer should find materializer for both elements in
# the default materializer registry.
materializer.save(example)
# When loading, the default materializer registry should no longer be
# needed because the container materializer should have saved the
# materializer that was used for each element.
mocker.patch.object(
materializer_registry,
"materializer_types",
{},
)
result = materializer.METHOD_NAME(list)
# Check that the loaded elements are of the correct types.
assert isinstance(result[0], CustomType)
assert isinstance(result[1], CustomSubType)
assert result[0].myname == "aria"
assert result[1].myname == "axl"
assert result == example | null |
549 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ListMediaNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ListMediaName')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_MediaName(self): # String
return self.get_query_params().get('MediaName')
def set_MediaName(self, MediaName): # String
self.add_query_param('MediaName', MediaName)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def METHOD_NAME(self): # String
return self.get_query_params().get('AccessStatus')
def set_AccessStatus(self, AccessStatus): # String
self.add_query_param('AccessStatus', AccessStatus)
def get_FirstScene(self): # String
return self.get_query_params().get('FirstScene')
def set_FirstScene(self, FirstScene): # String
self.add_query_param('FirstScene', FirstScene)
def get_EndCreateTime(self): # Long
return self.get_query_params().get('EndCreateTime')
def set_EndCreateTime(self, EndCreateTime): # Long
self.add_query_param('EndCreateTime', EndCreateTime)
def get_Business(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_Os(self): # String
return self.get_query_params().get('Os')
def set_Os(self, Os): # String
self.add_query_param('Os', Os)
def get_MediaStatus(self): # String
return self.get_query_params().get('MediaStatus')
def set_MediaStatus(self, MediaStatus): # String
self.add_query_param('MediaStatus', MediaStatus)
def get_Environment(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_StartCreateTime(self): # Long
return self.get_query_params().get('StartCreateTime')
def set_StartCreateTime(self, StartCreateTime): # Long
self.add_query_param('StartCreateTime', StartCreateTime)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_SecondScene(self): # String
return self.get_query_params().get('SecondScene')
def set_SecondScene(self, SecondScene): # String
self.add_query_param('SecondScene', SecondScene)
def get_MediaType(self): # String
return self.get_query_params().get('MediaType')
def set_MediaType(self, MediaType): # String
self.add_query_param('MediaType', MediaType) | null |
550 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalikafka.endpoint import endpoint_data
class StartInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'alikafka', '2019-09-16', 'StartInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SelectedZones(self): # String
return self.get_query_params().get('SelectedZones')
def set_SelectedZones(self, SelectedZones): # String
self.add_query_param('SelectedZones', SelectedZones)
def get_IsEipInner(self): # Boolean
return self.get_query_params().get('IsEipInner')
def set_IsEipInner(self, IsEipInner): # Boolean
self.add_query_param('IsEipInner', IsEipInner)
def get_SecurityGroup(self): # String
return self.get_query_params().get('SecurityGroup')
def set_SecurityGroup(self, SecurityGroup): # String
self.add_query_param('SecurityGroup', SecurityGroup)
def get_DeployModule(self): # String
return self.get_query_params().get('DeployModule')
def set_DeployModule(self, DeployModule): # String
self.add_query_param('DeployModule', DeployModule)
def get_IsSetUserAndPassword(self): # Boolean
return self.get_query_params().get('IsSetUserAndPassword')
def set_IsSetUserAndPassword(self, IsSetUserAndPassword): # Boolean
self.add_query_param('IsSetUserAndPassword', IsSetUserAndPassword)
def get_Password(self): # String
return self.get_query_params().get('Password')
def set_Password(self, Password): # String
self.add_query_param('Password', Password)
def get_Notifier(self): # String
return self.get_query_params().get('Notifier')
def set_Notifier(self, Notifier): # String
self.add_query_param('Notifier', Notifier)
def get_IsForceSelectedZones(self): # Boolean
return self.get_query_params().get('IsForceSelectedZones')
def set_IsForceSelectedZones(self, IsForceSelectedZones): # Boolean
self.add_query_param('IsForceSelectedZones', IsForceSelectedZones)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_UserPhoneNum(self): # String
return self.get_query_params().get('UserPhoneNum')
def set_UserPhoneNum(self, UserPhoneNum): # String
self.add_query_param('UserPhoneNum', UserPhoneNum)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def METHOD_NAME(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ServiceVersion(self): # String
return self.get_query_params().get('ServiceVersion')
def set_ServiceVersion(self, ServiceVersion): # String
self.add_query_param('ServiceVersion', ServiceVersion)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_KMSKeyId(self): # String
return self.get_query_params().get('KMSKeyId')
def set_KMSKeyId(self, KMSKeyId): # String
self.add_query_param('KMSKeyId', KMSKeyId)
def get_Config(self): # String
return self.get_query_params().get('Config')
def set_Config(self, Config): # String
self.add_query_param('Config', Config)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username) | null |
551 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from abc import abstractmethod
from collections import Counter, OrderedDict
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Type
from .e2e_test_system import DataCollector
from .logging import get_logger
from .training_tests_actions import BaseOTXTestAction
from .training_tests_stage import OTXTestStage, OTXTestStagesStorageInterface, Validator
logger = get_logger()
def _get_duplications(arr):
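    # e.g. _get_duplications(["a", "b", "a"]) -> ["a"]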
c = Counter(arr)
dups = [k for k, v in c.items() if v > 1]
return dups
def _str_dict_with_shortened_vals(d, max_len=200):
assert isinstance(d, dict)
if not d:
return "{}"
def _shorten(v):
sv = str(v)
if len(sv) <= max_len:
return sv
return sv[:max_len] + "..."
s = "\n".join(f"{k}: {_shorten(v)}," for k, v in d.items())
s = "\n ".join(s.split("\n"))
s = "{\n " + s + "\n}"
return s
class OTXTestCaseInterface(OTXTestStagesStorageInterface):
@classmethod
@abstractmethod
def get_list_of_test_stages(cls):
raise NotImplementedError("The method get_list_of_test_stages is not implemented")
@abstractmethod
def run_stage(
self,
stage_name: str,
data_collector: DataCollector,
cur_test_expected_metrics_callback: Optional[Callable[[], Dict]],
):
raise NotImplementedError("The method run_stage is not implemented")
def generate_otx_integration_test_case_class(
test_actions_classes: List[Type[BaseOTXTestAction]],
) -> Type:
test_actions_classes = deepcopy(test_actions_classes)
# check names' duplication
classes_names = [action_cls._name for action_cls in test_actions_classes]
name_dups = _get_duplications(classes_names)
if name_dups:
raise ValueError(f"Wrong input: there are duplications in names of actions; duplications = {name_dups}")
class _OTXIntegrationTestCase(OTXTestCaseInterface):
_TEST_STAGES = [action_cls._name for action_cls in test_actions_classes]
@classmethod
def get_list_of_test_stages(cls):
return deepcopy(cls._TEST_STAGES)
def __init__(self, params_factories_for_test_actions: Dict[str, Callable[[], Dict]]):
logger.debug("initialization of test case: begin")
self._stages = OrderedDict()
for action_cls in test_actions_classes:
logger.debug(f"initialization of test case: action_cls={action_cls}")
cur_name = action_cls._name
assert cur_name is not None
cur_params_factory = params_factories_for_test_actions.get(cur_name)
if cur_params_factory is not None:
logger.debug("initialization of test case: calling params factory")
cur_params = cur_params_factory()
else:
cur_params = {}
assert isinstance(cur_params, dict), f"Wrong params received from factory: {cur_params}"
short_params_str = _str_dict_with_shortened_vals(cur_params)
logger.info(f"initialization of test case: add action '{cur_name}' " f"with params={short_params_str}")
cur_action = action_cls(**cur_params)
# Note that `self` is used as stages_storage for OTXTestStage below
cur_stage = OTXTestStage(action=cur_action, stages_storage=self)
self._stages[cur_name] = cur_stage
assert list(self._stages.keys()) == list(self._TEST_STAGES)
# test results should be kept between stages
self.test_results_storage: OrderedDict = OrderedDict()
logger.debug("initialization of test case: end")
# implementation of method from OTXTestStagesStorageInterface
def METHOD_NAME(self, name: str) -> "OTXTestStage":
return self._stages[name]
def run_stage(
self,
stage_name: str,
data_collector: DataCollector,
cur_test_expected_metrics_callback: Optional[Callable[[], Dict]],
):
assert stage_name in self._TEST_STAGES, f"Wrong stage_name {stage_name}"
validator = Validator(cur_test_expected_metrics_callback)
self._stages[stage_name].run_once(data_collector, self.test_results_storage, validator)
return _OTXIntegrationTestCase | null |
552 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import os
import subprocess
import unittest
from utils import determine_wellknown_cmd
def METHOD_NAME(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def clean_files(source, executable):
os.remove(source)
os.remove(executable)
def call_security_check(cc, source, executable, options):
subprocess.run([*cc,source,'-o',executable] + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
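# call_security_check compiles `source` with the given hardening options and
# returns (exit status, stdout) from security-check.py; (0, '') means every
# check passed.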
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
METHOD_NAME(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
clean_files(source, executable)
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = determine_wellknown_cmd('CC', 'x86_64-w64-mingw32-gcc')
METHOD_NAME(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA NX RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--disable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA RELOC_SECTION'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-no-pie','-fno-PIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
(1, executable+': failed PIE DYNAMIC_BASE HIGH_ENTROPY_VA')) # -pie -fPIE does nothing unless --dynamicbase is also supplied
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--no-high-entropy-va','-pie','-fPIE']),
(1, executable+': failed HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--enable-reloc-section','-Wl,--dynamicbase','-Wl,--high-entropy-va','-pie','-fPIE']),
(0, ''))
clean_files(source, executable)
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
METHOD_NAME(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
(1, executable+': failed PIE LAZY_BINDINGS CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
(1, executable+': failed PIE CONTROL_FLOW'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
(0, ''))
clean_files(source, executable)
if __name__ == '__main__':
unittest.main() | null |
553 | # /********************************************************************************
# * Copyright (c) 2023 Contributors to the Eclipse Foundation
# *
# * See the NOTICE file(s) distributed with this work for additional
# * information regarding copyright ownership.
# *
# * This program and the accompanying materials are made available under the
# * terms of the Apache License 2.0 which is available at
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * SPDX-License-Identifier: Apache-2.0
# ********************************************************************************/
import pytest
from kuksa_client.grpc import Datapoint
#
# Client rules:
# For simple strings like abc it is optional to quote them ("abc") or not (abc)
# Quotes are needed if you have commas ("ab, c")
# If you have double quotes in strings you must escape them
#
# Note that KUKSA Server has different rules: there the payload must be valid JSON,
# so not all tests shown below are recommended as they cannot be used for KUKSA Server
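# A hedged one-liner of the API under test (it mirrors the first test below):
#
#     list(Datapoint.cast_array_values(Datapoint.cast_str, "[a, b]"))
#     # -> ["a", "b"]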
def test_array_parse_no_quote():
"""
No need for quotes just because you have a blank
"""
test_str = r'[say hello, abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say hello"
assert my_array[1] == "abc"
def test_array_parse_no_inside_quote():
"""Quotes are OK"""
test_str = r'["say hello","abc"]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say hello"
assert my_array[1] == "abc"
def test_array_parse_no_inside_quote_single():
"""Quotes are OK"""
test_str = "['say hello','abc']"
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say hello"
assert my_array[1] == "abc"
def test_array_parse_double_quote():
test_str = r'["say \"hello\"","abc"]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say \"hello\""
assert my_array[1] == "abc"
def test_array_parse_single_quote():
test_str = r'[say \'hello\',abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say 'hello'"
assert my_array[1] == "abc"
def METHOD_NAME():
test_str = r'["say, hello","abc"]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == r'say, hello'
assert my_array[1] == "abc"
def test_array_square():
"""No problem having square brackets as part of strings"""
test_str = r'[say hello[], abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "say hello[]"
assert my_array[1] == "abc"
def test_array_empty_string_quoted():
test_str = r'["", abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == ""
assert my_array[1] == "abc"
def test_array_empty_string_not_quoted():
"""In this case the first item is ignored"""
test_str = r'[, abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 1
assert my_array[0] == "abc"
def test_double_comma():
"""In this case the middle item is ignored"""
test_str = r'[def,, abc]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 2
assert my_array[0] == "def"
assert my_array[1] == "abc"
def test_quotes_in_string_values():
"""Escaped double quotes, so in total 4 items"""
test_str = r'["dtc1, dtc2", dtc3, \" dtc4, dtc4\"]'
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 4
assert my_array[0] == "dtc1, dtc2"
assert my_array[1] == "dtc3"
assert my_array[2] == "\" dtc4"
assert my_array[3] == "dtc4\""
def test_quotes_in_string_values_2():
"""Doubee quotes in double quotes so in total three values"""
test_str = "['dtc1, dtc2', dtc3, \" dtc4, dtc4\"]"
my_array = list(Datapoint.cast_array_values(Datapoint.cast_str, test_str))
assert len(my_array) == 3
assert my_array[0] == 'dtc1, dtc2'
assert my_array[1] == "dtc3"
assert my_array[2] == " dtc4, dtc4"
def test_int_no_quote():
test_str = r'[123,456]'
my_array = list(Datapoint.cast_array_values(int, test_str))
assert len(my_array) == 2
assert my_array[0] == 123
assert my_array[1] == 456
def test_int_quote():
"""Quoting individual int values is not allowed"""
test_str = r'["123","456"]'
with pytest.raises(ValueError):
list(Datapoint.cast_array_values(int, test_str))
def test_float_no_quote():
test_str = r'[123,456.23]'
my_array = list(Datapoint.cast_array_values(float, test_str))
assert len(my_array) == 2
assert my_array[0] == 123
assert my_array[1] == 456.23
def test_cast_str():
"""Unquoted quotation marks shall be removed, quoted kept without quotes"""
test_str = r'"say hello"'
assert Datapoint.cast_str(test_str) == r'say hello'
test_str = r'"say \"hello\""'
assert Datapoint.cast_str(test_str) == r'say "hello"'
test_str = r'say "hello"'
assert Datapoint.cast_str(test_str) == r'say "hello"'
def test_cast_bool():
assert Datapoint.cast_bool("true") is True
assert Datapoint.cast_bool("True") is True
assert Datapoint.cast_bool("T") is True
assert Datapoint.cast_bool("t") is True
assert Datapoint.cast_bool("false") is False
assert Datapoint.cast_bool("False") is False
assert Datapoint.cast_bool("F") is False
assert Datapoint.cast_bool("f") is False
    # And then some other values, all treated as true for now
assert Datapoint.cast_bool("Ja") is True
assert Datapoint.cast_bool("Nein") is True
assert Datapoint.cast_bool("Doch") is True | null |
554 | """Collection of Action detection evaluiation utils.."""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import logging
import time
from collections import defaultdict
import numpy as np
from mmaction.core.evaluation.ava_evaluation import (
object_detection_evaluation as mm_det_eval,
)
from mmaction.core.evaluation.ava_evaluation import standard_fields
from mmaction.core.evaluation.ava_utils import print_time, read_exclusions
# pylint: disable=too-many-locals, too-many-branches
def det_eval(predictions, result_type, labels, video_infos, exclude_file, verbose=True, custom_classes=None):
"""Evaluation method for AVA Dataset."""
assert result_type in ["mAP"]
start = time.time()
categories, class_whitelist = METHOD_NAME(labels)
if custom_classes is not None:
custom_classes = custom_classes[1:]
assert set(custom_classes).issubset(set(class_whitelist))
class_whitelist = custom_classes
categories = [cat for cat in categories if cat["id"] in custom_classes]
# loading gt, do not need gt score
gt_boxes, gt_labels = _load_gt(video_infos)
if verbose:
print_time("Reading detection results", start)
if exclude_file is not None:
with open(exclude_file, encoding="utf-8") as ex_file:
excluded_keys = read_exclusions(ex_file)
else:
excluded_keys = []
start = time.time()
boxes, labels, scores = predictions
if verbose:
print_time("Reading detection results", start)
# Evaluation for mAP
pascal_evaluator = mm_det_eval.PascalDetectionEvaluator(categories)
start = time.time()
for image_key in gt_boxes:
if verbose and image_key in excluded_keys:
logging.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key,
{
standard_fields.InputDataFields.groundtruth_boxes: np.array(gt_boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes: np.array(gt_labels[image_key], dtype=int),
},
)
if verbose:
print_time("Convert groundtruth", start)
start = time.time()
for image_key in boxes:
if verbose and image_key in excluded_keys:
logging.info("Found excluded timestamp in detections: %s. It will be ignored.", image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key,
{
standard_fields.DetectionResultFields.detection_boxes: np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes: np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores: np.array(scores[image_key], dtype=float),
},
)
if verbose:
print_time("convert detections", start)
start = time.time()
metrics = pascal_evaluator.evaluate()
if verbose:
print_time("run_evaluator", start)
for display_name, value in metrics.items():
print(f"{display_name}=\t{value}")
return {display_name: value for display_name, value in metrics.items() if "ByCategory" not in display_name}
def METHOD_NAME(labels):
"""Generate label map from LabelEntity."""
labelmap = []
class_ids = set()
for label in labels:
labelmap.append({"id": int(label.id), "name": str(label.name)})
class_ids.add(int(label.id))
return labelmap, class_ids
def _load_gt(video_infos):
"""Generate ground truth information from video_infos."""
boxes = defaultdict(list)
labels = defaultdict(list)
for video_info in video_infos:
img_key = video_info["img_key"]
gt_bboxes = video_info["gt_bboxes"]
gt_labels = video_info["gt_labels"]
for gt_label, gt_bbox in zip(gt_labels, gt_bboxes):
for idx, val in enumerate(gt_label):
if val == 1:
boxes[img_key].append(gt_bbox)
labels[img_key].append(idx)
return boxes, labels | null |
555 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Helper classes and methods for the REST API """
import json
import flask
from flask import Response
import inginious.common.custom_yaml as yaml
from inginious.frontend.pages.utils import INGIniousPage
class APIPage(INGIniousPage):
""" Generic handler for all API pages """
def GET(self, *args, **kwargs):
""" GET request """
return self._handle_api(self.API_GET, args, kwargs)
def PUT(self, *args, **kwargs):
""" PUT request """
return self._handle_api(self.API_PUT, args, kwargs)
def POST(self, *args, **kwargs):
""" POST request """
return self._handle_api(self.API_POST, args, kwargs)
def DELETE(self, *args, **kwargs):
""" DELETE request """
return self._handle_api(self.METHOD_NAME, args, kwargs)
def PATCH(self, *args, **kwargs):
""" PATCH request """
return self._handle_api(self.API_PATCH, args, kwargs)
def HEAD(self, *args, **kwargs):
""" HEAD request """
return self._handle_api(self.API_HEAD, args, kwargs)
def OPTIONS(self, *args, **kwargs):
""" OPTIONS request """
return self._handle_api(self.API_OPTIONS, args, kwargs)
def _handle_api(self, handler, handler_args, handler_kwargs):
""" Handle call to subclasses and convert the output to an appropriate value """
try:
status_code, return_value = handler(*handler_args, **handler_kwargs)
except APIError as error:
return error.send()
return _api_convert_output(status_code, return_value)
def _guess_available_methods(self):
""" Guess the method implemented by the subclass"""
available_methods = []
for m in ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]:
self_method = getattr(type(self), "API_{}".format(m))
super_method = getattr(APIPage, "API_{}".format(m))
if self_method != super_method:
available_methods.append(m)
return available_methods
def invalid_method(self):
""" Returns 405 Invalid Method to the client """
raise APIInvalidMethod(self._guess_available_methods())
def API_GET(self, *args, **kwargs): # pylint: disable=unused-argument
""" API GET request. Should be overridden by subclasses """
self.invalid_method()
def API_PUT(self, *args, **kwargs): # pylint: disable=unused-argument
""" API PUT request. Should be overridden by subclasses """
self.invalid_method()
def API_POST(self, *args, **kwargs): # pylint: disable=unused-argument
""" API POST request. Should be overridden by subclasses """
self.invalid_method()
def METHOD_NAME(self, *args, **kwargs): # pylint: disable=unused-argument
""" API DELETE request. Should be overridden by subclasses """
self.invalid_method()
def API_PATCH(self, *args, **kwargs): # pylint: disable=unused-argument
""" API PATCH request. Should be overridden by subclasses """
self.invalid_method()
def API_HEAD(self, *args, **kwargs): # pylint: disable=unused-argument
""" API HEAD request. Should be overridden by subclasses """
self.invalid_method()
def API_OPTIONS(self, *args, **kwargs): # pylint: disable=unused-argument
""" API OPTIONS request. Should be overridden by subclasses """
self.invalid_method()
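# A hedged sketch of a concrete endpoint (name and payload illustrative): a
# subclass overrides only the verbs it supports and returns a
# (status_code, json-serializable value) tuple.
#
#     class APIHello(APIPage):
#         def API_GET(self):
#             return 200, {"message": "hello"}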
class APIAuthenticatedPage(APIPage):
"""
A wrapper for pages that needs authentication. Automatically checks that the client is authenticated and returns "403 Forbidden" if it's
not the case.
"""
def _handle_api(self, handler, handler_args, handler_kwargs):
return APIPage._handle_api(self, (lambda *args, **kwargs: self._verify_authentication(handler, args, kwargs)), handler_args, handler_kwargs)
def _verify_authentication(self, handler, args, kwargs):
""" Verify that the user is authenticated """
if not self.user_manager.session_logged_in():
raise APIForbidden()
return handler(*args, **kwargs)
class APIError(Exception):
""" Standard API Error """
def __init__(self, status_code, return_value):
super(APIError, self).__init__()
self.status_code = status_code
self.return_value = return_value
def send(self, response=None):
""" Send the API Exception to the client """
return _api_convert_output(self.status_code, self.return_value, response)
class APIInvalidMethod(APIError):
""" Invalid method error """
def __init__(self, methods):
APIError.__init__(self, 405, {"error": "This endpoint has no such method"})
self.methods = methods
def send(self):
response = Response()
response.headers['Allow'] = ",".join(self.methods)
return APIError.send(self, response)
class APIInvalidArguments(APIError):
""" Invalid arguments error """
def __init__(self):
APIError.__init__(self, 400, {"error": "Invalid arguments for this method"})
class APIForbidden(APIError):
""" Forbidden error """
def __init__(self, message="You are not authenticated"):
APIError.__init__(self, 403, {"error": message})
class APINotFound(APIError):
""" Not found error """
def __init__(self, message="Not found"):
APIError.__init__(self, 404, {"error": message})
def _api_convert_output(status_code, return_value, response=None):
    """ Convert the output to what the client asks """
    if not response:
        response = Response()
    response.status_code = status_code
content_type = flask.request.environ.get('CONTENT_TYPE', 'text/json')
if "text/json" in content_type:
response.content_type = 'text/json; charset=utf-8'
response.response = [json.dumps(return_value)]
return response
if "text/html" in content_type:
response.content_type = 'text/html; charset=utf-8'
dump = yaml.dump(return_value)
response.response = ["<pre>" + dump + "</pre>"]
return response
if "text/yaml" in content_type or \
"text/x-yaml" in content_type or \
"application/yaml" in content_type or \
"application/x-yaml" in content_type:
response.content_type = 'text/yaml; charset=utf-8'
response.response = [yaml.dump(return_value)]
return response
response.content_type = 'text/json; charset=utf-8'
response.response = [json.dumps(return_value)]
return response | null |
556 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Tasks """
from inginious.frontend.pages.api._api_page import APIAuthenticatedPage, APINotFound, APIForbidden
from inginious.frontend.parsable_text import ParsableText
class APITasks(APIAuthenticatedPage):
r"""
Endpoint
::
/api/v0/courses/[a-zA-Z_\-\.0-9]+/tasks(/[a-zA-Z_\-\.0-9]+)?
"""
def METHOD_NAME(self, val):
""" Util to remove parsable text from a dict, recursively """
if isinstance(val, ParsableText):
return val.original_content()
if isinstance(val, list):
for key, val2 in enumerate(val):
val[key] = self.METHOD_NAME(val2)
return val
if isinstance(val, dict):
for key, val2 in val.items():
val[key] = self.METHOD_NAME(val2)
return val
def API_GET(self, courseid, taskid): # pylint: disable=arguments-differ
"""
List tasks available to the connected client. Returns a dict in the form
::
{
"taskid1":
{
"name": "Name of the course", #the name of the course
"authors": [],
"contact_url": "",
"deadline": "",
"status": "success" # can be "succeeded", "failed" or "notattempted"
"grade": 0.0,
"context": "" # context of the task, in RST
"problems": # dict of the subproblems
{
# see the format of task.yaml for the content of the dict. Contains everything but
# responses of multiple-choice and match problems.
}
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id, this dict will contain one entry or the page will return 404 Not
Found.
"""
try:
course = self.course_factory.get_course(courseid)
except:
raise APINotFound("Course not found")
if not self.user_manager.course_is_open_to_user(course, lti=False):
raise APIForbidden("You are not registered to this course")
if taskid is None:
tasks = course.get_tasks()
else:
try:
tasks = {taskid: course.get_task(taskid)}
except:
raise APINotFound("Task not found")
output = []
for taskid, task in tasks.items():
task_cache = self.user_manager.get_task_cache(self.user_manager.session_username(), task.get_course_id(), task.get_id())
data = {
"id": taskid,
"name": task.get_name(self.user_manager.session_language()),
"authors": task.get_authors(self.user_manager.session_language()),
"contact_url": task.get_contact_url(self.user_manager.session_language()),
"status": "notviewed" if task_cache is None else "notattempted" if task_cache["tried"] == 0 else "succeeded" if task_cache["succeeded"] else "failed",
"grade": task_cache.get("grade", 0.0) if task_cache is not None else 0.0,
"context": task.get_context(self.user_manager.session_language()).original_content(),
"problems": []
}
for problem in task.get_problems():
pcontent = problem.get_original_content()
pcontent["id"] = problem.get_id()
if pcontent["type"] == "match":
del pcontent["answer"]
if pcontent["type"] == "multiple_choice":
pcontent["choices"] = {key: val["text"] for key, val in enumerate(pcontent["choices"])}
pcontent = self.METHOD_NAME(pcontent)
data["problems"].append(pcontent)
output.append(data)
return 200, output | null |
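# Illustrative request/response sketch (course and task ids are hypothetical):
#   GET /api/v0/courses/demo_course/tasks/task1
#   -> 200, [{"id": "task1", "name": "...", "authors": [...], "contact_url": "",
#             "status": "notattempted", "grade": 0.0, "context": "...",
#             "problems": [...]}]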
557 | from galaxy import model
from galaxy.app_unittest_utils.tools_support import UsesApp
from galaxy.tools.parameters import (
basic,
dataset_matcher,
)
from galaxy.util import (
bunch,
XML,
)
from galaxy.util.unittest import TestCase
from .test_data_parameters import MockHistoryDatasetAssociation
class MockTool:
def __init__(self, app):
self.app = app
self.tool_type = "default"
self.valid_input_states = model.Dataset.valid_input_states
class TestDatasetMatcher(TestCase, UsesApp):
def test_hda_mismatches(self):
# Datasets not visible are not "valid" for param.
self.mock_hda.visible = False
assert not self.test_context.hda_match(self.mock_hda)
# Datasets that don't match datatype are not valid.
self.mock_hda.visible = True
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, None, None)
assert not self.test_context.hda_match(self.mock_hda)
def test_valid_hda_direct_match(self):
        # Datasets that are visible and matching are valid
self.mock_hda.visible = True
self.mock_hda.extension = "txt"
hda_match = self.test_context.hda_match(self.mock_hda, check_implicit_conversions=False)
assert hda_match
# Match is not a conversion and so matching hda is the same hda
# supplied.
assert not hda_match.implicit_conversion
assert hda_match.hda == self.mock_hda
    def test_valid_hda_implicit_converted(self):
# Find conversion returns an HDA to an already implicitly converted
# dataset.
self.mock_hda.extension = "data"
converted_hda = model.HistoryDatasetAssociation()
self.mock_hda.conversion_destination = (False, "tabular", converted_hda)
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
assert hda_match.implicit_conversion
assert hda_match.hda == converted_hda
assert hda_match.target_ext == "tabular"
def test_hda_match_implicit_can_convert(self):
# Find conversion returns a target extension to convert to, but not
# a previously implicitly converted dataset.
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, "tabular", None)
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
assert hda_match.implicit_conversion
assert hda_match.hda == self.mock_hda
assert hda_match.target_ext == "tabular"
def METHOD_NAME(self):
self.mock_hda.extension = "data"
self.mock_hda.conversion_destination = (False, "tabular", bunch.Bunch())
hda_match = self.test_context.hda_match(self.mock_hda, check_implicit_conversions=False)
assert not hda_match
def test_data_destination_tools_require_public(self):
self.tool.tool_type = "data_destination"
# Public datasets okay and valid
self.app.security_agent.dataset_is_public = lambda dataset: True
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
# Non-public datasets not valid
self.app.security_agent.dataset_is_public = lambda dataset: False
hda_match = self.test_context.hda_match(self.mock_hda)
assert not hda_match
    def test_filtered_hda_unmatched_key(self):
self.filtered_param = True
data1_val = model.HistoryDatasetAssociation()
data1_val.dbkey = "hg18"
self.other_values = {"data1": data1_val}
assert self.test_context.filter_values == {"hg18"}
# mock_hda is hg19, other is hg18 so should not be "valid hda"
hda_match = self.test_context.hda_match(self.mock_hda)
assert not hda_match
    def test_filtered_hda_matched_key(self):
self.filtered_param = True
data1_val = model.HistoryDatasetAssociation()
data1_val.dbkey = "hg19"
self.other_values = {"data1": data1_val}
# Other param value and this dataset both hg19, should be valid
hda_match = self.test_context.hda_match(self.mock_hda)
assert hda_match
def test_metadata_filtered_hda_options_filter_attribute_matched_keys(self):
self.metadata_filtered_param = True
data1_val = model.HistoryDatasetAssociation()
self.other_values = {"data1": data1_val}
hda1 = MockHistoryDatasetAssociation()
hda1.metadata = MockMetadata()
hda1.metadata.foo = "bar"
hda2 = MockHistoryDatasetAssociation()
hda2.metadata = MockMetadata()
hda2.metadata.foo = "baz"
assert self.test_context.filter_values == {"baz", "bar"}
hda_match = self.test_context.hda_match(hda1)
assert hda_match
hda_match = self.test_context.hda_match(hda2)
assert hda_match
def test_metadata_filtered_hda_options_filter_attribute_unmatched_key(self):
self.metadata_filtered_param = True
data1_val = model.HistoryDatasetAssociation()
self.other_values = {"data1": data1_val}
hda = MockHistoryDatasetAssociation()
hda.metadata = MockMetadata()
hda.metadata.foo = "no-match"
assert self.test_context.filter_values == {"baz", "bar"}
hda_match = self.test_context.hda_match(hda)
assert not hda_match
def setUp(self):
self.setup_app()
self.mock_hda = MockHistoryDatasetAssociation()
self.tool = MockTool(self.app)
self.current_user_roles = []
self.other_values = {}
# Reset lazily generated stuff
self.filtered_param = False
self.metadata_filtered_param = False
self._test_context = None
self.param = None
@property
def test_context(self):
if self._test_context is None:
option_xml = ""
if self.filtered_param:
option_xml = """<options><filter type="data_meta" ref="data1" key="dbkey" /></options>"""
if self.metadata_filtered_param:
option_xml = """
<options options_filter_attribute="metadata.foo">
<filter type="add_value" value="bar" />
<filter type="add_value" value="baz" />
</options>"""
param_xml = XML(f"""<param name="data2" type="data" format="txt">{option_xml}</param>""")
self.param = basic.DataToolParameter(
self.tool,
param_xml,
)
trans = bunch.Bunch(
app=self.app,
get_current_user_roles=lambda: self.current_user_roles,
workflow_building_mode=True,
)
self._test_context = dataset_matcher.get_dataset_matcher_factory(trans).dataset_matcher(
param=self.param, other_values=self.other_values
)
return self._test_context
class MockMetadata:
def __init__(self):
self.foo = None | null |
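# Illustrative note on the two filter styles exercised above: with the
# data_meta filter XML, test_context.filter_values is the set of dbkeys found
# on the referenced "data1" value (e.g. {"hg18"}); with the
# options_filter_attribute variant it is the statically configured set
# {"bar", "baz"}, matched against each HDA's metadata.foo.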
558 | # Copyright (c) 2023 Mira Geoscience Ltd.
#
# This file is part of geoh5py.
#
# geoh5py is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# geoh5py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with geoh5py. If not, see <https://www.gnu.org/licenses/>.
# pylint: skip-file
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING
from .. import interfaces
from ..workspace import Workspace
if TYPE_CHECKING:
from ..interfaces.objects import BlockModel as i_BlockModel
from ..interfaces.objects import Curve as i_Curve
from ..interfaces.objects import Drillhole as i_Drillhole
from ..interfaces.objects import GeoImage as i_GeoImage
from ..interfaces.objects import GeometryTransformation as i_GeometryTransformation
from ..interfaces.objects import Grid2D as i_Grid2D
from ..interfaces.objects import Label as i_Label
from ..interfaces.objects import Object as i_Object
from ..interfaces.objects import ObjectQuery as i_ObjectQuery
from ..interfaces.objects import Octree as i_Octree
from ..interfaces.objects import Points as i_Points
from ..interfaces.objects import Surface as i_Surface
from ..interfaces.shared import Uuid as i_Uuid
# pylint: disable=too-many-public-methods
class ObjectsHandler:
def get_type(self, object_class: int) -> i_Uuid:
# TODO
pass
def get_class(self, type_uid: i_Uuid) -> int:
# TODO
pass
@staticmethod
def get_all() -> list[i_Object]:
Workspace.active().data
# TODO
return []
def find(self, query: i_ObjectQuery) -> list[i_Object]:
# TODO
pass
def set_allow_move(self, objects: list[i_Uuid], allow: bool) -> None:
# TODO
pass
def move_to_group(self, objects: list[i_Uuid], destination_group: i_Uuid) -> None:
# TODO
pass
def get(self, uid: i_Uuid) -> i_Object:
Workspace.active().find_object(uuid.UUID(uid.id))
# TODO
return interfaces.objects.Object()
def narrow_points(self, uid: i_Uuid) -> i_Points:
# TODO
pass
def narrow_curve(self, uid: i_Uuid) -> i_Curve:
# TODO
pass
def narrow_surface(self, uid: i_Uuid) -> i_Surface:
# TODO
pass
def narrow_grid2d(self, uid: i_Uuid) -> i_Grid2D:
# TODO
pass
def METHOD_NAME(self, uid: i_Uuid) -> i_Drillhole:
# TODO
pass
def narrow_blockmodel(self, uid: i_Uuid) -> i_BlockModel:
# TODO
pass
def narrow_octree(self, uid: i_Uuid) -> i_Octree:
# TODO
pass
def narrow_geoimage(self, uid: i_Uuid) -> i_GeoImage:
# TODO
pass
def narrow_label(self, uid: i_Uuid) -> i_Label:
# TODO
pass
def create_any_object(
self,
type_uid: i_Uuid,
name: str,
parent_group: i_Uuid,
attributes: dict[str, str],
) -> i_Object:
# TODO
pass
def transform(
self, objects: list[i_Uuid], transformation: i_GeometryTransformation
) -> None:
# TODO
pass
def set_public(self, entities: list[i_Uuid], is_public: bool) -> None:
# TODO
pass
def set_visible(self, entities: list[i_Uuid], visible: bool) -> None:
# TODO
pass
def set_allow_delete(self, entities: list[i_Uuid], allow: bool) -> None:
# TODO
pass
def set_allow_rename(self, entities: list[i_Uuid], allow: bool) -> None:
# TODO
pass
def rename(self, entities: i_Uuid, new_name: str) -> None:
# TODO
pass | null |
559 | # Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import aiohttp
import pytest
import asyncio
import tentacles.Services.Interfaces.web_interface.tests as web_interface_tests
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
async def test_browse_all_pages_no_required_password():
async with web_interface_tests.get_web_interface(False) as web_interface_instance:
async with aiohttp.ClientSession() as session:
await asyncio.gather(
*[web_interface_tests.check_page_no_login_redirect(
f"http://localhost:{web_interface_tests.PORT}{rule.replace('.', '/')}",
session)
for rule in _get_all_native_rules(web_interface_instance,
black_list=["/advanced/tentacles",
"/advanced/tentacle_packages"]
)])
async def test_browse_all_pages_required_password_without_login():
async with web_interface_tests.get_web_interface(True) as web_interface_instance:
async with aiohttp.ClientSession() as session:
await asyncio.gather(
*[web_interface_tests.check_page_login_redirect(
f"http://localhost:{web_interface_tests.PORT}{rule.replace('.', '/')}",
session)
for rule in _get_all_native_rules(web_interface_instance)])
async def METHOD_NAME():
async with web_interface_tests.get_web_interface(True) as web_interface_instance:
async with aiohttp.ClientSession() as session:
await web_interface_tests.login_user_on_session(session)
# correctly display pages: session is logged in
await asyncio.gather(
*[web_interface_tests.check_page_no_login_redirect(
f"http://localhost:{web_interface_tests.PORT}{rule.replace('.', '/')}",
session)
for rule in _get_all_native_rules(web_interface_instance, ["/logout"])])
async with aiohttp.ClientSession() as unauthenticated_session:
# redirect to login page: session is not logged in
await asyncio.gather(
*[web_interface_tests.check_page_login_redirect(
f"http://localhost:{web_interface_tests.PORT}{rule.replace('.', '/')}",
unauthenticated_session)
for rule in _get_all_native_rules(web_interface_instance)])
async def test_logout():
async with web_interface_tests.get_web_interface(True):
async with aiohttp.ClientSession() as session:
await web_interface_tests.login_user_on_session(session)
await web_interface_tests.check_page_no_login_redirect(f"http://localhost:{web_interface_tests.PORT}/",
session)
await web_interface_tests.check_page_login_redirect(f"http://localhost:{web_interface_tests.PORT}/logout",
session)
await web_interface_tests.check_page_login_redirect(f"http://localhost:{web_interface_tests.PORT}/",
session)
def _get_all_native_rules(web_interface_instance, black_list=None):
if black_list is None:
black_list = []
    full_black_list = URL_BLACK_LIST + black_list + web_interface_tests.get_plugins_routes(web_interface_instance)
    return set(rule.rule
               for rule in web_interface_instance.server_instance.url_map.iter_rules()
               if "GET" in rule.methods
               and _has_no_empty_params(rule)
               and rule.rule not in full_black_list)
# blacklist of endpoints expecting additional data
URL_BLACK_LIST = ["/symbol_market_status", "/tentacle_media", "/watched_symbols", "/export_logs",
"/api/first_exchange_details"]
def _has_no_empty_params(rule):
defaults = rule.defaults if rule.defaults is not None else ()
arguments = rule.arguments if rule.arguments is not None else ()
return len(defaults) >= len(arguments) | null |
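# Illustrative example (hypothetical rules): "/config" has no URL arguments, so
# defaults (0) >= arguments (0) holds and the page is browsed; "/symbol/<name>"
# without defaults has arguments (1) > defaults (0) and is skipped, since no
# concrete URL can be built for it without extra data.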
560 | import cocotb
from cocotb.triggers import Timer
@cocotb.test()
async def test_in_vect_packed(dut):
test_value = 0x5
dut.in_vect_packed.value = test_value
await Timer(1, "ns")
assert dut.out_vect_packed.value == test_value
@cocotb.test()
async def test_in_vect_unpacked(dut):
test_value = [0x1, 0x0, 0x1]
dut.in_vect_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_vect_unpacked.value == test_value
@cocotb.test()
async def test_in_arr(dut):
test_value = 0x5
dut.in_arr.value = test_value
await Timer(1, "ns")
assert dut.out_arr.value == test_value
@cocotb.test()
async def METHOD_NAME(dut):
test_value = (0x5 << 6) | (0x5 << 3) | 0x5
dut.in_2d_vect_packed_packed.value = test_value
await Timer(1, "ns")
assert dut.out_2d_vect_packed_packed.value == test_value
@cocotb.test()
async def test_in_2d_vect_packed_unpacked(dut):
test_value = [0x5, 0x5, 0x5]
dut.in_2d_vect_packed_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_2d_vect_packed_unpacked.value == test_value
@cocotb.test()
async def test_in_2d_vect_unpacked_unpacked(dut):
test_value = 3 * [[0x1, 0x0, 0x1]]
dut.in_2d_vect_unpacked_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_2d_vect_unpacked_unpacked.value == test_value
@cocotb.test()
async def test_in_arr_packed(dut):
test_value = 365
dut.in_arr_packed.value = test_value
await Timer(1, "ns")
assert dut.out_arr_packed.value == test_value
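# Note on the packed literal above (a sketch of the arithmetic): 365 is
# 0b101_101_101, i.e. three copies of 0x5 (0b101) packed into one vector --
# the same value as (0x5 << 6) | (0x5 << 3) | 0x5 used earlier.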
@cocotb.test()
async def test_in_arr_unpacked(dut):
test_value = [0x5, 0x5, 0x5]
dut.in_arr_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_arr_unpacked.value == test_value
@cocotb.test()
async def test_in_2d_arr(dut):
test_value = 365
dut.in_2d_arr.value = test_value
await Timer(1, "ns")
assert dut.out_2d_arr.value == test_value
@cocotb.test()
async def test_in_vect_packed_packed_packed(dut):
test_value = 95869805
dut.in_vect_packed_packed_packed.value = test_value
await Timer(1, "ns")
assert dut.out_vect_packed_packed_packed.value == test_value
# Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(
expect_error=IndexError
if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim")
else ()
)
async def test_in_vect_packed_packed_unpacked(dut):
test_value = [365, 365, 365]
dut.in_vect_packed_packed_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_vect_packed_packed_unpacked.value == test_value
@cocotb.test()
async def test_in_vect_packed_unpacked_unpacked(dut):
test_value = 3 * [3 * [5]]
dut.in_vect_packed_unpacked_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_vect_packed_unpacked_unpacked.value == test_value
@cocotb.test()
async def test_in_vect_unpacked_unpacked_unpacked(dut):
test_value = 3 * [3 * [[1, 0, 1]]]
dut.in_vect_unpacked_unpacked_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_vect_unpacked_unpacked_unpacked.value == test_value
@cocotb.test()
async def test_in_arr_packed_packed(dut):
test_value = (365 << 18) | (365 << 9) | (365)
dut.in_arr_packed_packed.value = test_value
await Timer(1, "ns")
assert dut.out_arr_packed_packed.value == test_value
# Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(
expect_error=IndexError
if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim")
else ()
)
async def test_in_arr_packed_unpacked(dut):
test_value = [365, 365, 365]
dut.in_arr_packed_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_arr_packed_unpacked.value == test_value
@cocotb.test()
async def test_in_arr_unpacked_unpacked(dut):
test_value = 3 * [3 * [5]]
dut.in_arr_unpacked_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_arr_unpacked_unpacked.value == test_value
@cocotb.test()
async def test_in_2d_arr_packed(dut):
test_value = (365 << 18) | (365 << 9) | (365)
dut.in_2d_arr_packed.value = test_value
await Timer(1, "ns")
assert dut.out_2d_arr_packed.value == test_value
# Questa is unable to access elements of a logic array if the last dimension is unpacked (gh-2605)
@cocotb.test(
expect_error=IndexError
if cocotb.LANGUAGE == "verilog" and cocotb.SIM_NAME.lower().startswith("modelsim")
else ()
)
async def test_in_2d_arr_unpacked(dut):
test_value = [365, 365, 365]
dut.in_2d_arr_unpacked.value = test_value
await Timer(1, "ns")
assert dut.out_2d_arr_unpacked.value == test_value
@cocotb.test()
async def test_in_3d_arr(dut):
test_value = (365 << 18) | (365 << 9) | (365)
dut.in_3d_arr.value = test_value
await Timer(1, "ns")
assert dut.out_3d_arr.value == test_value | null |
561 | from copy import deepcopy
from typing import Any, Dict, Sequence, Tuple
import numpy as np
from ..C import FVAL, GRAD, HESS, HESSP, RDATAS, RES, SRES, ModeType
from .base import ObjectiveBase, ResultDict
class AggregatedObjective(ObjectiveBase):
"""Aggregates multiple objectives into one objective."""
def __init__(
self,
objectives: Sequence[ObjectiveBase],
x_names: Sequence[str] = None,
):
"""
Initialize objective.
Parameters
----------
objectives:
Sequence of pypesto.ObjectiveBase instances
x_names:
Sequence of names of the (optimized) parameters.
(Details see documentation of x_names in
:class:`pypesto.ObjectiveBase`)
"""
# input typechecks
if not isinstance(objectives, Sequence):
raise TypeError(
                f'Objectives must be a Sequence, was {type(objectives)}.'
)
if not all(
isinstance(objective, ObjectiveBase) for objective in objectives
):
raise TypeError(
                'Objectives must only contain elements of type '
                'pypesto.ObjectiveBase'
)
if not objectives:
raise ValueError('Length of objectives must be at least one')
self._objectives = objectives
super().__init__(x_names=x_names)
def __deepcopy__(self, memodict=None):
"""Create copy of objective."""
other = AggregatedObjective(
objectives=[deepcopy(objective) for objective in self._objectives],
x_names=deepcopy(self.x_names),
)
for key in set(self.__dict__.keys()) - {'_objectives', 'x_names'}:
other.__dict__[key] = deepcopy(self.__dict__[key])
return other
def check_mode(self, mode: ModeType) -> bool:
"""See `ObjectiveBase` documentation."""
return all(
objective.check_mode(mode) for objective in self._objectives
)
def check_sensi_orders(
self,
sensi_orders: Tuple[int, ...],
mode: ModeType,
) -> bool:
"""See `ObjectiveBase` documentation."""
return all(
objective.check_sensi_orders(sensi_orders, mode)
for objective in self._objectives
)
def METHOD_NAME(
self,
x: np.ndarray,
sensi_orders: Tuple[int, ...],
mode: ModeType,
kwargs_list: Sequence[Dict[str, Any]] = None,
**kwargs,
) -> ResultDict:
"""
See `ObjectiveBase` for more documentation.
Main method to overwrite from the base class. It handles and
delegates the actual objective evaluation.
Parameters
----------
kwargs_list:
Objective-specific keyword arguments, where the dictionaries are
ordered by the objectives.
"""
if kwargs_list is None:
kwargs_list = [{}] * len(self._objectives)
elif len(kwargs_list) != len(self._objectives):
raise ValueError(
"The length of `kwargs_list` must match the number of "
"objectives you are aggregating."
)
return aggregate_results(
[
objective.METHOD_NAME(
x,
sensi_orders,
mode,
**kwargs,
**cur_kwargs,
)
for objective, cur_kwargs in zip(self._objectives, kwargs_list)
]
)
def initialize(self):
"""See `ObjectiveBase` documentation."""
for objective in self._objectives:
objective.initialize()
def get_config(self) -> dict:
"""Return basic information of the objective configuration."""
info = super().get_config()
for n_obj, obj in enumerate(self._objectives):
info[f'objective_{n_obj}'] = obj.get_config()
return info
def aggregate_results(rvals: Sequence[ResultDict]) -> ResultDict:
"""
Aggregate the results from the provided ResultDicts into a single one.
Parameters
----------
rvals:
results to aggregate
"""
# sum over fval/grad/hess, if available in all rvals
result = {
key: sum(rval[key] for rval in rvals)
for key in [FVAL, GRAD, HESS, HESSP]
if all(key in rval for rval in rvals)
}
# extract rdatas and flatten
result[RDATAS] = []
for rval in rvals:
if RDATAS in rval:
result[RDATAS].extend(rval[RDATAS])
# initialize res and sres
if RES in rvals[0]:
res = np.asarray(rvals[0][RES])
else:
res = None
if SRES in rvals[0]:
sres = np.asarray(rvals[0][SRES])
else:
sres = None
# skip iobj=0 after initialization, stack matrices
for rval in rvals[1:]:
if res is not None:
res = np.hstack([res, np.asarray(rval[RES])])
if sres is not None:
sres = np.vstack([sres, np.asarray(rval[SRES])])
# fill res, sres into result
if res is not None:
result[RES] = res
if sres is not None:
result[SRES] = sres
return result | null |
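# Illustrative example (toy numbers): aggregating two ResultDicts such as
#   aggregate_results([{FVAL: 1.0, RES: [0.5]}, {FVAL: 2.0, RES: [1.5]}])
# sums FVAL to 3.0 and horizontally stacks RES into array([0.5, 1.5]);
# RDATAS is always initialised to a list and stays empty when absent.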
import math
import decimal
import datetime

from plugin import plugin
"""
The script used to calculate the moon phases is inspired by:
https://gist.github.com/miklb/ed145757971096565723
The algorithm used to calculate the moon phases belongs to:
Author: Sean B. Palmer, inamidst.com
http://en.wikipedia.org/wiki/Lunar_phase#Lunar_phase_calculation
"""
dec = decimal.Decimal
# Colors meant for the display of the lunar phases
COLORS = [
'\33[31m',
'\33[32m',
'\33[33m',
'\33[34m',
'\33[35m',
'\33[31m',
'\33[32m',
'\33[33m']
@plugin('moonphase')
def moonphase(jarvis, s):
pos = METHOD_NAME()
current_phase = phase_calculator(pos)
phasename = phase(current_phase)
print(COLORS[current_phase])
details_text = True
# Illumination request
if s == "illumination":
jarvis.say("Phase: "+ phasename)
jarvis.say("Illumination: " + f"{pos: .2%}")
# Art request
elif s == "art" or s == "ascii":
jarvis.say(ascii_art(current_phase))
# Help request
elif s == "help":
jarvis.say(help_text())
details_text = False
# Fullmoon request
elif s == "fullmoon" or s =="full":
fullmoon_day = fullmoon_finder()
details_text = False
fullmoon_text(fullmoon_day)
# Default request
else:
jarvis.say("The current moon phase for today is: " + phasename)
# The next prints will appear only if the user request is about the current day
    if details_text:
# Links to nineplanets.org moon phase site
jarvis.say("")
jarvis.say("More details at:")
jarvis.say("\033[0;40mhttps://nineplanets.org/moon/phase/today/")
def METHOD_NAME(now=None):
if now is None:
now = datetime.datetime.now()
diff = now - datetime.datetime(2001, 1, 1)
days = dec(diff.days) + (dec(diff.seconds) / dec(86400))
lunations = dec("0.20439731") + (days * dec("0.03386319269"))
return lunations % dec(1)
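# Illustrative check of the arithmetic above: for 2001-01-01 itself, days == 0,
# so the position is 0.20439731 (a waxing crescent), and each day advances the
# cycle by ~0.0339 lunations, i.e. one full cycle every ~29.53 days.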
# Modified version of the position function that runs a loop until the next Fullmoon Phase appears
# Returns the days left until the next Fullmoon Phase as an integer
# Note: due to the nature of the algorithm used, the days are not always 100% accurate
def fullmoon_finder(now=None):
if now is None:
now = datetime.datetime.now()
is_full = False
extra_day = 0
    while not is_full:
new_date = now + datetime.timedelta(days=extra_day)
diff = new_date - datetime.datetime(2001, 1, 1)
days = dec(diff.days) + (dec(diff.seconds) / dec(86400))
lunations = dec("0.20439731") + (days * dec("0.03386319269"))
METHOD_NAME = lunations % dec(1)
new_phase = phase_calculator(METHOD_NAME)
if new_phase == 4:
is_full = True
else:
extra_day += 1
return extra_day
def fullmoon_text(fullmoon_day):
print("\33[33mNote: This tool is not always accurate and will may be off 2 days at a time")
print(COLORS[4])
print("")
if fullmoon_day == 0:
print("The next full moon will approximately appear today")
print("")
print("Hope you enjoy the Full Moon!")
elif fullmoon_day == 1:
print("The next full moon will approximately appear tomorrow")
else:
print("The next full moon will appear in approximately ", fullmoon_day, " days")
print("")
print("More details at:")
print("\033[0;40mhttps://www.timeanddate.com/astronomy/moon/full-moon.html")
# Receives the user's position to calculate and return the current lunar phase in integer form (0-7)
def phase_calculator(pos):
    index = (pos * dec(8)) + dec("0.5")
    index = math.floor(index)
    # mask to 0-7 so a position just below 1.0 wraps back to New Moon instead
    # of indexing past the end of COLORS/ART
    return int(index) & 7
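# Worked example (illustrative): a position of 0.5 (full moon) gives
# floor(0.5 * 8 + 0.5) == 4, which phase() maps to "Full Moon"; a position of
# 0.97 gives floor(8.26) == 8, which the "& 7" mask wraps back to 0, "New Moon".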
# Prints a help message
def help_text():
help_text = """
The moonphase plugin aims to inform the user about the current moon phase
moonphase: (Default request) Displays the current moonphase
moonphase art, moonphase ascii: Displays the current moonphase with ASCII art
moonphase fullmoon: Displays the approximate days left until the next Full Moon
moonphase illumination: Displays the current lunar illumination percent
moonphase help: Prints this help prompt
"""
return help_text
# Receives the current lunar phase in integer form and returns the current lunar phase's scientific name
def phase(index):
return {
0: "New Moon",
1: "Waxing Crescent",
2: "First Quarter",
3: "Waxing Gibbous",
4: "Full Moon",
5: "Waning Gibbous",
6: "Last Quarter",
7: "Waning Crescent"
}[int(index) & 7]
# Receives the current lunar phase in integer form and returns the current lunar phase's ASCII art
# Source: https://www.asciiart.eu/space/moons
def ascii_art(index):
ART = [r""" _..._
.:::::::.
::::::::::: NEW MOON
:::::::::::
`:::::::::'
`':::'' """,
r""" _..._
.::::. `.
:::::::. : WAXING CRESCENT
:::::::: :
`::::::' .'
`'::'-' """,
r""" _..._
.:::: `.
:::::: : FIRST QUARTER
:::::: :
`::::: .'
`'::.-'""",
r""" _..._
.::' `.
::: : WAXING GIBBOUS
::: :
`::. .'
`':..-' """,
r""" _..._
.' `.
: : FULL MOON
: :
`. .'
`-...-' """,
r""" _..._
.' `::.
: ::: WANING GIBBOUS
: :::
`. .::'
`-..:'' """,
r""" _..._
.' ::::.
: :::::: LAST QUARTER
: ::::::
`. :::::'
`-.::'' """,
r""" _..._
.' .::::.
: :::::::: WANING CRESCENT
: ::::::::
`. '::::::'
`-.::'' """]
    return ART[index]
563 | # A class to represent a sweep of frames collected under the same conditions.
# This pertains to the dataset object in the early phases of processing.
from __future__ import annotations
import os
from xia2.Experts.FindImages import find_matching_images
from xia2.Handlers.Phil import PhilIndex
def SweepFactory(template, directory, beam=None):
"""A factory which will return a list of sweep objects which match
the input template and directory."""
sweeps = []
from xia2.Schema import load_imagesets
imagesets = load_imagesets(
template, directory, reversephi=PhilIndex.params.xia2.settings.input.reverse_phi
)
for imageset in imagesets:
scan = imageset.get_scan()
if scan is not None:
sweeps.append(
Sweep(
template,
directory,
imageset=imageset,
id_image=scan.get_image_range()[0],
beam=beam,
)
)
return sweeps
class Sweep:
"""A class to represent a single sweep of frames."""
def __init__(self, template, directory, imageset=None, id_image=None, beam=None):
"""Initialise the sweep by inspecting the images. id_image
defines the first image in this sweep, and hence the identity of
        the sweep if more than one is found which matches."""
self._identity_attributes = [
"_collect_start",
"_collect_end",
"_template",
"_id_image",
]
if id_image is not None:
self._id_image = id_image
else:
self._id_image = -1
# populate the attributes of this object
self._template = template
self._directory = directory
# populate the rest of the structure
self._images = []
if imageset is not None:
self._imageset = imageset
image_range = imageset.get_scan().get_image_range()
self._images = list(range(image_range[0], image_range[1] + 1))
# if the beam has been specified, then this will
# override the headers
self._beam_centre = beam
self.METHOD_NAME()
def get_template(self):
# try:
# return self._imageset.get_template()
# except Exception:
return self._template
def get_directory(self):
return self._directory
def get_imageset(self):
return self._imageset
def get_images(self):
# check if any more images have appeared
self.METHOD_NAME()
image_range = self._imageset.get_scan().get_image_range()
return list(range(image_range[0], image_range[1] + 1))
def get_distance(self):
return self._imageset.get_detector()[0].get_directed_distance()
def get_wavelength(self):
return self._imageset.get_beam().get_wavelength()
def set_wavelength(self, wavelength):
return self._imageset.get_beam().set_wavelength(wavelength)
def get_beam_centre(self):
from xia2.Schema.Interfaces.FrameProcessor import get_beam_centre
detector = self._imageset.get_detector()
beam = self._imageset.get_beam()
return get_beam_centre(detector, beam)
def METHOD_NAME(self):
"""Check to see if any more frames have appeared - if they
have update myself and reset."""
from xia2.Applications.xia2setup import is_hdf5_name
if is_hdf5_name(os.path.join(self._directory, self._template)):
return
images = find_matching_images(self._template, self._directory)
if len(images) > len(self._images):
self._images = images
from xia2.Schema import load_imagesets
imagesets = load_imagesets(
self._template,
self._directory,
id_image=self._id_image,
use_cache=False,
reversephi=PhilIndex.params.xia2.settings.input.reverse_phi,
)
max_images = 0
best_sweep = None
for imageset in imagesets:
scan = imageset.get_scan()
if scan is None:
continue
if imageset.get_scan().get_num_images() > max_images:
best_sweep = imageset
self._imageset = best_sweep | null |
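# Illustrative usage sketch (template and directory are hypothetical):
#   sweeps = SweepFactory("image_####.cbf", "/data/run1")
#   for sweep in sweeps:
#       print(sweep.get_template(), len(sweep.get_images()))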
564 | import imdb
from colorama import Fore, Style
from plugin import plugin, require
from functools import lru_cache
app = imdb.IMDb()
def main(jarvis, movie):
movie_id = search_movie(jarvis, movie)
if movie_id is None:
return None
return get_movie_by_id(movie_id)
@lru_cache(maxsize=50, typed=False)
def search_movie(jarvis, movie, all_results=False):
if movie == '':
jarvis.say("Please add movie name!", Fore.RED)
return None
results = app.search_movie(movie, results=10)
if not results:
jarvis.say("Error: Did not find movie!", Fore.RED)
return None
if not all_results:
first = results[0]
return first.movieID
return results
@lru_cache(maxsize=20, typed=False)
def get_movie_by_id(movie_id):
return app.get_movie(movie_id)
@require(network=True)
@plugin('movie cast')
def movie_cast(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['cast']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie director')
def movie_director(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['director']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie plot')
def movie_plot(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'plot outline' in data:
jarvis.say('Plot outline:', Fore.GREEN)
jarvis.say(data['plot outline'])
jarvis.say('')
if 'plot' in data:
jarvis.say('Plot:', Fore.GREEN)
for d in data['plot']:
jarvis.say(d)
@require(network=True)
@plugin('movie producer')
def movie_producer(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['producers']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie rating')
def movie_rating(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['rating']))
@require(network=True)
@plugin('movie year')
def movie_year(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['year']))
@require(network=True)
@plugin('movie runtime')
def movie_runtime(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'runtimes' in data:
jarvis.say(str(data['runtimes'][0]) + ' minutes')
else:
jarvis.say("No runtime data present")
@require(network=True)
@plugin('movie countries')
def movie_countries(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['countries']:
jarvis.say(str(d))
@require(network=True)
@plugin('movie genres')
def METHOD_NAME(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['genres']:
jarvis.say(d)
@require(network=True)
@plugin('movie info')
def movie_info(jarvis, movie):
"""
Display table with various information
"""
data = main(jarvis, movie)
if data is not None:
get_movie_info(jarvis, data)
@require(network=True)
@plugin('movie search')
def movie_search(jarvis, movie):
""" search for a movie on IMDB"""
results = search_movie(jarvis, movie, all_results=True)
# if results is None or empty
if not results:
return None
# get only movies from the results, filtering out TV series, etc
movie_results = []
for item in results:
if item['kind'] == 'movie':
movie_results.append(item)
if len(movie_results) > 5:
count = 5
else:
count = len(movie_results)
jarvis.say('')
space = ' '
text = 'ID'
text += space * 3 + 'Movie title'
jarvis.say(text, Fore.GREEN)
for i in range(count):
item = movie_results[i]
text = Fore.GREEN + str(i + 1) + space * 4
text += Fore.RESET + item['smart long imdb canonical title']
jarvis.say(text)
jarvis.say('')
jarvis.say('Please enter ID to know more(q - quit):')
input_id = jarvis.input()
    # If nothing is entered, just return
    if input_id == '':
        return None
    if input_id == 'q':
        return None
    # reject anything that is not a single digit before converting, so a stray
    # character cannot crash int()
    if len(input_id) != 1 or input_id not in '123456789':
        return jarvis.say(Fore.RED + 'Please enter a valid value')
    input_id = int(input_id)
    # if entered input is out of the given list of ID's
    if input_id > count or input_id < 1:
        return jarvis.say(Fore.RED + 'Please enter an id from the given list')
movie_id = movie_results[input_id - 1].movieID
data = get_movie_by_id(movie_id)
get_movie_info(jarvis, data)
def colorized_output(key, value):
"""
pretty print key value pair
"""
green_text = Fore.GREEN + "{:<14}".format(key)
normal_text = Style.RESET_ALL + ": " + str(value)
return green_text + normal_text
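# Illustrative example (values are hypothetical): colorized_output('Rating', 8.1)
# produces a green, width-14 "Rating" label followed by ": 8.1", roughly
#   Rating        : 8.1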
def get_movie_info(jarvis, data):
"""
Takes a movie attributes as input and prints them accordingly
"""
jarvis.say('')
jarvis.say(
'What type of information do you want: cast, producers, genres, etc.?')
jarvis.say('Write one after another separated by space, please:')
movie_attributes = jarvis.input()
movie_attributes = movie_attributes.split()
jarvis.say('')
for attribute in movie_attributes:
if attribute in data:
value = data[attribute]
if attribute == 'genres':
value = ', '.join(value)
if attribute == 'cast':
lst = [person['name'] for person in value]
value = ', '.join(lst[0:3])
if isinstance(value, list):
value = value[0]
jarvis.say(colorized_output(attribute.capitalize(), str(value)))
else:
jarvis.say(
colorized_output(
attribute.capitalize(),
'no information retrieved'))
# print IMDB url of the movie
movie_url = app.urls['movie_base'] + 'tt' + data.movieID
jarvis.say(colorized_output('IMDB url', movie_url))
jarvis.say('') | null |
565 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Vertex step operator flavor."""
from typing import TYPE_CHECKING, Optional, Type
from zenml.config.base_settings import BaseSettings
from zenml.integrations.gcp import GCP_VERTEX_STEP_OPERATOR_FLAVOR
from zenml.integrations.gcp.google_credentials_mixin import (
GoogleCredentialsConfigMixin,
)
from zenml.models.service_connector_models import ServiceConnectorRequirements
from zenml.step_operators.base_step_operator import (
BaseStepOperatorConfig,
BaseStepOperatorFlavor,
)
if TYPE_CHECKING:
from zenml.integrations.gcp.step_operators import VertexStepOperator
class VertexStepOperatorSettings(BaseSettings):
"""Settings for the Vertex step operator.
Attributes:
accelerator_type: Defines which accelerator (GPU, TPU) is used for the
            job. Check out this table to see which accelerator
type and count are compatible with your chosen machine type:
https://cloud.google.com/vertex-ai/docs/training/configure-compute#gpu-compatibility-table.
accelerator_count: Defines number of accelerators to be used for the
            job. Check out this table to see which accelerator
type and count are compatible with your chosen machine type:
https://cloud.google.com/vertex-ai/docs/training/configure-compute#gpu-compatibility-table.
machine_type: Machine type specified here
https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types.
"""
accelerator_type: Optional[str] = None
accelerator_count: int = 0
machine_type: str = "n1-standard-4"
class VertexStepOperatorConfig( # type: ignore[misc] # https://github.com/pydantic/pydantic/issues/4173
BaseStepOperatorConfig,
GoogleCredentialsConfigMixin,
VertexStepOperatorSettings,
):
"""Configuration for the Vertex step operator.
Attributes:
region: Region name, e.g., `europe-west1`.
encryption_spec_key_name: Encryption spec key name.
"""
region: str
# customer managed encryption key resource name
# will be applied to all Vertex AI resources if set
encryption_spec_key_name: Optional[str] = None
@property
def is_remote(self) -> bool:
"""Checks if this stack component is running remotely.
This designation is used to determine if the stack component can be
used with a local ZenML database or if it requires a remote ZenML
server.
Returns:
True if this config is for a remote component, False otherwise.
"""
return True
class VertexStepOperatorFlavor(BaseStepOperatorFlavor):
"""Vertex Step Operator flavor."""
@property
def name(self) -> str:
"""Name of the flavor.
Returns:
Name of the flavor.
"""
return GCP_VERTEX_STEP_OPERATOR_FLAVOR
@property
def service_connector_requirements(
self,
) -> Optional[ServiceConnectorRequirements]:
"""Service connector resource requirements for service connectors.
Specifies resource requirements that are used to filter the available
service connector types that are compatible with this flavor.
Returns:
Requirements for compatible service connectors, if a service
connector is required for this flavor.
"""
return ServiceConnectorRequirements(
resource_type="gcp-generic",
)
@property
def METHOD_NAME(self) -> Optional[str]:
"""A url to point at docs explaining this flavor.
Returns:
A flavor docs url.
"""
return self.generate_default_docs_url()
@property
def sdk_docs_url(self) -> Optional[str]:
"""A url to point at SDK docs explaining this flavor.
Returns:
A flavor SDK docs url.
"""
return self.generate_default_sdk_docs_url()
@property
def logo_url(self) -> str:
"""A url to represent the flavor in the dashboard.
Returns:
The flavor logo.
"""
return "https://public-flavor-logos.s3.eu-central-1.amazonaws.com/step_operator/vertexai.png"
@property
def config_class(self) -> Type[VertexStepOperatorConfig]:
"""Returns `VertexStepOperatorConfig` config class.
Returns:
The config class.
"""
return VertexStepOperatorConfig
@property
def implementation_class(self) -> Type["VertexStepOperator"]:
"""Implementation class for this flavor.
Returns:
The implementation class.
"""
from zenml.integrations.gcp.step_operators import VertexStepOperator
return VertexStepOperator | null |
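# Illustrative registration sketch (stack component name and region are
# assumptions, following the usual ZenML CLI pattern):
#   zenml step-operator register vertex_step_op --flavor=vertex --region=europe-west4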
566 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTAFirmwareRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTAFirmware')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SignMethod(self):
return self.get_query_params().get('SignMethod')
def set_SignMethod(self,SignMethod):
self.add_query_param('SignMethod',SignMethod)
def get_MultiFiless(self):
return self.get_query_params().get('MultiFiles')
def set_MultiFiless(self, MultiFiless):
for depth1 in range(len(MultiFiless)):
if MultiFiless[depth1].get('Size') is not None:
self.add_query_param('MultiFiles.' + str(depth1 + 1) + '.Size', MultiFiless[depth1].get('Size'))
if MultiFiless[depth1].get('Name') is not None:
self.add_query_param('MultiFiles.' + str(depth1 + 1) + '.Name', MultiFiless[depth1].get('Name'))
if MultiFiless[depth1].get('SignValue') is not None:
self.add_query_param('MultiFiles.' + str(depth1 + 1) + '.SignValue', MultiFiless[depth1].get('SignValue'))
if MultiFiless[depth1].get('FileMd5') is not None:
self.add_query_param('MultiFiles.' + str(depth1 + 1) + '.FileMd5', MultiFiless[depth1].get('FileMd5'))
if MultiFiless[depth1].get('Url') is not None:
self.add_query_param('MultiFiles.' + str(depth1 + 1) + '.Url', MultiFiless[depth1].get('Url'))
def get_NeedToVerify(self):
return self.get_query_params().get('NeedToVerify')
def set_NeedToVerify(self,NeedToVerify):
self.add_query_param('NeedToVerify',NeedToVerify)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_FirmwareUrl(self):
return self.get_query_params().get('FirmwareUrl')
def set_FirmwareUrl(self,FirmwareUrl):
self.add_query_param('FirmwareUrl',FirmwareUrl)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_FirmwareDesc(self):
return self.get_query_params().get('FirmwareDesc')
def set_FirmwareDesc(self,FirmwareDesc):
self.add_query_param('FirmwareDesc',FirmwareDesc)
def get_ModuleName(self):
return self.get_query_params().get('ModuleName')
def set_ModuleName(self,ModuleName):
self.add_query_param('ModuleName',ModuleName)
def get_FirmwareSign(self):
return self.get_query_params().get('FirmwareSign')
def set_FirmwareSign(self,FirmwareSign):
self.add_query_param('FirmwareSign',FirmwareSign)
def get_FirmwareSize(self):
return self.get_query_params().get('FirmwareSize')
def set_FirmwareSize(self,FirmwareSize):
self.add_query_param('FirmwareSize',FirmwareSize)
def get_FirmwareName(self):
return self.get_query_params().get('FirmwareName')
def set_FirmwareName(self,FirmwareName):
self.add_query_param('FirmwareName',FirmwareName)
def METHOD_NAME(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_SrcVersion(self):
return self.get_query_params().get('SrcVersion')
def set_SrcVersion(self,SrcVersion):
self.add_query_param('SrcVersion',SrcVersion)
def get_Udi(self):
return self.get_query_params().get('Udi')
def set_Udi(self,Udi):
self.add_query_param('Udi',Udi)
def get_DestVersion(self):
return self.get_query_params().get('DestVersion')
def set_DestVersion(self,DestVersion):
		self.add_query_param('DestVersion',DestVersion)
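# Illustrative usage sketch (credentials, region and values are placeholders,
# not from the original file):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#   request = CreateOTAFirmwareRequest()
#   request.set_FirmwareName('my-firmware')
#   request.set_DestVersion('1.0.1')
#   request.set_FirmwareUrl('https://example.com/firmware.bin')
#   response = client.do_action_with_exception(request)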
567 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyPhysicalConnectionAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyPhysicalConnectionAttribute','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PortType(self): # String
return self.get_query_params().get('PortType')
def set_PortType(self, PortType): # String
self.add_query_param('PortType', PortType)
def get_CircuitCode(self): # String
return self.get_query_params().get('CircuitCode')
def set_CircuitCode(self, CircuitCode): # String
self.add_query_param('CircuitCode', CircuitCode)
def METHOD_NAME(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_RedundantPhysicalConnectionId(self): # String
return self.get_query_params().get('RedundantPhysicalConnectionId')
def set_RedundantPhysicalConnectionId(self, RedundantPhysicalConnectionId): # String
self.add_query_param('RedundantPhysicalConnectionId', RedundantPhysicalConnectionId)
def get_PeerLocation(self): # String
return self.get_query_params().get('PeerLocation')
def set_PeerLocation(self, PeerLocation): # String
self.add_query_param('PeerLocation', PeerLocation)
def get_bandwidth(self): # Integer
return self.get_query_params().get('bandwidth')
def set_bandwidth(self, bandwidth): # Integer
self.add_query_param('bandwidth', bandwidth)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_LineOperator(self): # String
return self.get_query_params().get('LineOperator')
def set_LineOperator(self, LineOperator): # String
self.add_query_param('LineOperator', LineOperator)
def get_PhysicalConnectionId(self): # String
return self.get_query_params().get('PhysicalConnectionId')
def set_PhysicalConnectionId(self, PhysicalConnectionId): # String
self.add_query_param('PhysicalConnectionId', PhysicalConnectionId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |
568 | import itertools
from pharmpy.modeling import add_estimation_step, remove_estimation_step, set_ode_solver
from pharmpy.tools.common import update_initial_estimates
from pharmpy.tools.modelfit import create_fit_workflow
from pharmpy.workflows import Task, Workflow, WorkflowBuilder
def exhaustive(methods, solvers, covs):
wb = WorkflowBuilder()
task_start = Task('start', METHOD_NAME)
wb.add_task(task_start)
candidate_no = 1
for method, solver, cov in itertools.product(methods, solvers, covs):
wf_estmethod = _create_candidate_model_wf(candidate_no, method, solver, cov, update=False)
wb.insert_workflow(wf_estmethod, predecessors=task_start)
candidate_no += 1
return Workflow(wb), None
def exhaustive_with_update(methods, solvers, covs):
wb = WorkflowBuilder()
task_base_model = Task('create_base_model', _create_base_model)
wb.add_task(task_base_model)
wf_fit = create_fit_workflow(n=1)
wb.insert_workflow(wf_fit, predecessors=task_base_model)
task_base_model_fit = wb.output_tasks
candidate_no = 1
for method, solver, cov in itertools.product(methods, solvers, covs):
# This is equivalent to the base model
if not (method == 'FOCE' and solver is None):
# Create model with original estimates
wf_estmethod_original = _create_candidate_model_wf(
candidate_no, method, solver, cov, update=False
)
wb.insert_workflow(wf_estmethod_original, predecessors=task_base_model_fit)
candidate_no += 1
# Create model with updated estimates from FOCE
wf_estmethod_update = _create_candidate_model_wf(
candidate_no, method, solver, cov, update=True
)
wb.insert_workflow(wf_estmethod_update, predecessors=task_base_model_fit)
candidate_no += 1
return Workflow(wb), task_base_model_fit
def exhaustive_only_eval(methods, solvers, covs):
wb = WorkflowBuilder()
task_start = Task('start', METHOD_NAME)
wb.add_task(task_start)
candidate_no = 1
for method, solver, cov in itertools.product(methods, solvers, covs):
wf_estmethod = _create_candidate_model_wf(
candidate_no, method, solver, cov, update=False, is_eval_candidate=True
)
wb.insert_workflow(wf_estmethod, predecessors=task_start)
candidate_no += 1
return Workflow(wb), None
def METHOD_NAME(model):
return model
def _create_candidate_model_wf(candidate_no, method, solver, cov, update, is_eval_candidate=False):
wb = WorkflowBuilder()
model_name = f'estmethod_run{candidate_no}'
task_copy = Task('copy_model', _copy_model, model_name)
wb.add_task(task_copy)
if update:
task_update_inits = Task('update_inits', update_initial_estimates)
wb.add_task(task_update_inits, predecessors=task_copy)
task_prev = task_update_inits
else:
task_prev = task_copy
task_create_candidate = Task(
'create_candidate', _create_candidate_model, method, solver, cov, update, is_eval_candidate
)
wb.add_task(task_create_candidate, predecessors=task_prev)
return Workflow(wb)
def _copy_model(name, model):
return model.replace(name=name)
def _create_base_model(model):
est_settings = _create_est_settings('FOCE')
eval_settings = _create_eval_settings()
base_model = model.replace(name='base_model')
est_method, eval_method = est_settings['method'], eval_settings['method']
if eval_method is not None:
cov = eval_settings['cov']
else:
cov = est_settings['cov']
base_model = base_model.replace(
description=_create_description(
[est_method, eval_method], cov=cov, solver=None, update=False
)
)
while len(base_model.estimation_steps) > 0:
base_model = remove_estimation_step(base_model, 0)
base_model = add_estimation_step(base_model, **est_settings)
base_model = add_estimation_step(base_model, **eval_settings)
return base_model
def _create_candidate_model(method, solver, cov, update, is_eval_candidate, model):
est_settings = _create_est_settings(method, is_eval_candidate)
    laplace = method == 'LAPLACE'
eval_settings = _create_eval_settings(laplace, cov)
eval_method = eval_settings['method']
model = model.replace(
description=_create_description(
[method, eval_method], solver=solver, cov=cov, update=update
)
)
while len(model.estimation_steps) > 0:
model = remove_estimation_step(model, 0)
model = add_estimation_step(model, **est_settings)
model = add_estimation_step(model, **eval_settings)
if solver:
model = set_ode_solver(model, solver)
return model
def _create_est_settings(method, is_eval_candidate=False):
est_settings = {
'method': method,
'interaction': True,
'laplace': False,
'auto': True,
'keep_every_nth_iter': 10,
'cov': None,
}
if method == 'LAPLACE':
est_settings['method'] = 'FOCE'
est_settings['laplace'] = True
if is_eval_candidate:
est_settings['evaluation'] = True
else:
est_settings['maximum_evaluations'] = 9999
return est_settings
def _create_eval_settings(laplace=False, cov=None):
eval_settings = {
'method': 'IMP',
'interaction': True,
'evaluation': True,
'laplace': False,
'maximum_evaluations': 9999,
'isample': 10000,
'niter': 10,
'keep_every_nth_iter': 10,
'cov': cov,
}
if laplace:
eval_settings['laplace'] = True
return eval_settings
def _create_description(methods, solver, cov, update=False):
model_description = ','.join(methods)
if solver:
model_description += f';{solver}'
if cov:
model_description += f';{cov}'
if update:
model_description += ' (update)'
return model_description | null |
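# Illustrative example of the description format produced above (argument
# values are hypothetical):
#   _create_description(['LAPLACE', 'IMP'], solver='LSODA', cov='SANDWICH', update=True)
#   -> 'LAPLACE,IMP;LSODA;SANDWICH (update)'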
569 | # Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
from ..common_utils.entity_managers import PipelineManager
from ..common_utils.test_utils import format_name
from ..utils.pipeline_utils import *
MAX_REP_COUNT = 150
class TestTerminateInstanceBeforeKubeRegistration(object):
pipeline_id = None
run_id = None
state = FailureIndicator()
test_case = "TC-SCALING-7"
@classmethod
def setup_class(cls):
logging.basicConfig(filename=get_log_filename(), level=logging.INFO,
format='%(levelname)s %(asctime)s %(module)s:%(message)s')
pipeline_name = format_name("test_terminate_instance_before_registration")
cls.pipeline_id = PipelineManager.create(pipeline_name)
logging.info("Pipeline {} with ID {} created.".format(pipeline_name, cls.pipeline_id))
try:
run_id = run_pipe(pipeline_name, "-id", "11")[0]
cls.run_id = run_id
logging.info("Pipeline run with ID {}.".format(cls.run_id))
wait_for_required_status("SCHEDULED", run_id, MAX_REP_COUNT)
wait_for_instance_creation(run_id, MAX_REP_COUNT)
logging.info("Instance {} created.".format(run_id))
terminate_instance(run_id)
wait_for_instance_termination(run_id, MAX_REP_COUNT)
logging.info("Instance {} terminated.".format(run_id))
except BaseException as e:
logging.error(e.message)
cls.METHOD_NAME()
raise RuntimeError(e.message)
@classmethod
def METHOD_NAME(cls):
node_name = get_node_name(cls.run_id)
terminate_node(node_name)
logging.info("Node {} was terminated".format(node_name))
if not cls.state.failure:
PipelineManager.delete(cls.pipeline_id)
logging.info("Pipeline {} deleted".format(cls.pipeline_id))
wait_for_instance_termination(cls.run_id, 150)
@pytest.mark.run(order=2)
def test_pipe_should_still_wait(self):
try:
status = get_pipe_status(self.run_id)
if status != "SUCCESS":
status = wait_for_required_status("RUNNING", self.run_id, 400, validation=False)
assert status == "RUNNING" or status == "SUCCESS", \
"Pipeline should wait for node registration. Current status: {}".format(status)
except AssertionError as e:
logging.info("Case {} failed!".format(self.test_case))
self.state.failure = True
pytest.fail("Test case {} failed.\n{}".format(self.test_case, e.message))
@pytest.mark.run(order=1)
def test_new_node_should_be_created(self):
try:
wait_for_node_up(self.run_id, 400, validation=False)
node_name = get_node_name(self.run_id)
logging.info("Node {} in use.".format(node_name))
            assert len(get_cluster_state_for_run_id(self.run_id)) == 1, "Cluster should have exactly one extra node."
except AssertionError as e:
logging.info("Case {} failed!".format(self.test_case))
self.state.failure = True
pytest.fail("Test case {} failed.\n{}".format(self.test_case, e.message))
@pytest.mark.run(order=3)
def test_cluster_should_not_have_node_without_label(self):
try:
node_name = get_node_name(self.run_id)
assert len(get_nodes_without_labels(node_name)) == 0, "Cluster should not have nodes without labels."
except AssertionError as e:
logging.info("Case {} failed!".format(self.test_case))
self.state.failure = True
pytest.fail("Test case {} failed.\n{}".format(self.test_case, e.message)) | null |
570 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdas.endpoint import endpoint_data
class ModifyAutoScalingConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'DAS', '2020-01-16', 'ModifyAutoScalingConfig')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Bandwidth(self): # Struct
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self, Bandwidth): # Struct
if Bandwidth.get('ObservationWindowSize') is not None:
self.add_query_param('Bandwidth.ObservationWindowSize', Bandwidth.get('ObservationWindowSize'))
if Bandwidth.get('Upgrade') is not None:
self.add_query_param('Bandwidth.Upgrade', Bandwidth.get('Upgrade'))
if Bandwidth.get('Apply') is not None:
self.add_query_param('Bandwidth.Apply', Bandwidth.get('Apply'))
if Bandwidth.get('BandwidthUsageLowerThreshold') is not None:
self.add_query_param('Bandwidth.BandwidthUsageLowerThreshold', Bandwidth.get('BandwidthUsageLowerThreshold'))
if Bandwidth.get('Downgrade') is not None:
self.add_query_param('Bandwidth.Downgrade', Bandwidth.get('Downgrade'))
if Bandwidth.get('BandwidthUsageUpperThreshold') is not None:
self.add_query_param('Bandwidth.BandwidthUsageUpperThreshold', Bandwidth.get('BandwidthUsageUpperThreshold'))
def METHOD_NAME(self): # Struct
return self.get_query_params().get('Resource')
def set_Resource(self, Resource): # Struct
if Resource.get('Apply') is not None:
self.add_query_param('Resource.Apply', Resource.get('Apply'))
if Resource.get('Enable') is not None:
self.add_query_param('Resource.Enable', Resource.get('Enable'))
if Resource.get('UpgradeObservationWindowSize') is not None:
self.add_query_param('Resource.UpgradeObservationWindowSize', Resource.get('UpgradeObservationWindowSize'))
if Resource.get('DowngradeObservationWindowSize') is not None:
self.add_query_param('Resource.DowngradeObservationWindowSize', Resource.get('DowngradeObservationWindowSize'))
if Resource.get('CpuUsageUpperThreshold') is not None:
self.add_query_param('Resource.CpuUsageUpperThreshold', Resource.get('CpuUsageUpperThreshold'))
def get_Storage(self): # Struct
return self.get_query_params().get('Storage')
def set_Storage(self, Storage): # Struct
if Storage.get('Upgrade') is not None:
self.add_query_param('Storage.Upgrade', Storage.get('Upgrade'))
if Storage.get('Apply') is not None:
self.add_query_param('Storage.Apply', Storage.get('Apply'))
if Storage.get('MaxStorage') is not None:
self.add_query_param('Storage.MaxStorage', Storage.get('MaxStorage'))
if Storage.get('DiskUsageUpperThreshold') is not None:
self.add_query_param('Storage.DiskUsageUpperThreshold', Storage.get('DiskUsageUpperThreshold'))
def get_Spec(self): # Struct
return self.get_query_params().get('Spec')
def set_Spec(self, Spec): # Struct
if Spec.get('ObservationWindowSize') is not None:
self.add_query_param('Spec.ObservationWindowSize', Spec.get('ObservationWindowSize'))
if Spec.get('MaxSpec') is not None:
self.add_query_param('Spec.MaxSpec', Spec.get('MaxSpec'))
if Spec.get('Upgrade') is not None:
self.add_query_param('Spec.Upgrade', Spec.get('Upgrade'))
if Spec.get('Apply') is not None:
self.add_query_param('Spec.Apply', Spec.get('Apply'))
if Spec.get('MemUsageUpperThreshold') is not None:
self.add_query_param('Spec.MemUsageUpperThreshold', Spec.get('MemUsageUpperThreshold'))
if Spec.get('CoolDownTime') is not None:
self.add_query_param('Spec.CoolDownTime', Spec.get('CoolDownTime'))
if Spec.get('CpuUsageUpperThreshold') is not None:
self.add_query_param('Spec.CpuUsageUpperThreshold', Spec.get('CpuUsageUpperThreshold'))
if Spec.get('MaxReadOnlyNodes') is not None:
self.add_query_param('Spec.MaxReadOnlyNodes', Spec.get('MaxReadOnlyNodes'))
if Spec.get('Downgrade') is not None:
self.add_query_param('Spec.Downgrade', Spec.get('Downgrade'))
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Shard(self): # Struct
return self.get_query_params().get('Shard')
def set_Shard(self, Shard): # Struct
if Shard.get('Upgrade') is not None:
self.add_query_param('Shard.Upgrade', Shard.get('Upgrade'))
if Shard.get('Apply') is not None:
self.add_query_param('Shard.Apply', Shard.get('Apply'))
if Shard.get('MemUsageUpperThreshold') is not None:
self.add_query_param('Shard.MemUsageUpperThreshold', Shard.get('MemUsageUpperThreshold'))
if Shard.get('MinShards') is not None:
self.add_query_param('Shard.MinShards', Shard.get('MinShards'))
if Shard.get('UpgradeObservationWindowSize') is not None:
self.add_query_param('Shard.UpgradeObservationWindowSize', Shard.get('UpgradeObservationWindowSize'))
if Shard.get('DowngradeObservationWindowSize') is not None:
self.add_query_param('Shard.DowngradeObservationWindowSize', Shard.get('DowngradeObservationWindowSize'))
if Shard.get('MemUsageLowerThreshold') is not None:
self.add_query_param('Shard.MemUsageLowerThreshold', Shard.get('MemUsageLowerThreshold'))
if Shard.get('MaxShards') is not None:
self.add_query_param('Shard.MaxShards', Shard.get('MaxShards'))
if Shard.get('Downgrade') is not None:
self.add_query_param('Shard.Downgrade', Shard.get('Downgrade')) | null |
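# Usage sketch (the instance id and spec values are assumed). Struct
# setters flatten nested dicts into dotted query parameters; actually
# sending the request additionally needs an authenticated
# aliyunsdkcore AcsClient, which is out of scope here:
def _example_build_autoscaling_request():
    request = ModifyAutoScalingConfigRequest()
    request.set_InstanceId('rm-example')  # hypothetical instance id
    request.set_Spec({'Upgrade': True, 'MaxSpec': 'mysql.n4.large.1',
                      'ObservationWindowSize': '5m'})
    # get_query_params() now contains 'Spec.Upgrade', 'Spec.MaxSpec'
    # and 'Spec.ObservationWindowSize'
    return request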
571 | # Copyright (c) 2017-2022 The Molecular Sciences Software Institute, Virginia Tech
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests for the BSE main API
"""
import random
import pytest
import basis_set_exchange as bse
from .common_testvars import *
# Use random for getting sets of elements
random.seed(rand_seed, version=2)
# To test role lookup
# yapf: disable
role_tests = [('cc-pvdz', 'rifit', 'cc-pvdz-rifit'),
('def2-tzvp', 'jfit', 'def2-universal-jfit'),
('aug-cc-pv5z', 'jkfit', 'cc-pv5z-jkfit'),
('aug-pcseg-1', 'admmfit', 'aug-admm-1')]
# yapf: enable
@pytest.mark.parametrize('basis_name, basis_ver', bs_names_vers)
def test_get_basis_1(basis_name, basis_ver):
"""For all versions of basis sets, test a simple get_basis
"""
bse.get_basis(basis_name, version=basis_ver)
@pytest.mark.parametrize('basis_name', bs_names)
def test_get_basis_2(basis_name):
"""For all versions of basis sets, test a simple get_basis
with different element selections
"""
this_metadata = bs_metadata[basis_name]
latest = this_metadata['latest_version']
avail_elements = this_metadata['versions'][latest]['elements']
nelements = random.randint(1, len(avail_elements))
selected_elements = random.sample(avail_elements, nelements)
# Change some selected elements to strings
for idx in range(len(selected_elements)):
if idx % 3 == 1:
selected_elements[idx] = bse.lut.element_sym_from_Z(selected_elements[idx])
elif idx % 3 == 2:
selected_elements[idx] = str(selected_elements[idx])
bs = bse.get_basis(basis_name, elements=selected_elements)
assert len(bs['elements']) == len(selected_elements)
# Try to get as an integer
bs = bse.get_basis(basis_name, elements=int(selected_elements[0]))
assert len(bs['elements']) == 1
@pytest.mark.parametrize('basis_name', bs_names_sample)
@pytest.mark.parametrize('bool_opts', bool_matrix(5))
def test_get_basis_3(basis_name, bool_opts):
"""For a sample of basis sets, test different options
"""
bse.get_basis(basis_name,
uncontract_general=bool_opts[0],
uncontract_segmented=bool_opts[1],
uncontract_spdf=bool_opts[2],
make_general=bool_opts[3],
optimize_general=bool_opts[4])
@pytest.mark.parametrize('basis_name', bs_names_sample)
@pytest.mark.parametrize('fmt', bs_write_formats_ecp)
def test_get_basis_4(basis_name, fmt):
"""For a sample of basis sets, test getting different formats
of the latest version
Tests writers that are capable of ECP
"""
# bit of a hack
# crystal can't handle > g projectors
if fmt == 'crystal' and basis_name == 'def2-tzvp':
return
bse.get_basis(basis_name, fmt=fmt)
@pytest.mark.parametrize('basis_name', bs_names_sample_noecp)
@pytest.mark.parametrize('fmt', bs_write_formats_noecp)
def test_get_basis_4_noecp(basis_name, fmt):
"""For a sample of basis sets, test getting different formats
of the latest version
Tests writers that are NOT capable of ECP
"""
# bit of a hack - velox doesn't support cartesian
if fmt == 'veloxchem' and basis_name.startswith("6-31"):
return
bse.get_basis(basis_name, fmt=fmt)
@pytest.mark.parametrize('basis_name', bs_names)
@pytest.mark.parametrize('fmt', ref_formats)
def test_get_references_1(basis_name, fmt):
""" Tests getting references for all basis sets
Also test getting references for a random selection of elements
"""
this_metadata = bs_metadata[basis_name]
for ver in this_metadata['versions'].keys():
bse.get_references(basis_name, fmt=fmt, version=ver)
avail_elements = this_metadata['versions'][ver]['elements']
nelements = random.randint(1, len(avail_elements))
selected_elements = random.sample(avail_elements, nelements)
bse.get_references(basis_name, elements=selected_elements, fmt=fmt, version=ver)
@pytest.mark.parametrize('primary_basis,role,expected', role_tests)
def test_lookup_by_role(primary_basis, role, expected):
"""Test looking up data by role
"""
bs = bse.lookup_basis_by_role(primary_basis, role)
assert bs.lower() == expected.lower()
@pytest.mark.parametrize('basis_name', bs_names)
def test_notes(basis_name):
"""Test getting family, family notes, and basis set notes
"""
bse.get_basis_notes(basis_name)
bse.has_basis_notes(basis_name)
@pytest.mark.parametrize('basis_name', bs_names)
def test_get_family(basis_name):
"""Test getting family"""
fam = bse.get_basis_family(basis_name)
assert fam in all_families
@pytest.mark.parametrize('family', all_families)
def test_family_notes(family):
"""Test getting family notes"""
bse.has_family_notes(family)
bse.get_family_notes(family)
# yapf: disable
@pytest.mark.parametrize('substr,family,role', [['def2', 'ahlrichs', 'orbital'],
['pVDz', None, None],
[None, None, 'jkfit'],
[None, 'pople', None]])
# yapf: enable
def test_filter(substr, family, role):
"""Test filtering basis set"""
md = bse.filter_basis_sets(substr, family, role)
assert len(md) > 0
# yapf: disable
@pytest.mark.parametrize('substr,family,role', [['def2', 'ahlrichs', 'jkfit'],
['qqqqq', None, None],
['6-31', None, 'admmfit']])
# yapf: enable
def METHOD_NAME(substr, family, role):
"""Test filtering basis set (returning zero results)"""
md = bse.filter_basis_sets(substr, family, role)
assert len(md) == 0
# yapf: disable
@pytest.mark.parametrize('fmts', [None,
['gto_spherical', 'scalar_ecp'],
['CARTESIAN_gto']])
# yapf: enable
def test_get_formats(fmts):
'''Test the get_formats function'''
ret = bse.get_formats(fmts)
# JSON is always supported
assert len(ret) > 1
def test_get_reference_formats():
    '''Test the get_reference_formats function'''
bse.get_reference_formats() | null |
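# Direct (non-test) usage sketch of the API exercised above; the basis
# name, element symbols and output format are illustrative:
def _example_fetch_basis():
    # Returns the cc-pVDZ basis for C and O formatted for NWChem
    return bse.get_basis('cc-pvdz', elements=['C', 'O'], fmt='nwchem')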
572 | import logging
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GLib
class KlippyFiles:
def __init__(self, screen):
self._screen = screen
self.callbacks = []
self.files = {}
self.filelist = []
self.gcodes_path = None
def METHOD_NAME(self):
if "virtual_sdcard" in self._screen.printer.get_config_section_list():
vsd = self._screen.printer.get_config_section("virtual_sdcard")
if "path" in vsd:
self.gcodes_path = os.path.expanduser(vsd['path'])
logging.info(f"Gcodes path: {self.gcodes_path}")
def reset(self):
self._screen = None
self.callbacks = None
self.files = None
self.filelist = None
self.gcodes_path = None
def _callback(self, result, method, params):
if method == "server.files.list":
if "result" in result and isinstance(result['result'], list):
newfiles = []
deletedfiles = self.filelist.copy()
for item in result['result']:
file = item['filename'] if "filename" in item else item['path']
if file in self.files:
deletedfiles.remove(file)
else:
newfiles.append(file)
self.add_file(item, False)
if newfiles or len(deletedfiles) > 0:
self.run_callbacks(newfiles, deletedfiles)
if len(deletedfiles) > 0:
for file in deletedfiles:
self.remove_file(file)
elif method == "server.files.directory":
if "result" in result:
directory = params['path'][7:] if params['path'].startswith('gcodes/') else params['path']
if directory[-1] == '/':
directory = directory[:-1]
newfiles = []
for file in result['result']['files']:
fullpath = f"{directory}/{file['filename']}"
if fullpath not in self.filelist:
newfiles.append(fullpath)
if newfiles:
self.run_callbacks(newfiles)
elif method == "server.files.metadata":
if "error" in result.keys():
logging.debug(f"Error in getting metadata for {params['filename']}. Retrying in 6 seconds")
return
for x in result['result']:
self.files[params['filename']][x] = result['result'][x]
if "thumbnails" in self.files[params['filename']]:
self.files[params['filename']]['thumbnails'].sort(key=lambda y: y['size'], reverse=True)
for thumbnail in self.files[params['filename']]['thumbnails']:
thumbnail['local'] = False
if self.gcodes_path is not None:
fpath = os.path.join(self.gcodes_path, params['filename'])
fdir = os.path.dirname(fpath)
path = os.path.join(fdir, thumbnail['relative_path'])
if os.access(path, os.R_OK):
thumbnail['local'] = True
thumbnail['path'] = path
if thumbnail['local'] is False:
fdir = os.path.dirname(params['filename'])
thumbnail['path'] = os.path.join(fdir, thumbnail['relative_path'])
self.run_callbacks(mods=[params['filename']])
def add_file(self, item, notify=True):
if 'filename' not in item and 'path' not in item:
logging.info(f"Error adding item, unknown filename or path: {item}")
return
filename = item['path'] if "path" in item else item['filename']
if filename in self.filelist:
logging.info(f"File already exists: {filename}")
self.request_metadata(filename)
args = None, None, [filename]
GLib.idle_add(self.run_callbacks, *args)
return
self.filelist.append(filename)
self.files[filename] = {
"size": item['size'],
"modified": item['modified']
}
self.request_metadata(filename)
if notify is True:
self.run_callbacks(newfiles=[filename])
def add_file_callback(self, callback):
try:
self.callbacks.append(callback)
except Exception as e:
logging.debug(f"Callback not found: {callback}:\n{e}")
def process_update(self, data):
if 'item' in data and data['item']['root'] != 'gcodes':
return
if data['action'] == "create_dir":
self._screen._ws.klippy.get_file_dir(f"gcodes/{data['item']['path']}", self._callback)
elif data['action'] == "create_file":
self.add_file(data['item'])
elif data['action'] == "delete_file":
self.remove_file(data['item']['path'])
elif data['action'] == "modify_file":
self.request_metadata(data['item']['path'])
elif data['action'] == "move_file":
self.add_file(data['item'], False)
self.remove_file(data['source_item']['path'], False)
self.run_callbacks(newfiles=[data['item']['path']], deletedfiles=[data['source_item']['path']])
return False
def remove_file_callback(self, callback):
if callback in self.callbacks:
self.callbacks.pop(self.callbacks.index(callback))
def file_exists(self, filename):
return filename in self.filelist
def file_metadata_exists(self, filename):
if self.file_exists(filename):
return "slicer" in self.files[filename]
return False
def get_thumbnail_location(self, filename, small=False):
if small and len(self.files[filename]['thumbnails']) > 1 \
and self.files[filename]['thumbnails'][0]['width'] > self.files[filename]['thumbnails'][1]['width']:
thumb = self.files[filename]['thumbnails'][1]
else:
thumb = self.files[filename]['thumbnails'][0]
if thumb['local'] is False:
return ['http', thumb['path']]
return ['file', thumb['path']]
def has_thumbnail(self, filename):
if filename not in self.files:
return False
return "thumbnails" in self.files[filename] and len(self.files[filename]) > 0
def request_metadata(self, filename):
if filename not in self.filelist:
return False
self._screen._ws.klippy.get_file_metadata(filename, self._callback)
def refresh_files(self):
self._screen._ws.klippy.get_file_list(self._callback)
return False
def remove_file(self, filename, notify=True):
if filename not in self.filelist:
return
self.filelist.remove(filename)
self.files.pop(filename, None)
if notify is True:
self.run_callbacks(deletedfiles=[filename])
def run_callbacks(self, newfiles=None, deletedfiles=None, mods=None):
if mods is None:
mods = []
if deletedfiles is None:
deletedfiles = []
if newfiles is None:
newfiles = []
if len(self.callbacks) <= 0:
return False
for cb in self.callbacks:
args = newfiles, deletedfiles, mods
GLib.idle_add(cb, *args)
return False
def get_file_list(self):
return self.filelist
def get_file_info(self, filename):
if filename not in self.files:
return {"path": None, "modified": 0, "size": 0}
return self.files[filename] | null |
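# Consumer sketch (the callback shape is an assumption that matches how
# run_callbacks dispatches above): callbacks receive the newfiles,
# deletedfiles and mods lists and run on the GTK main loop.
def _example_register_callback(files):
    def on_files_changed(newfiles, deletedfiles, mods):
        logging.info(f"new={newfiles} deleted={deletedfiles} modified={mods}")
        return False  # returning False removes the idle source
    files.add_file_callback(on_files_changed)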
573 | # Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import tempfile
import time
import xml.etree.ElementTree as ET
from pipeline.api import PipelineAPI, TaskStatus
from pipeline.log import Logger
def get_int_run_param(env_var_name, default_value):
return int(os.getenv(env_var_name, default_value))
HCS_PROCESSING_TASK_NAME = 'HCS processing'
HCS_ACTIVE_PROCESSING_TIMEOUT_MIN = get_int_run_param('HCS_PARSING_ACTIVE_PROCESSING_TIMEOUT_MIN', 360)
HCS_CLOUD_FILES_SCHEMA = os.getenv('HCS_PARSING_CLOUD_FILES_SCHEMA', 's3')
HCS_PROCESSING_OUTPUT_FOLDER = os.getenv('HCS_PARSING_OUTPUT_FOLDER')
HCS_INDEX_FILE_NAME = os.getenv('HCS_PARSING_INDEX_FILE_NAME', 'Index.xml')
HCS_IMAGE_DIR_NAME = os.getenv('HCS_PARSING_IMAGE_DIR_NAME', 'Images')
def get_list_run_param(env_var_name, delimiter=','):
param_elements = os.getenv(env_var_name, '').split(delimiter)
return filter(lambda string: string is not None and len(string.strip()) > 0, param_elements)
def get_bool_run_param(env_var_name, default='false'):
return os.getenv(env_var_name, default) == 'true'
def log_run_success(message):
log_run_info(message, status=TaskStatus.SUCCESS)
def log_run_info(message, status=TaskStatus.RUNNING):
Logger.log_task_event(HCS_PROCESSING_TASK_NAME, message, status)
class HcsFileLogger:
def __init__(self, file_path):
self.file_path = file_path
def log_info(self, message, status=TaskStatus.RUNNING):
log_run_info('[{}] {}'.format(self.file_path, message), status)
class HcsParsingUtils:
@staticmethod
def extract_xml_schema(xml_info_root):
full_schema = xml_info_root.tag
return full_schema[:full_schema.rindex('}') + 1]
@staticmethod
def get_file_without_extension(file_path):
return os.path.splitext(file_path)[0]
@staticmethod
def get_basename_without_extension(file_path):
return HcsParsingUtils.get_file_without_extension(os.path.basename(file_path))
@staticmethod
def get_file_last_modification_time(file_path):
return int(os.stat(file_path).st_mtime)
@staticmethod
def extract_plate_from_hcs_xml(hcs_xml_info_root, hcs_schema_prefix=None):
if not hcs_schema_prefix:
hcs_schema_prefix = HcsParsingUtils.extract_xml_schema(hcs_xml_info_root)
plates_list = hcs_xml_info_root.find(hcs_schema_prefix + 'Plates')
plate = plates_list.find(hcs_schema_prefix + 'Plate')
return plate
@staticmethod
def build_preview_file_path(hcs_root_folder_path, with_id=False):
file_name = HcsParsingUtils.build_preview_file_name(hcs_root_folder_path)
if with_id:
file_name = file_name + '.' + hcs_root_folder_path.split('/')[-1]
preview_file_basename = HcsParsingUtils.replace_special_chars(file_name) + '.hcs'
parent_folder = HCS_PROCESSING_OUTPUT_FOLDER \
if HCS_PROCESSING_OUTPUT_FOLDER is not None \
else os.path.dirname(hcs_root_folder_path)
return os.path.join(parent_folder, preview_file_basename)
@staticmethod
def build_preview_file_name(hcs_root_folder_path):
index_file_abs_path = os.path.join(HcsParsingUtils.get_file_without_extension(hcs_root_folder_path),
HCS_IMAGE_DIR_NAME, HCS_INDEX_FILE_NAME)
hcs_xml_info_root = ET.parse(index_file_abs_path).getroot()
hcs_schema_prefix = HcsParsingUtils.extract_xml_schema(hcs_xml_info_root)
file_name = HcsParsingUtils.get_file_without_extension(hcs_root_folder_path)
name_xml_element = HcsParsingUtils.extract_plate_from_hcs_xml(hcs_xml_info_root, hcs_schema_prefix) \
.find(hcs_schema_prefix + 'Name')
if name_xml_element is not None:
file_pretty_name = name_xml_element.text
if file_pretty_name is not None:
file_name = file_pretty_name
return file_name
@staticmethod
def get_stat_active_file_name(hcs_img_path):
return HcsParsingUtils._get_service_file_name(hcs_img_path, 'hcsparser.inprog')
@staticmethod
def get_stat_file_name(hcs_img_path):
return HcsParsingUtils._get_service_file_name(hcs_img_path, 'hcsparser')
@staticmethod
def get_service_directory(hcs_img_path):
name_without_extension = HcsParsingUtils.get_basename_without_extension(hcs_img_path)
parent_dir = HCS_PROCESSING_OUTPUT_FOLDER \
if HCS_PROCESSING_OUTPUT_FOLDER is not None \
else os.path.dirname(hcs_img_path)
return os.path.join(parent_dir, '.hcsparser', name_without_extension)
@staticmethod
def generate_local_service_directory(hcs_img_path):
name_without_extension = HcsParsingUtils.get_basename_without_extension(hcs_img_path)
return tempfile.mkdtemp(prefix=name_without_extension + '.hcsparser.')
@staticmethod
def create_service_dir_if_not_exist(hcs_img_path):
directory = HcsParsingUtils.get_service_directory(hcs_img_path)
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def _get_service_file_name(hcs_img_path, suffix):
parent_dir = HcsParsingUtils.get_service_directory(hcs_img_path)
parser_flag_file = '.stat.{}'.format(suffix)
return os.path.join(parent_dir, parser_flag_file)
@staticmethod
def METHOD_NAME(active_stat_file):
processing_stat_file_modification_date = HcsParsingUtils.get_file_last_modification_time(active_stat_file)
processing_deadline = datetime.datetime.now() - datetime.timedelta(minutes=HCS_ACTIVE_PROCESSING_TIMEOUT_MIN)
return (processing_stat_file_modification_date - time.mktime(processing_deadline.timetuple())) < 0
@staticmethod
def extract_cloud_path(file_path, cloud_scheme=HCS_CLOUD_FILES_SCHEMA):
path_chunks = file_path.split('/cloud-data/', 1)
if len(path_chunks) != 2:
raise RuntimeError('Unable to determine cloud path of [{}]'.format(file_path))
return '{}://{}'.format(cloud_scheme, path_chunks[1])
@staticmethod
def quote_string(string):
return '"{}"'.format(string)
@staticmethod
def replace_special_chars(file_path):
return file_path.replace('/', '|')
@staticmethod
def find_in_xml(element, name):
if element is None:
return None
else:
return element.find(name)
@staticmethod
def find_all_in_xml(element, name):
if element is None:
return []
else:
return element.findall(name)
@staticmethod
def get_hcs_image_folder():
return HCS_IMAGE_DIR_NAME | null |
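# Usage sketch for the pure path helpers above (paths are illustrative):
def _example_path_helpers():
    cloud = HcsParsingUtils.extract_cloud_path('/cloud-data/bucket/run1/plate.hcs')
    # -> 's3://bucket/run1/plate.hcs' under the default scheme
    safe = HcsParsingUtils.replace_special_chars('run1/plate')
    # -> 'run1|plate'
    return cloud, safe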
574 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class ModifySecurityGroupRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifySecurityGroupRule','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_NicType(self): # String
return self.get_query_params().get('NicType')
def set_NicType(self, NicType): # String
self.add_query_param('NicType', NicType)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def METHOD_NAME(self): # String
return self.get_query_params().get('SourcePrefixListId')
def set_SourcePrefixListId(self, SourcePrefixListId): # String
self.add_query_param('SourcePrefixListId', SourcePrefixListId)
def get_SourcePortRange(self): # String
return self.get_query_params().get('SourcePortRange')
def set_SourcePortRange(self, SourcePortRange): # String
self.add_query_param('SourcePortRange', SourcePortRange)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_SourceGroupOwnerId(self): # Long
return self.get_query_params().get('SourceGroupOwnerId')
def set_SourceGroupOwnerId(self, SourceGroupOwnerId): # Long
self.add_query_param('SourceGroupOwnerId', SourceGroupOwnerId)
def get_SourceGroupOwnerAccount(self): # String
return self.get_query_params().get('SourceGroupOwnerAccount')
def set_SourceGroupOwnerAccount(self, SourceGroupOwnerAccount): # String
self.add_query_param('SourceGroupOwnerAccount', SourceGroupOwnerAccount)
def get_Policy(self): # String
return self.get_query_params().get('Policy')
def set_Policy(self, Policy): # String
self.add_query_param('Policy', Policy)
def get_Ipv6SourceCidrIp(self): # String
return self.get_query_params().get('Ipv6SourceCidrIp')
def set_Ipv6SourceCidrIp(self, Ipv6SourceCidrIp): # String
self.add_query_param('Ipv6SourceCidrIp', Ipv6SourceCidrIp)
def get_Ipv6DestCidrIp(self): # String
return self.get_query_params().get('Ipv6DestCidrIp')
def set_Ipv6DestCidrIp(self, Ipv6DestCidrIp): # String
self.add_query_param('Ipv6DestCidrIp', Ipv6DestCidrIp)
def get_PortRange(self): # String
return self.get_query_params().get('PortRange')
def set_PortRange(self, PortRange): # String
self.add_query_param('PortRange', PortRange)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SourceCidrIp(self): # String
return self.get_query_params().get('SourceCidrIp')
def set_SourceCidrIp(self, SourceCidrIp): # String
self.add_query_param('SourceCidrIp', SourceCidrIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Priority(self): # String
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # String
self.add_query_param('Priority', Priority)
def get_DestCidrIp(self): # String
return self.get_query_params().get('DestCidrIp')
def set_DestCidrIp(self, DestCidrIp): # String
self.add_query_param('DestCidrIp', DestCidrIp)
def get_SourceGroupId(self): # String
return self.get_query_params().get('SourceGroupId')
def set_SourceGroupId(self, SourceGroupId): # String
self.add_query_param('SourceGroupId', SourceGroupId)
def get_SecurityGroupRuleId(self): # String
return self.get_query_params().get('SecurityGroupRuleId')
def set_SecurityGroupRuleId(self, SecurityGroupRuleId): # String
self.add_query_param('SecurityGroupRuleId', SecurityGroupRuleId) | null |
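# End-to-end sketch (the ids are hypothetical; client is an AcsClient
# from aliyunsdkcore, configured with credentials elsewhere).
# Identifying a rule by SecurityGroupRuleId lets you modify it without
# restating the full rule tuple:
def _example_modify_rule_description(client):
    request = ModifySecurityGroupRuleRequest()
    request.set_SecurityGroupId('sg-example')       # hypothetical id
    request.set_SecurityGroupRuleId('sgr-example')  # hypothetical id
    request.set_Description('updated by script')
    return client.do_action_with_exception(request)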
575 | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Type
if TYPE_CHECKING:
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
logger = logging.getLogger(__name__)
def aws_batch_batch_system_factory():
from toil.batchSystems.awsBatch import AWSBatchBatchSystem
return AWSBatchBatchSystem
def gridengine_batch_system_factory():
from toil.batchSystems.gridengine import GridEngineBatchSystem
return GridEngineBatchSystem
def parasol_batch_system_factory():
from toil.batchSystems.parasol import ParasolBatchSystem
return ParasolBatchSystem
def METHOD_NAME():
from toil.batchSystems.lsf import LSFBatchSystem
return LSFBatchSystem
def single_machine_batch_system_factory():
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
return SingleMachineBatchSystem
def mesos_batch_system_factory():
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
return MesosBatchSystem
def slurm_batch_system_factory():
from toil.batchSystems.slurm import SlurmBatchSystem
return SlurmBatchSystem
def tes_batch_system_factory():
from toil.batchSystems.tes import TESBatchSystem
return TESBatchSystem
def torque_batch_system_factory():
from toil.batchSystems.torque import TorqueBatchSystem
return TorqueBatchSystem
def htcondor_batch_system_factory():
from toil.batchSystems.htcondor import HTCondorBatchSystem
return HTCondorBatchSystem
def kubernetes_batch_system_factory():
from toil.batchSystems.kubernetes import KubernetesBatchSystem
return KubernetesBatchSystem
BATCH_SYSTEM_FACTORY_REGISTRY: Dict[str, Callable[[], Type["AbstractBatchSystem"]]] = {
'aws_batch' : aws_batch_batch_system_factory,
'parasol' : parasol_batch_system_factory,
'single_machine' : single_machine_batch_system_factory,
'grid_engine' : gridengine_batch_system_factory,
'lsf' : METHOD_NAME,
'mesos' : mesos_batch_system_factory,
'slurm' : slurm_batch_system_factory,
'tes' : tes_batch_system_factory,
'torque' : torque_batch_system_factory,
'htcondor' : htcondor_batch_system_factory,
'kubernetes' : kubernetes_batch_system_factory
}
BATCH_SYSTEMS = list(BATCH_SYSTEM_FACTORY_REGISTRY.keys())
DEFAULT_BATCH_SYSTEM = 'single_machine'
def addBatchSystemFactory(key: str, batchSystemFactory: Callable[[], Type['AbstractBatchSystem']]):
"""
Adds a batch system to the registry for workflow-supplied batch systems.
"""
BATCH_SYSTEMS.append(key)
BATCH_SYSTEM_FACTORY_REGISTRY[key] = batchSystemFactory
# We need a snapshot save/restore system for testing. We can't just tamper with
# the globals because module-level globals are their own references, so we
# can't touch this module's global name bindings from a client module.
def save_batch_system_plugin_state() -> Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]:
"""
Return a snapshot of the plugin registry that can be restored to remove
added plugins. Useful for testing the plugin system in-process with other
tests.
"""
snapshot = (list(BATCH_SYSTEMS), dict(BATCH_SYSTEM_FACTORY_REGISTRY))
return snapshot
def restore_batch_system_plugin_state(snapshot: Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]):
"""
Restore the batch system registry state to a snapshot from
save_batch_system_plugin_state().
"""
# We need to apply the snapshot without rebinding the names, because that
# won't affect modules that imported the names.
wanted_batch_systems, wanted_registry = snapshot
BATCH_SYSTEMS.clear()
BATCH_SYSTEMS.extend(wanted_batch_systems)
BATCH_SYSTEM_FACTORY_REGISTRY.clear()
BATCH_SYSTEM_FACTORY_REGISTRY.update(wanted_registry) | null |
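# Usage sketch for the plugin hooks above (my_plugin/MyBatchSystem are
# hypothetical). The snapshot/restore pair keeps the registry clean,
# e.g. between tests:
def _example_register_custom_batch_system():
    snapshot = save_batch_system_plugin_state()
    try:
        def my_factory():
            from my_plugin import MyBatchSystem  # assumed module
            return MyBatchSystem
        addBatchSystemFactory('my_system', my_factory)
        assert 'my_system' in BATCH_SYSTEMS
    finally:
        restore_batch_system_plugin_state(snapshot)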
576 | from galaxy import model
from galaxy.util.unittest import TestCase
from galaxy.workflow import extract
UNDEFINED_JOB = object()
class TestWorkflowExtractSummary(TestCase):
def setUp(self):
self.history = MockHistory()
self.trans = MockTrans(self.history)
def test_empty_history(self):
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert not job_dict
def test_summarize_returns_name_and_dataset_list(self):
        # Create two jobs and three datasets; test that they are grouped
        # by job correctly with the correct output names.
hda1 = MockHda()
self.history.active_datasets.append(hda1)
hda2 = MockHda(job=hda1.job, output_name="out2")
self.history.active_datasets.append(hda2)
hda3 = MockHda(output_name="out3")
self.history.active_datasets.append(hda3)
job_dict, warnings = extract.summarize(trans=self.trans)
assert len(job_dict) == 2
assert not warnings
assert job_dict[hda1.job] == [("out1", hda1), ("out2", hda2)]
assert job_dict[hda3.job] == [("out3", hda3)]
def METHOD_NAME(self):
hda = MockHda()
derived_hda_1 = MockHda()
derived_hda_1.copied_from_history_dataset_association = hda
derived_hda_2 = MockHda()
derived_hda_2.copied_from_history_dataset_association = derived_hda_1
self.history.active_datasets.append(derived_hda_2)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
assert job_dict[hda.job] == [("out1", derived_hda_2)]
def test_fake_job_hda(self):
"""Fakes job if creating_job_associations is empty."""
hda = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert fake_job.id.startswith("fake_")
datasets = next(iter(job_dict.values()))
assert datasets == [(None, hda)]
def test_fake_job_hda_name_guess(self):
hda_from_history = MockHda(job=UNDEFINED_JOB)
hda_from_history.copied_from_history_dataset_association = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda_from_history)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert "History" in fake_job.name
self.history.active_datasets.remove(hda_from_history)
hda_from_library = MockHda(job=UNDEFINED_JOB)
hda_from_library.copied_from_library_dataset_dataset_association = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda_from_library)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert "Library" in fake_job.name
def test_fake_job_hdca(self):
hdca = MockHdca()
self.history.active_datasets.append(hdca)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert fake_job.id.startswith("fake_")
assert fake_job.is_fake
content_instances = next(iter(job_dict.values()))
assert content_instances == [(None, hdca)]
def test_implicit_map_job_hdca(self):
creating_job = model.Job()
hdca = MockHdca(implicit_output_name="out1", job=creating_job)
self.history.active_datasets.append(hdca)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
job = next(iter(job_dict.keys()))
assert job is creating_job
def test_warns_and_skips_datasets_if_not_finished(self):
hda = MockHda(state="queued")
self.history.active_datasets.append(hda)
job_dict, warnings = extract.summarize(trans=self.trans)
assert warnings
assert len(job_dict) == 0
class MockJobToOutputDatasetAssociation:
job = None
def __init__(self, name, dataset):
self.name = name
self.dataset = dataset
class MockHistory:
def __init__(self):
self.active_datasets = []
@property
def active_contents(self):
return self.active_datasets
class MockTrans:
def __init__(self, history):
self.history = history
def get_history(self):
return self.history
class MockHda:
def __init__(self, state="ok", output_name="out1", job=None):
self.hid = 1
self.id = 123
self.state = state
self.copied_from_history_dataset_association = None
self.copied_from_library_dataset_dataset_association = None
self.history_content_type = "dataset"
if job is not UNDEFINED_JOB:
if not job:
job = model.Job()
self.job = job
assoc = MockJobToOutputDatasetAssociation(output_name, self)
assoc.job = job
self.creating_job_associations = [assoc]
else:
self.creating_job_associations = []
class MockHdca:
def __init__(self, implicit_output_name=None, job=None, hid=1):
self.id = 124
self.copied_from_history_dataset_collection_association = None
self.history_content_type = "dataset_collection"
self.implicit_output_name = implicit_output_name
        self.hid = hid
self.collection = model.DatasetCollection()
self.creating_job_associations = []
element = model.DatasetCollectionElement(
collection=self.collection,
element=model.HistoryDatasetAssociation(),
element_index=0,
element_identifier="moocow",
)
element.dataset_instance.dataset = model.Dataset()
element.dataset_instance.dataset.state = "ok"
creating = model.JobToOutputDatasetAssociation(
implicit_output_name,
element.dataset_instance,
)
creating.job = job
element.dataset_instance.creating_job_associations = [
creating,
] | null |
577 | # Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.service.dvid import DVIDService
import requests
import numpy as np
import json
import ast
class VersioningService(DVIDService):
""" VersioningService for DVID service.
"""
def __init__(self, base_url):
""" Constructor.
Args:
base_url (str): Base url (host) of project service.
Raises:
(KeyError): if given invalid version.
"""
DVIDService.__init__(self)
self.base_url = base_url
def merge(self, UUID, parents, mergeType, note):
""" Creates a conflict-free merge of a set of committed parent UUIDs into a child. Note
the merge will not necessarily create an error immediately
Args:
mergeType (str) = "conflict-free"
parents (array) = [ "parent-uuid1", "parent-uuid2", ... ]
note (str) = this is a description of what I did on this commit
Returns:
merge_child_uuid (str): child generated uuid after merge
Raises:
HTTPError: On non 200 status code
"""
merge_resp = requests.post(
"{}/api/repo/{}/merge".format(self.base_url, UUID),
json={"mergeType": mergeType, "parents": parents, "note": note},
)
if merge_resp.status_code != 200:
raise requests.HTTPError(merge_resp.content)
merge_child_uuid = merge_resp.json()["child"]
return merge_child_uuid
def resolve(self, UUID, data, parents, note):
""" Forces a merge of a set of committed parent UUIDs into a child by specifying a
UUID order that establishes priorities in case of conflicts
Args:
data (array) = [ "instance-name-1", "instance-name2", ... ],
parents (array): [ "parent-uuid1", "parent-uuid2", ... ],
note (str): this is a description of what I did on this commit
Returns:
resolve_child_uuid (str): child generated uuid after resolution
Raises:
HTTPError: On non 200 status code
"""
resolve_resp = requests.post(
"{}/api/repo/{}/resolve".format(self.base_url, UUID),
json={"data": data, "parents": parents, "note": note},
)
if resolve_resp.status_code != 200:
raise requests.HTTPError(resolve_resp.content)
resolve_child_uuid = resolve_resp.json()["child"]
return resolve_child_uuid
def get_log(self, UUID):
"""The log is a list of strings that will be appended to the repo's log. They should be
descriptions for the entire repo and not just one node.
Args:
UUID (str): UUID of the DVID repository
Returns:
str: list of all log recordings related to the DVID repository
Raises:
(ValueError): if given invalid UUID.
"""
if UUID == "":
raise ValueError("The UUID was not specified")
else:
log_resp = requests.get("{}/api/node/{}/log".format(self.base_url, UUID))
if log_resp.status_code != 200:
raise requests.HTTPError(log_resp.content)
log_m = log_resp.content
return log_m
def post_log(self, UUID, log_m):
"""Allows the user to write a short description of the content in the repository
{ "log": [ "provenance data...", "provenance data...", ...] }
Args:
UUID (str): UUID of the DVID repository (str)
log_m (str): Message to record on the repositories history log (str)
Returns:
HTTP Response
Raises:
(ValueError): if given invalid UUID or log.
"""
if UUID == "":
raise ValueError("The UUID was not specified")
elif log_m == "":
raise ValueError("Your log submission cannot be empty")
else:
log_resp = requests.post(
"{}/api/node/{}/log".format(self.base_url, UUID), json={"log": [log_m]}
)
if log_resp.status_code != 200:
raise requests.HTTPError(log_resp.content)
return log_resp
def commit(self, UUID, note="", log_m=""):
"""Allows the user to write a short description of the content in the repository
Args:
UUID (str): UUID of the DVID repository
note (str): human-readable commit message
log_m (str): Message to record on the repositories history log
Returns:
commit_uuid (str): commit hash
Raises:
(ValueError): if given invalid UUID.
"""
if UUID == "":
raise ValueError("The UUID was not specified")
else:
committed = requests.post(
"{}/api/node/{}/commit".format(self.base_url, UUID),
json={"note": note, "log": [log_m]},
)
if committed.status_code != 200:
raise requests.HTTPError(committed.content)
commit_uuid = committed.json()["committed"]
return commit_uuid
def METHOD_NAME(self, UUID, note=""):
"""Allows the user to write a short description of the content in the repository
Args:
UUID (str): UUID of the DVID repository
note (str): Message to record when branching
Returns:
branch_uuid (str): The child branch UUID
Raises:
(KeyError): if given invalid version.
"""
if UUID == "":
raise ValueError("The UUID was not specified")
else:
METHOD_NAME = requests.post(
"{}/api/node/{}/branch".format(self.base_url, UUID), json={"note": note}
)
if METHOD_NAME.status_code != 200:
raise requests.HTTPError(METHOD_NAME.content)
branch_uuid = METHOD_NAME.json()["child"]
return branch_uuid | null |
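# Usage sketch against an assumed local DVID server; the UUID and the
# messages are illustrative:
def _example_commit_flow():
    vs = VersioningService('http://localhost:8000')
    vs.post_log('a1b2c3', 'ingested raw volume')
    return vs.commit('a1b2c3', note='initial import', log_m='commit via intern')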
578 | """Tests for the natural numbers range data type"""
from typing import *
import pytest
from hypothesis import Phase, given, settings, strategies as st
from looper.utils import NatIntervalException, NatIntervalInclusive
gen_pos_int = st.integers(min_value=1)
gen_opt_int = st.one_of(st.integers(), st.none())
def is_non_pos(opt_int: Optional[int]) -> bool:
"""Determine whether the given value is non-positive (and non-null)."""
return opt_int is not None and opt_int < 1
def pytest_generate_tests(metafunc):
if "legit_delim" in metafunc.fixturenames:
metafunc.parametrize("legit_delim", [":", "-"])
def nondecreasing_pair_strategy(**kwargs):
"""Generate a pair of values in which first respects given upper bound and second is no more than first."""
return st.tuples(st.integers(**kwargs), st.integers(**kwargs)).filter(
lambda p: p[0] <= p[1]
)
class NaturalRangePureConstructorTests:
"""Tests for direct use of natural range primary constructor"""
@given(upper_bound=gen_pos_int)
def test_zero_is_prohibited(self, upper_bound):
"""Separate this case since it's an edge case."""
with pytest.raises(NatIntervalException):
NatIntervalInclusive(0, upper_bound)
@given(bounds=nondecreasing_pair_strategy(max_value=0))
def test_non_positive_is_prohibited(self, bounds):
lo, hi = bounds
with pytest.raises(NatIntervalException):
NatIntervalInclusive(lo, hi)
@given(bounds=st.tuples(st.integers(), st.integers()).filter(lambda p: p[0] > p[1]))
def test_upper_less_than_lower__fails_as_expected(self, bounds):
lo, hi = bounds
with pytest.raises(NatIntervalException):
NatIntervalInclusive(lo, hi)
class NaturalRangeFromStringTests:
"""Tests for parsing of natural number range from text, like CLI arg"""
@pytest.mark.parametrize(
"arg_template", ["0{sep}0", "{sep}0", "0{sep}", "0{sep}0", "{sep}0", "0{sep}"]
)
@given(upper_bound=gen_pos_int)
def test_zero__does_not_parse(self, arg_template, legit_delim, upper_bound):
arg = arg_template.format(sep=legit_delim)
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=upper_bound)
@given(upper_bound=st.integers())
def test_just_delimiter__does_not_parse(self, legit_delim, upper_bound):
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(legit_delim, upper_bound=upper_bound)
@given(
lo_hi_upper=st.tuples(gen_opt_int, gen_opt_int, st.integers()).filter(
lambda t: (t[0] is not None or t[1] is not None)
and any(is_non_pos(n) for n in t)
)
)
def test_nonpositive_values__fail_with_expected_error(
self, lo_hi_upper, legit_delim
):
lo, hi, upper_bound = lo_hi_upper
if lo is None and hi is None:
raise ValueError("Both lower and upper bound generated are null.")
if lo is None:
arg = legit_delim + str(hi)
elif hi is None:
arg = str(lo) + legit_delim
else:
arg = str(lo) + legit_delim + str(hi)
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=upper_bound)
@pytest.mark.parametrize("arg", ["1,2", "1;2", "1_2", "1/2", "1.2", "1~2"])
@given(upper_bound=st.integers(min_value=3))
def test_illegal_delimiter__fail_with_expected_error(self, arg, upper_bound):
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=upper_bound)
@given(
lower_and_limit=st.tuples(st.integers(), st.integers()).filter(
lambda p: p[1] < p[0]
)
)
def METHOD_NAME(
self, lower_and_limit, legit_delim
):
lower, limit = lower_and_limit
arg = str(lower) + legit_delim
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=limit)
@given(lower_and_upper=nondecreasing_pair_strategy(min_value=1))
def test_one_sided_lower_with_samples_gteq_bound__succeeds(
self, lower_and_upper, legit_delim
):
lo, upper_bound = lower_and_upper
exp = NatIntervalInclusive(lo, upper_bound)
arg = str(lo) + legit_delim
obs = NatIntervalInclusive.from_string(arg, upper_bound=upper_bound)
assert obs == exp
@given(upper_and_limit=nondecreasing_pair_strategy(min_value=1))
def test_one_sided_upper_with_samples_gteq_bound__succeeds(
self, upper_and_limit, legit_delim
):
upper, limit = upper_and_limit
exp = NatIntervalInclusive(1, upper)
arg = legit_delim + str(upper)
obs = NatIntervalInclusive.from_string(arg, upper_bound=limit)
assert obs == exp
@given(
upper_and_limit=st.tuples(
st.integers(min_value=1), st.integers(min_value=1)
).filter(lambda p: p[1] < p[0])
)
def test_one_sided_upper_with_samples_lt_bound__uses_bound(
self, upper_and_limit, legit_delim
):
upper, limit = upper_and_limit
exp = NatIntervalInclusive(1, limit)
arg = legit_delim + str(upper)
obs = NatIntervalInclusive.from_string(arg, upper_bound=limit)
assert obs == exp
@given(
lower_upper_limit=st.tuples(gen_pos_int, gen_pos_int, gen_pos_int).filter(
lambda t: t[1] < t[0] or t[2] < t[0]
)
)
def test_two_sided_parse_upper_lt_lower(self, lower_upper_limit, legit_delim):
lo, hi, lim = lower_upper_limit
arg = str(lo) + legit_delim + str(hi)
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=lim)
@given(
lo_hi_limit=st.tuples(
st.integers(min_value=2), gen_pos_int, gen_pos_int
).filter(lambda t: t[2] < t[0] <= t[1])
)
def test_two_sided_parse_upper_gteq_lower_with_upper_limit_lt_lower(
self, lo_hi_limit, legit_delim
):
lo, hi, limit = lo_hi_limit
arg = str(lo) + legit_delim + str(hi)
with pytest.raises(NatIntervalException):
NatIntervalInclusive.from_string(arg, upper_bound=limit)
@given(
lo_hi_limit=st.tuples(gen_pos_int, gen_pos_int, gen_pos_int).filter(
lambda t: t[0] < t[2] < t[1]
)
)
def test_two_sided_parse_upper_gteq_lower_with_upper_limit_between_lower_and_upper(
self,
lo_hi_limit,
legit_delim,
):
lo, hi, limit = lo_hi_limit
exp = NatIntervalInclusive(lo, limit)
arg = str(lo) + legit_delim + str(hi)
obs = NatIntervalInclusive.from_string(arg, upper_bound=limit)
assert obs == exp
@given(
lo_hi_upper=st.tuples(gen_pos_int, gen_pos_int, gen_pos_int).filter(
lambda t: t[0] <= t[1] <= t[2]
)
)
def test_two_sided_parse_upper_gteq_lower_with_upper_limit_gteq_upper(
self, lo_hi_upper, legit_delim
):
lo, hi, upper_bound = lo_hi_upper
exp = NatIntervalInclusive(lo, hi)
arg = f"{str(lo)}{legit_delim}{str(hi)}"
obs = NatIntervalInclusive.from_string(arg, upper_bound=upper_bound)
assert obs == exp | null |
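# Direct sketch of the parsing behavior the tests above pin down
# (values illustrative): bounds are 1-based and inclusive, and the
# upper limit clamps open or oversized ranges.
def _example_interval_parsing():
    full = NatIntervalInclusive.from_string('2:5', upper_bound=10)
    clamped = NatIntervalInclusive.from_string(':50', upper_bound=10)
    assert full == NatIntervalInclusive(2, 5)
    assert clamped == NatIntervalInclusive(1, 10)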
579 | import IMP
import IMP.test
import IMP.algebra
import IMP.core
import IMP.npc
class Tests(IMP.test.TestCase):
def evaluate_config(self, m, particles):
particles_by_type = {}
for particle_data in particles:
name, x, y = particle_data
p = IMP.Particle(m, name)
IMP.core.XYZ.setup_particle(p, IMP.algebra.Vector3D(x,y,0.))
particle_type = name[0]
if particle_type in particles_by_type:
particles_by_type[particle_type].append(p)
else:
particles_by_type[particle_type] = [p]
ps = IMP.core.DistancePairScore(IMP.core.Linear(0.0, 1.0))
r = IMP.npc.CompositeRestraint(m, ps)
r.set_maximum_score(10)
for val in particles_by_type.values():
r.add_type(val)
# Sort pairs so that outputs are consistent for assertions
pairs = r.get_connected_pairs()
pairs = sorted([sorted(p) for p in pairs])
pairs = [(m.get_particle_name(p[0]), m.get_particle_name(p[1]))
for p in pairs]
return r.evaluate(False), pairs
def test_composite_restraint_subset(self):
"""Make sure CompositeRestraint finds the right subset"""
m = IMP.Model()
# A2, B2, C2 should be pruned since they are far away, B1-A1-C1 returned
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 0,-1),
('C1', 1,0),
('A2', -10,-10),
('B2', 10,10),
('C2', 10,-10)])
self.assertAlmostEqual(score, 2.0, delta=1e-6)
self.assertEqual(pairs, [('A1', 'B1'), ('A1', 'C1')])
def test_composite_restraint_simple(self):
"""Check CompositeRestraint with simple systems, no copies"""
m = IMP.Model()
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 1,0),
('C1', 2,0)])
self.assertAlmostEqual(score, 2.0, delta=1e-6)
self.assertEqual(pairs, [('A1', 'B1'), ('B1', 'C1')])
def METHOD_NAME(self):
"""Multiple copies of a particle should be OK"""
m = IMP.Model()
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 1,0),
('B2', 2,0),
('C1', 3,0)])
self.assertAlmostEqual(score, 3.0, delta=1e-6)
self.assertEqual(pairs, [('A1', 'B1'), ('B1', 'B2'), ('B2', 'C1')])
def test_composite_restraint_minimum(self):
"""Make sure minimum subtree is returned"""
m = IMP.Model()
# Two possible subtrees: A1-B1-B2-C1 and C1-B3-A2; make sure we find
# the lower scoring one
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 1,0),
('B2', 2,0),
('C1', 3,0),
('B3', 4,0),
('A2', 5,0)])
self.assertAlmostEqual(score, 2.0, delta=1e-6)
self.assertEqual(pairs, [('C1', 'B3'), ('B3', 'A2')])
def test_composite_restraint_exceed_max_score_multiple(self):
"""Check handling of multiple copies with an edge above max_score"""
m = IMP.Model()
# We have OK A-B and B-C interactions, but the B-B distance is too large
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 1,0),
('B2', 30,0),
('C1', 31,0)])
self.assertAlmostEqual(score, 10.0, delta=1e-6)
self.assertEqual(pairs, [])
def test_composite_restraint_exceed_max_score(self):
"""Check failure with an edge above max_score"""
m = IMP.Model()
# Composite cannot be satisfied
score, pairs = self.evaluate_config(m,
[('A1', 0,0),
('B1', 1,0),
('C1', 31,0)])
self.assertAlmostEqual(score, 10.0, delta=1e-6)
self.assertEqual(pairs, [])
if __name__ == '__main__':
IMP.test.main() | null |
580 | from unittest.mock import AsyncMock
import pytest
import pytest_asyncio
from mock import patch
from db.errors import DuplicateEntity, EntityDoesNotExist
from db.repositories.shared_services import SharedServiceRepository
from db.repositories.operations import OperationRepository
from models.domain.shared_service import SharedService
from models.domain.resource import ResourceType
from models.schemas.shared_service import SharedServiceInCreate
pytestmark = pytest.mark.asyncio
SHARED_SERVICE_ID = "000000d3-82da-4bfc-b6e9-9a7853ef753e"
@pytest_asyncio.fixture
async def shared_service_repo():
with patch('db.repositories.base.BaseRepository._get_container', return_value=AsyncMock()):
with patch('azure.cosmos.CosmosClient') as cosmos_client_mock:
shared_service_repo = await SharedServiceRepository.create(cosmos_client_mock)
yield shared_service_repo
@pytest_asyncio.fixture
async def operations_repo():
with patch('db.repositories.base.BaseRepository._get_container', return_value=None):
with patch('azure.cosmos.CosmosClient') as cosmos_client_mock:
operations_repo = await OperationRepository.create(cosmos_client_mock)
yield operations_repo
@pytest.fixture
def shared_service():
shared_service = SharedService(
id=SHARED_SERVICE_ID,
templateVersion="0.1.0",
etag='',
properties={},
templateName="my-shared-service",
resourcePath="test"
)
return shared_service
@pytest.fixture
def basic_shared_service_request():
return SharedServiceInCreate(
templateName="my-shared-service",
properties={
"display_name": "test",
"description": "test",
"tre_id": "test"
})
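# Note (assumption): the basic_shared_service_template fixture referenced by the
# tests below is not defined in this file; pytest presumably resolves it from a
# shared conftest.py.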
async def test_get_shared_service_by_id_raises_if_does_not_exist(shared_service_repo):
shared_service_repo.query = AsyncMock(return_value=[])
with pytest.raises(EntityDoesNotExist):
await shared_service_repo.get_shared_service_by_id(SHARED_SERVICE_ID)
async def test_get_active_shared_services_for_shared_queries_db(shared_service_repo):
shared_service_repo.query = AsyncMock(return_value=[])
await shared_service_repo.get_active_shared_services()
shared_service_repo.query.assert_called_once_with(query=SharedServiceRepository.active_shared_services_query())
@patch('db.repositories.shared_services.SharedServiceRepository.validate_input_against_template')
@patch('core.config.TRE_ID', "1234")
async def METHOD_NAME(validate_input_mock, shared_service_repo, basic_shared_service_request, basic_shared_service_template):
shared_service_repo.query = AsyncMock(return_value=[])
shared_service_to_create = basic_shared_service_request
validate_input_mock.return_value = basic_shared_service_template
shared_service, _ = await shared_service_repo.create_shared_service_item(shared_service_to_create, [])
assert shared_service.templateName == basic_shared_service_request.templateName
assert shared_service.resourceType == ResourceType.SharedService
    # We expect tre_id to be overridden in the shared service created
assert shared_service.properties["tre_id"] != shared_service_to_create.properties["tre_id"]
assert shared_service.properties["tre_id"] == "1234"
@patch('db.repositories.shared_services.SharedServiceRepository.validate_input_against_template')
@patch('core.config.TRE_ID', "1234")
async def test_create_shared_service_item_with_the_same_name_twice_fails(validate_input_mock, shared_service_repo, basic_shared_service_request, basic_shared_service_template):
shared_service_repo.query = AsyncMock(return_value=[])
validate_input_mock.return_value = basic_shared_service_template
shared_service, _ = await shared_service_repo.create_shared_service_item(basic_shared_service_request, [])
await shared_service_repo.save_item(shared_service)
shared_service_repo.query = AsyncMock()
shared_service_repo.query.return_value = [shared_service.__dict__]
with pytest.raises(DuplicateEntity):
shared_service = await shared_service_repo.create_shared_service_item(basic_shared_service_request, [])
@patch('db.repositories.shared_services.SharedServiceRepository.validate_input_against_template', side_effect=ValueError)
async def test_create_shared_item_raises_value_error_if_template_is_invalid(_, shared_service_repo, basic_shared_service_request):
shared_service_repo.query = AsyncMock(return_value=[])
shared_service_to_create = basic_shared_service_request
with pytest.raises(ValueError):
await shared_service_repo.create_shared_service_item(shared_service_to_create, []) | null |
581 | import asyncio
import hashlib
import hmac
import logging
import time
import uuid
from base64 import b64decode
from typing import Any, AsyncIterable, Dict, List, Optional
from zlib import MAX_WBITS, decompress
import signalr_aio
import ujson
from async_timeout import timeout
from hummingbot.connector.exchange.bittrex.bittrex_auth import BittrexAuth
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
BITTREX_WS_FEED = "https://socket-v3.bittrex.com/signalr"
MAX_RETRIES = 20
MESSAGE_TIMEOUT = 30.0
NaN = float("nan")
class BittrexAPIUserStreamDataSource(UserStreamTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
PING_TIMEOUT = 10.0
_btausds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._btausds_logger is None:
cls._btausds_logger = logging.getLogger(__name__)
return cls._btausds_logger
def __init__(self, bittrex_auth: BittrexAuth, trading_pairs: Optional[List[str]] = []):
self._bittrex_auth: BittrexAuth = bittrex_auth
self._trading_pairs = trading_pairs
self._current_listen_key = None
self._listen_for_user_stream_task = None
self._last_recv_time: float = 0
self._websocket_connection: Optional[signalr_aio.Connection] = None
self._hub = None
super().__init__()
@property
def hub(self):
return self._hub
@hub.setter
def hub(self, value):
self._hub = value
@property
def last_recv_time(self) -> float:
return self._last_recv_time
async def _socket_user_stream(self, conn: signalr_aio.Connection) -> AsyncIterable[str]:
try:
while True:
async with timeout(MESSAGE_TIMEOUT):
msg = await conn.msg_queue.get()
self._last_recv_time = time.time()
yield msg
except asyncio.TimeoutError:
self.logger().warning("Message recv() timed out. Reconnecting to Bittrex SignalR WebSocket... ")
def _transform_raw_message(self, msg) -> Dict[str, Any]:
def _decode_message(raw_message: bytes) -> Dict[str, Any]:
try:
decode_msg: bytes = decompress(b64decode(raw_message, validate=True), -MAX_WBITS)
except SyntaxError:
decode_msg: bytes = decompress(b64decode(raw_message, validate=True))
except Exception:
self.logger().error("Error decoding message", exc_info=True)
return {"error": "Error decoding message"}
return ujson.loads(decode_msg.decode())
        def _is_event_type(msg, event_name) -> bool:
            return len(msg.get("M", [])) > 0 and isinstance(msg["M"][0], dict) and msg["M"][0].get("M", None) == event_name
def _is_heartbeat(msg):
return _is_event_type(msg, "heartbeat")
def _is_auth_notification(msg):
return _is_event_type(msg, "authenticationExpiring")
def _is_order_delta(msg) -> bool:
return _is_event_type(msg, "order")
def METHOD_NAME(msg) -> bool:
return _is_event_type(msg, "balance")
def _is_execution_event(msg) -> bool:
return _is_event_type(msg, "execution")
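        # A raw SignalR frame is JSON whose "M" entry lists hub invocations; each
        # invocation's own "M" field names the event, and its "A" payloads are
        # base64-encoded and deflate-compressed (hence _decode_message above).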
output: Dict[str, Any] = {"event_type": None, "content": None, "error": None}
msg: Dict[str, Any] = ujson.loads(msg)
if _is_auth_notification(msg):
output["event_type"] = "re-authenticate"
elif _is_heartbeat(msg):
output["event_type"] = "heartbeat"
elif METHOD_NAME(msg) or _is_order_delta(msg) or _is_execution_event(msg):
output["event_type"] = msg["M"][0]["M"]
output["content"] = _decode_message(msg["M"][0]["A"][0])
return output
async def listen_for_user_stream(self, output: asyncio.Queue):
while True:
try:
self._websocket_connection = signalr_aio.Connection(BITTREX_WS_FEED, session=None)
self.hub = self._websocket_connection.register_hub("c3")
await self.authenticate()
self.hub.server.invoke("Subscribe", ["heartbeat", "order", "balance", "execution"])
self._websocket_connection.start()
async for raw_message in self._socket_user_stream(self._websocket_connection):
decode: Dict[str, Any] = self._transform_raw_message(raw_message)
self.logger().debug(f"Got ws message {decode}")
if decode.get("error") is not None:
self.logger().error(decode["error"])
continue
content_type = decode.get("event_type")
if content_type is not None:
if content_type in ["balance", "order", "execution"]:
output.put_nowait(decode)
elif content_type == "re-authenticate":
await self.authenticate()
elif content_type == "heartbeat":
self.logger().debug("WS heartbeat")
continue
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error with Bittrex WebSocket connection. " "Retrying after 30 seconds...", exc_info=True
)
await asyncio.sleep(30.0)
async def authenticate(self):
self.logger().info("Authenticating...")
timestamp = int(round(time.time() * 1000))
randomized = str(uuid.uuid4())
challenge = f"{timestamp}{randomized}"
signed_challenge = hmac.new(self._bittrex_auth.secret_key.encode(), challenge.encode(), hashlib.sha512).hexdigest()
self.hub.server.invoke("Authenticate", self._bittrex_auth.api_key, timestamp, randomized, signed_challenge)
return | null |
582 | from unittest import TestCase
from pcs.lib import resource_agent as ra
class ResourceAgentName(TestCase):
def test_full_name_3_parts(self):
self.assertEqual(
ra.ResourceAgentName("standard", "provider", "type").full_name,
"standard:provider:type",
)
def test_full_name_2_parts_none(self):
self.assertEqual(
ra.ResourceAgentName("standard", None, "type").full_name,
"standard:type",
)
def test_full_name_2_parts_empty(self):
self.assertEqual(
ra.ResourceAgentName("standard", "", "type").full_name,
"standard:type",
)
def test_is_stonith_yes(self):
self.assertTrue(
ra.ResourceAgentName("stonith", "pacemaker", "Dummy").is_stonith
)
def test_is_stonith_no(self):
self.assertFalse(
ra.ResourceAgentName("lsb", None, "fence_xvm").is_stonith
)
def test_is_fake_pcmk_agent_yes(self):
self.assertTrue(
ra.ResourceAgentName(
ra.const.FAKE_AGENT_STANDARD, None, "Dummy"
).is_pcmk_fake_agent
)
def test_is_fake_pcmk_agent_no(self):
self.assertFalse(
ra.ResourceAgentName("pacemaker", None, "fenced").is_pcmk_fake_agent
)
def _fixture_metadata(name, actions):
return ra.ResourceAgentMetadata(
name,
agent_exists=True,
ocf_version=ra.const.OCF_1_0,
shortdesc=None,
longdesc=None,
parameters=[],
actions=actions,
)
def _fixture_action(name, automatic, on_target):
return ra.ResourceAgentAction(
name=name,
timeout=None,
interval=None,
role=None,
start_delay=None,
depth=None,
automatic=automatic,
on_target=on_target,
)
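# The cases below encode the rule that an agent provides unfencing only when it
# is a stonith agent whose "on" action is both automatic and on_target.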
class ProvidesUnfencing(TestCase):
def METHOD_NAME(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("ocf", "pacemaker", "Dummy"),
[_fixture_action("on", True, True)],
).provides_unfencing
)
def test_not_automatic(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("stonith", None, "fence_xvm"),
[_fixture_action("on", False, True)],
).provides_unfencing
)
def test_not_on_target(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("stonith", None, "fence_xvm"),
[_fixture_action("on", True, False)],
).provides_unfencing
)
def test_not_action_on(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("stonith", None, "fence_xvm"),
[_fixture_action("off", True, True)],
).provides_unfencing
)
def test_true(self):
self.assertTrue(
_fixture_metadata(
ra.ResourceAgentName("stonith", None, "fence_xvm"),
[
_fixture_action("on", False, True),
_fixture_action("on", True, False),
_fixture_action("off", True, True),
_fixture_action("on", True, True),
],
).provides_unfencing
)
class ProvidesPromotability(TestCase):
def test_both_actions_missing(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("systemd", None, "pacemaker"),
[_fixture_action("on", False, False)],
).provides_promotability
)
def test_only_promote_action(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("ocf", "heartbeat", "Dummy"),
[
_fixture_action("off", False, False),
_fixture_action("promote", False, False),
],
).provides_promotability
)
def test_only_demote_action(self):
self.assertFalse(
_fixture_metadata(
ra.ResourceAgentName("ocf", "heartbeat", "Dummy"),
[
_fixture_action("off", False, False),
_fixture_action("monitor", False, False),
_fixture_action("demote", False, False),
],
).provides_promotability
)
def test_both_actions(self):
self.assertTrue(
_fixture_metadata(
ra.ResourceAgentName("ocf", "pacemaker", "Dummy"),
[
_fixture_action("on", False, False),
_fixture_action("off", False, False),
_fixture_action("monitor", False, False),
_fixture_action("demote", False, False),
_fixture_action("promote", False, False),
],
).provides_promotability
)
class UniqueParameterGroups(TestCase):
@staticmethod
def _fixture_metadata(parameters):
return ra.ResourceAgentMetadata(
ra.ResourceAgentName("ocf", "pacemaker", "Dummy"),
agent_exists=True,
ocf_version=ra.const.OCF_1_0,
shortdesc=None,
longdesc=None,
parameters=parameters,
actions=[],
)
@staticmethod
def _fixture_parameter(name, unique_group):
return ra.ResourceAgentParameter(
name,
shortdesc=None,
longdesc=None,
type="string",
default=None,
enum_values=None,
required=False,
advanced=False,
deprecated=False,
deprecated_by=[],
deprecated_desc=None,
unique_group=unique_group,
reloadable=False,
)
def test_no_groups(self):
self.assertEqual(
self._fixture_metadata(
[
self._fixture_parameter("param_1", None),
self._fixture_parameter("param_2", None),
self._fixture_parameter("param_3", None),
]
).unique_parameter_groups,
{},
)
def test_groups(self):
self.assertEqual(
self._fixture_metadata(
[
self._fixture_parameter("param_1", None),
self._fixture_parameter("param_2", "group_A"),
self._fixture_parameter("param_3", None),
self._fixture_parameter("param_4", ""),
self._fixture_parameter("param_5", "group_B"),
self._fixture_parameter("param_6", "group_A"),
]
).unique_parameter_groups,
{
"group_A": {"param_2", "param_6"},
"group_B": {"param_5"},
},
) | null |
583 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import os
import tempfile
import shutil
import IECore
import IECoreScene
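# The test below linearises each CurvesPrimitive and then checks, via primitive
# evaluators, that the original and linearised curves agree at matching v values.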
class CurveLineariserTest( unittest.TestCase ) :
def runTest( self, curves ) :
curves2 = IECoreScene.CurveLineariser()( input=curves, verticesPerSegment=1000 )
self.assertTrue( not curves.isSame( curves2 ) )
self.assertEqual( curves2.numCurves(), curves.numCurves() )
self.assertEqual( curves2.basis(), IECore.CubicBasisf.linear() )
self.assertEqual( curves2.periodic(), curves.periodic() )
self.assertEqual( curves.keys(), curves2.keys() )
self.assertTrue( curves2.arePrimitiveVariablesValid() )
e = IECoreScene.CurvesPrimitiveEvaluator( curves )
r = e.createResult()
e2 = IECoreScene.CurvesPrimitiveEvaluator( curves2 )
		r2 = e2.createResult()
curves["constantwidth"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.001 ) )
curves2["constantwidth"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 0.001 ) )
tempDir = tempfile.mkdtemp()
IECore.ObjectWriter( curves, os.path.join( tempDir, "c.cob" ) ).write()
IECore.ObjectWriter( curves2, os.path.join( tempDir, "c2.cob" ) ).write()
for curveIndex in range( 0, curves.numCurves() ) :
for i in range( 0, 100 ) :
v = float( i ) / 99
s = e.pointAtV( curveIndex, v, r )
s2 = e2.pointAtV( curveIndex, v, r2 )
self.assertTrue( s )
self.assertTrue( s2 )
for k in curves.keys() :
pv = r.primVar( curves[k] )
pv2 = r2.primVar( curves2[k] )
if isinstance( pv, ( float, int ) ) :
self.assertAlmostEqual( pv, pv2 )
elif isinstance( pv, ( imath.V3f, imath.Color3f ) ) :
self.assertTrue( pv.equalWithAbsError( pv2, 0.005 ) )
else :
self.assertEqual( pv, pv2 )
shutil.rmtree( tempDir )
def test3SegmentBSpline( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6 ] ),
IECore.CubicBasisf.bSpline(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 )
]
)
)
self.runTest( c )
def METHOD_NAME( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 8 ] ),
IECore.CubicBasisf.bSpline(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 ),
v( 2, 0, 0 )
]
)
)
self.runTest( c )
def test2Curve3SegmentBSpline( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6, 6 ] ),
IECore.CubicBasisf.bSpline(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 ),
v( 0, 2, 0 ),
v( 0, 1, 0 ),
v( 1, 1, 0 ),
v( 1, 2, 0 ),
v( 2, 2, 0 ),
v( 2, 1, 0 )
]
)
)
self.runTest( c )
def testPeriodicBSpline( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
]
)
)
self.runTest( c )
def test2CurvePeriodicBSpline( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 4, 4 ] ),
IECore.CubicBasisf.bSpline(),
True,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 0, 2, 0 ),
v( 0, 1, 0 ),
v( 1, 1, 0 ),
v( 1, 2, 0 ),
]
)
)
self.runTest( c )
def test3SegmentLinear( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 )
]
)
)
self.runTest( c )
def test3SegmentLinearDoubledEndpoints( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 8 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 ),
v( 2, 0, 0 )
]
)
)
self.runTest( c )
def test2Curve3SegmentLinear( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6, 6 ] ),
IECore.CubicBasisf.linear(),
False,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 ),
v( 0, 2, 0 ),
v( 0, 1, 0 ),
v( 1, 1, 0 ),
v( 1, 2, 0 ),
v( 2, 2, 0 ),
v( 2, 1, 0 )
]
)
)
self.runTest( c )
def test3SegmentPeriodicLinear( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6 ] ),
IECore.CubicBasisf.linear(),
True,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 )
]
)
)
self.runTest( c )
def test2Curve3SegmentPeriodicLinear( self ) :
v = imath.V3f
c = IECoreScene.CurvesPrimitive(
IECore.IntVectorData( [ 6, 6 ] ),
IECore.CubicBasisf.linear(),
True,
IECore.V3fVectorData(
[
v( 0, 1, 0 ),
v( 0, 0, 0 ),
v( 1, 0, 0 ),
v( 1, 1, 0 ),
v( 2, 1, 0 ),
v( 2, 0, 0 ),
v( 0, 2, 0 ),
v( 0, 1, 0 ),
v( 1, 1, 0 ),
v( 1, 2, 0 ),
v( 2, 2, 0 ),
v( 2, 1, 0 )
]
)
)
self.runTest( c )
if __name__ == "__main__":
unittest.main()
| null |
584 | import math
from logging import getLogger
from simulation.cell import Cell
from simulation.game_logic import SpawnLocationFinder
from simulation.interactables.pickups import ALL_PICKUPS
from simulation.interactables.score_location import ScoreLocation
from simulation.level_settings import DEFAULT_LEVEL_SETTINGS
from simulation.location import Location
from typing import List
LOGGER = getLogger(__name__)
class WorldMap(object):
"""
The non-player world state.
"""
def __init__(self, grid, settings):
"""
        :param grid: A mapping from each Location to the Cell placed there.
:param settings: Constant values provided when generating a level/map.
"""
self.grid = grid
self.settings = settings
self._spawn_location_finder = SpawnLocationFinder(self)
@classmethod
def _min_max_from_dimensions(cls, height, width):
"""
        The width and height supplied by the user are integers. From them we
        derive the minimum and maximum coordinates in each direction, so that
        the map is (approximately) centred on the origin.
"""
max_x = int(math.floor(width / 2))
min_x = -(width - max_x - 1)
max_y = int(math.floor(height / 2))
min_y = -(height - max_y - 1)
return min_x, max_x, min_y, max_y
@classmethod
def generate_empty_map(cls, height, width, settings):
new_settings = DEFAULT_LEVEL_SETTINGS.copy()
new_settings.update(settings)
(min_x, max_x, min_y, max_y) = WorldMap._min_max_from_dimensions(height, width)
grid = {}
for x in range(min_x, max_x + 1):
for y in range(min_y, max_y + 1):
location = Location(x, y)
grid[location] = Cell(location)
return cls(grid, new_settings)
def all_cells(self) -> List[Cell]:
return self.grid.values()
def interactable_cells(self):
return (cell for cell in self.all_cells() if cell.interactable)
def score_cells(self):
return (
cell
for cell in self.all_cells()
if isinstance(cell.interactable, ScoreLocation)
)
def pickup_cells(self):
return (
cell
for cell in self.all_cells()
if isinstance(cell.interactable, ALL_PICKUPS)
)
def is_on_map(self, location):
try:
self.grid[location]
except KeyError:
return False
return True
def get_cell(self, location) -> Cell:
try:
return self.grid[location]
except KeyError:
# For backwards-compatibility, this throws ValueError
raise ValueError("Location %s is not on the map" % location)
def get_cell_by_coords(self, x, y):
return self.get_cell(Location(x, y))
def clear_cell_actions(self, location):
try:
cell = self.get_cell(location)
cell.actions = []
except ValueError:
return
def max_y(self):
return max(self.grid.keys(), key=lambda c: c.y).y
def min_y(self):
return min(self.grid.keys(), key=lambda c: c.y).y
def max_x(self):
return max(self.grid.keys(), key=lambda c: c.x).x
def min_x(self):
return min(self.grid.keys(), key=lambda c: c.x).x
@property
def num_rows(self):
return self.max_y() - self.min_y() + 1
@property
def num_cols(self):
return self.max_x() - self.min_x() + 1
@property
def num_cells(self):
return self.num_rows * self.num_cols
def can_move_to(self, target_location):
if not self.is_on_map(target_location):
return False
cell = self.get_cell(target_location)
return (
cell.habitable
and (not cell.is_occupied or cell.avatar.is_moving)
and len(cell.moves) <= 1
)
def attackable_avatar(self, target_location):
"""
        Return the avatar at the given location if it can be attacked (or the
        avatar that will arrive there on the next move), else return None.
"""
try:
cell = self.get_cell(target_location)
except ValueError:
return None
if cell.avatar:
return cell.avatar
if len(cell.moves) == 1:
return cell.moves[0].avatar
return None
def get_no_fog_distance(self):
return self.settings["NO_FOG_OF_WAR_DISTANCE"]
def get_partial_fog_distance(self):
return self.settings["PARTIAL_FOG_OF_WAR_DISTANCE"]
def get_random_spawn_location(self):
return self._spawn_location_finder.get_random_spawn_location()
def __repr__(self):
return repr(self.grid)
def __iter__(self):
return (
(
self.get_cell(Location(x, y))
for y in range(self.min_y(), self.max_y() + 1)
)
for x in range(self.min_x(), self.max_x() + 1)
)
# Serialisation Utilities
def get_serialized_south_west_corner(self):
"""
Used in serialising the map size when sent to the front end. Very lightweight as
it consists of two integers.
:return: A dictionary with two values, x and y coordinates for the bottom left
(south-west) corner of the map.
"""
return {"x": self.min_x(), "y": self.min_y()}
def get_serialized_north_east_corner(self):
"""
Used in serialising the map size when sent to the front end. Very lightweight as
it consists of two integers.
:return: A dictionary with two values, x and y coordinates for the top right
        (north-east) corner of the map.
"""
return {"x": self.max_x(), "y": self.max_y()}
def serialize_score_location(self):
"""
Used to serialize the score locations on every update.
        :return: A list with one entry per score location, each holding the
        location's x and y coordinates.
"""
def get_coords(cell):
return {"location": {"x": cell.location.x, "y": cell.location.y}}
return [
get_coords(cell)
for cell in self.all_cells()
if isinstance(cell.interactable, ScoreLocation)
]
def METHOD_NAME(self):
"""
Used to serialize the obstacle locations on every update.
        :return: A list containing the serialized information for each obstacle cell.
"""
def serialize_obstacle(cell):
return {
"location": {"x": cell.location.x, "y": cell.location.y},
"width": 1,
"height": 1,
"type": "wall",
"orientation": "north",
}
return [
cell.obstacle.serialize(cell)
for cell in self.all_cells()
if not cell.habitable
]
def WorldMapStaticSpawnDecorator(world_map, spawn_location):
world_map._spawn_location_finder.get_random_spawn_location = lambda: spawn_location
return world_map | null |
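# --- Hypothetical usage sketch (illustrative only; assumes the simulation
# --- package is importable). Builds a 3x3 empty map centred on the origin.
if __name__ == "__main__":
    demo_map = WorldMap.generate_empty_map(3, 3, {})
    assert demo_map.num_cells == 9
    assert demo_map.get_serialized_south_west_corner() == {"x": -1, "y": -1}
    assert demo_map.get_serialized_north_east_corner() == {"x": 1, "y": 1}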
585 | """Pull request tests meant to be run with pytest."""
import os
from pathlib import Path
import pytest
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.fx.scroll import scroll
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.tools.interpolators import Trajectory
from moviepy.video.tools.subtitles import SubtitlesClip
from moviepy.video.VideoClip import ColorClip, ImageClip, TextClip
def test_PR_306():
assert TextClip.list("font") != []
assert TextClip.list("color") != []
with pytest.raises(Exception):
TextClip.list("blah")
def test_PR_339(util):
# In caption mode.
TextClip(
text="foo",
color="white",
font=util.FONT,
size=(640, 480),
method="caption",
align="center",
font_size=25,
).close()
# In label mode.
TextClip(text="foo", font=util.FONT, method="label").close()
def test_PR_373(util):
result = Trajectory.load_list("media/traj.txt")
Trajectory.save_list(result, os.path.join(util.TMP_DIR, "traj1.txt"))
result1 = Trajectory.load_list(os.path.join(util.TMP_DIR, "traj1.txt"))
assert len(result[0].tt) == len(result1[0].tt)
for i in range(len(result[0].tt)):
assert result[0].tt[i] == result1[0].tt[i]
assert len(result[0].xx) == len(result1[0].xx)
for i in range(len(result[0].xx)):
assert result[0].xx[i] == result1[0].xx[i]
assert len(result[0].yy) == len(result1[0].yy)
for i in range(len(result[0].yy)):
assert result[0].yy[i] == result1[0].yy[i]
def test_PR_458(util):
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=2)
clip.write_videofile(os.path.join(util.TMP_DIR, "test.mp4"), logger=None, fps=30)
clip.close()
def test_PR_515():
# Won't actually work until video is in download_media
with VideoFileClip("media/fire2.mp4", fps_source="tbr") as clip:
assert clip.fps == 90000
with VideoFileClip("media/fire2.mp4", fps_source="fps") as clip:
assert clip.fps == 10.51
def test_PR_528(util):
with ImageClip("media/vacation_2017.jpg") as clip:
new_clip = scroll(clip, w=1000, x_speed=50)
new_clip = new_clip.with_duration(0.2)
new_clip.fps = 24
new_clip.write_videofile(os.path.join(util.TMP_DIR, "pano.mp4"), logger=None)
def test_PR_529():
with VideoFileClip("media/fire2.mp4") as video_clip:
assert video_clip.rotation == 180
def METHOD_NAME():
"""Test that the max fps of video clips is used for the composite video clip."""
clip1 = ColorClip((640, 480), color=(255, 0, 0)).with_duration(1)
clip2 = ColorClip((640, 480), color=(0, 255, 0)).with_duration(1)
clip1.fps = 24
clip2.fps = 25
composite = CompositeVideoClip([clip1, clip2])
assert composite.fps == 25
def test_PR_1137_video(util, video):
"""Test support for path-like objects as arguments for VideoFileClip."""
with video(start_time=0.2, end_time=0.24) as video:
video.write_videofile(Path(util.TMP_DIR) / "pathlike.mp4", logger=None)
assert isinstance(video.filename, str)
def test_PR_1137_audio(util):
"""Test support for path-like objects as arguments for AudioFileClip."""
with AudioFileClip(Path("media/crunching.mp3")) as audio:
audio.write_audiofile(Path(util.TMP_DIR) / "pathlike.mp3")
assert isinstance(audio.filename, str)
def test_PR_1137_image():
"""Test support for path-like objects as arguments for ImageClip."""
ImageClip(Path("media/vacation_2017.jpg")).close()
def test_PR_1137_subtitles(util):
"""Test support for path-like objects as arguments for SubtitlesClip."""
def make_textclip(txt):
return TextClip(
txt,
font=util.FONT,
font_size=24,
color="white",
stroke_color="black",
stroke_width=0.5,
)
SubtitlesClip(Path("media/subtitles.srt"), make_textclip=make_textclip).close()
if __name__ == "__main__":
pytest.main() | null |
586 | from __future__ import annotations
import asyncio
import enum
import time
from functools import wraps
from typing import Any, Callable, Coroutine, MutableMapping, TypeVar, Protocol
from lru import LRU
R = TypeVar('R')
# Can't use ParamSpec due to https://github.com/python/typing/discussions/946
class CacheProtocol(Protocol[R]):
cache: MutableMapping[str, asyncio.Task[R]]
def __call__(self, *args: Any, **kwds: Any) -> asyncio.Task[R]:
...
def get_key(self, *args: Any, **kwargs: Any) -> str:
...
def invalidate(self, *args: Any, **kwargs: Any) -> bool:
...
def invalidate_containing(self, key: str) -> None:
...
def METHOD_NAME(self) -> tuple[int, int]:
...
class ExpiringCache(dict):
def __init__(self, seconds: float):
self.__ttl: float = seconds
super().__init__()
def __verify_cache_integrity(self):
# Have to do this in two steps...
current_time = time.monotonic()
to_remove = [k for (k, (v, t)) in self.items() if current_time > (t + self.__ttl)]
for k in to_remove:
del self[k]
def __contains__(self, key: str):
self.__verify_cache_integrity()
return super().__contains__(key)
def __getitem__(self, key: str):
self.__verify_cache_integrity()
return super().__getitem__(key)
def __setitem__(self, key: str, value: Any):
super().__setitem__(key, (value, time.monotonic()))
class Strategy(enum.Enum):
lru = 1
raw = 2
timed = 3
def cache(
maxsize: int = 128,
strategy: Strategy = Strategy.lru,
ignore_kwargs: bool = False,
) -> Callable[[Callable[..., Coroutine[Any, Any, R]]], CacheProtocol[R]]:
def decorator(func: Callable[..., Coroutine[Any, Any, R]]) -> CacheProtocol[R]:
if strategy is Strategy.lru:
_internal_cache = LRU(maxsize)
_stats = _internal_cache.METHOD_NAME
elif strategy is Strategy.raw:
_internal_cache = {}
_stats = lambda: (0, 0)
elif strategy is Strategy.timed:
_internal_cache = ExpiringCache(maxsize)
_stats = lambda: (0, 0)
def _make_key(args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
# this is a bit of a cluster fuck
# we do care what 'self' parameter is when we __repr__ it
def _true_repr(o):
if o.__class__.__repr__ is object.__repr__:
return f'<{o.__class__.__module__}.{o.__class__.__name__}>'
return repr(o)
key = [f'{func.__module__}.{func.__name__}']
key.extend(_true_repr(o) for o in args)
if not ignore_kwargs:
for k, v in kwargs.items():
# note: this only really works for this use case in particular
# I want to pass asyncpg.Connection objects to the parameters
# however, they use default __repr__ and I do not care what
# connection is passed in, so I needed a bypass.
if k == 'connection' or k == 'pool':
continue
key.append(_true_repr(k))
key.append(_true_repr(v))
return ':'.join(key)
@wraps(func)
def wrapper(*args: Any, **kwargs: Any):
key = _make_key(args, kwargs)
try:
task = _internal_cache[key]
except KeyError:
_internal_cache[key] = task = asyncio.create_task(func(*args, **kwargs))
return task
else:
return task
def _invalidate(*args: Any, **kwargs: Any) -> bool:
try:
del _internal_cache[_make_key(args, kwargs)]
except KeyError:
return False
else:
return True
def _invalidate_containing(key: str) -> None:
to_remove = []
for k in _internal_cache.keys():
if key in k:
to_remove.append(k)
for k in to_remove:
try:
del _internal_cache[k]
except KeyError:
continue
wrapper.cache = _internal_cache
wrapper.get_key = lambda *args, **kwargs: _make_key(args, kwargs)
wrapper.invalidate = _invalidate
wrapper.METHOD_NAME = _stats
wrapper.invalidate_containing = _invalidate_containing
return wrapper # type: ignore
return decorator | null |
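# --- Hypothetical usage sketch (illustrative only; fetch_profile is not part of
# --- this module). The decorator memoises one asyncio.Task per argument key.
if __name__ == '__main__':  # pragma: no cover
    @cache(maxsize=128, strategy=Strategy.lru)
    async def fetch_profile(user_id: int) -> str:
        await asyncio.sleep(0)  # stand-in for real I/O
        return f'profile:{user_id}'

    async def demo():
        first = await fetch_profile(42)   # runs the coroutine and caches the Task
        second = await fetch_profile(42)  # reuses the cached Task for the same key
        assert first == second
        assert fetch_profile.invalidate(42)  # evicts that single entry

    asyncio.run(demo())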
587 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2017-2023 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from django.contrib.contenttypes.models import ContentType
from django.template import Library
from django.utils.translation import gettext as _
from .. import get_concrete_model
from ..utils.translation import get_model_verbose_name
register = Library()
# TODO: {% ctype_for_instance %} ? (ctype_for_model already works for instances...)
# TODO: {% if object|ctype_is:my_ctype %} ?
@register.simple_tag
def ctype_for_model(model):
"""Returns an instance of ContentType for a model.
    @param model: Class inheriting django.db.models.Model.
@return: A ContentType instance.
{% ctype_for_model currency_model as currency_ctype %}
<h1>List of {{currency_ctype}}</h1>
"""
return ContentType.objects.get_for_model(model)
@register.simple_tag
def ctype_for_naturalkey(app_label, model):
"""Returns an instance of ContentType for the natural key of a model.
@param app_label: String identifying an app.
@param model: String identifying a model.
@return: A ContentType instance.
{% ctype_for_naturalkey app_label='creme_core' model='currency' as currency_ctype %}
<h1>List of {{currency_ctype}}</h1>
"""
return ContentType.objects.get_by_natural_key(app_label=app_label, model=model)
@register.simple_tag
def ctype_for_swappable(model_setting):
"""Returns an instance of ContentType for a swappable model.
@param model_setting: String identifying a swappable model.
@return: A ContentType instance.
{% ctype_for_swappable 'PERSONS_CONTACT_MODEL' as contact_ctype %}
<h1>List of {{contact_ctype}}</h1>
"""
return ContentType.objects.get_for_model(get_concrete_model(model_setting))
# TODO: ? (replace 'get_meta_value' which seems used only to retrieve verbose_name ?)
# @register.assignment_tag(name='get_model_verbose_name')
# def ctype_verbose_name(model, count):
# return get_model_verbose_name(model, count)
@register.simple_tag
def ctype_counted_instances_label(ctype, count):
""" Return a localized string, in order to display label like '1 Contact' or '3 Organisations'.
@param ctype: A ContentType instance relation to your model.
@param count: An Integer representing the number of instances of "model".
@return: A string.
{% ctype_for_swappable 'PERSONS_CONTACT_MODEL' as contact_ctype %}
{% ctype_counted_instances_label ctype=contact_ctype count=12 as my_label %}
<h1>{{my_label}}</h1>
"""
return _('{count} {model}').format(
count=count,
model=get_model_verbose_name(model=ctype.model_class(), count=count),
)
# TODO: what about the global registry ? take it from the context ?
@register.filter
def ctype_can_be_merged(ctype):
"""Indicates if 2 instances of a specific model can be used by the merging view of Creme.
@param ctype: A ContentType instance corresponding to your model
@return: A boolean.
{% if my_entity.entity_type|ctype_can_be_merged %}
<span>Can be merged !!</span>
{% endif %}
"""
from ..gui.merge import merge_form_registry
return merge_form_registry.get(ctype.model_class()) is not None
# TODO: what about the global registry ? take it from the context ?
@register.filter
def METHOD_NAME(ctype):
"""Indicates if some instances of a specific model can be created from a CSV/XLS/... file.
@param ctype: A ContentType instance corresponding to your model.
@return: A boolean.
{% if my_entity.entity_type|ctype_can_be_mass_imported %}
<span>Can be imported !!</span>
{% endif %}
"""
from ..gui.mass_import import import_form_registry
return import_form_registry.is_registered(ctype)
# TODO: what about the global registry ? take it from the context ?
@register.filter
def ctype_has_quickform(ctype):
from ..gui.quick_forms import quickforms_registry
return quickforms_registry.get_form_class(ctype.model_class()) is not None | null |
588 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDBInstancesOverviewRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dds', '2015-12-01', 'DescribeDBInstancesOverview','dds')
self.set_method('POST')
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def set_InstanceClass(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def METHOD_NAME(self): # String
return self.get_query_params().get('InstanceStatus')
def set_InstanceStatus(self, InstanceStatus): # String
self.add_query_param('InstanceStatus', InstanceStatus)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceIds(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
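# --- Hypothetical usage sketch (illustrative only; credentials and region are
# --- placeholders). A client would send this request roughly like:
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeDBInstancesOverviewRequest()
#   request.set_ChargeType('PostPaid')
#   response = client.do_action_with_exception(request)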
589 | import unittest
from unittest.mock import MagicMock, PropertyMock, patch
import pandas as pd
from hummingbot.client.config.client_config_map import ClientConfigMap
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams
from hummingbot.connector.test_support.mock_paper_exchange import MockPaperExchange
from hummingbot.core.clock import Clock
from hummingbot.core.clock_mode import ClockMode
from hummingbot.strategy.directional_strategy_base import DirectionalStrategyBase
class DirectionalStrategyBaseTest(unittest.TestCase):
level = 0
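    # The test case doubles as a logging handler: level = 0 admits every record,
    # handle() collects them, and _is_logged() asserts on the emitted messages.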
def handle(self, record):
self.log_records.append(record)
def _is_logged(self, log_level: str, message: str) -> bool:
return any(record.levelname == log_level and record.getMessage().startswith(message)
for record in self.log_records)
def setUp(self):
self.log_records = []
self.start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
self.end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")
self.start_timestamp: float = self.start.timestamp()
self.end_timestamp: float = self.end.timestamp()
self.connector_name: str = "mock_paper_exchange"
self.trading_pair: str = "HBOT-USDT"
self.base_asset, self.quote_asset = self.trading_pair.split("-")
self.base_balance: int = 500
self.quote_balance: int = 5000
self.initial_mid_price: int = 100
self.clock_tick_size = 1
self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
self.connector: MockPaperExchange = MockPaperExchange(
client_config_map=ClientConfigAdapter(ClientConfigMap())
)
self.connector.set_balanced_order_book(trading_pair=self.trading_pair,
mid_price=100,
min_price=50,
max_price=150,
price_step_size=1,
volume_step_size=10)
self.connector.set_balance(self.base_asset, self.base_balance)
self.connector.set_balance(self.quote_asset, self.quote_balance)
self.connector.set_quantization_param(
QuantizationParams(
self.trading_pair, 6, 6, 6, 6
)
)
self.clock.add_iterator(self.connector)
DirectionalStrategyBase.markets = {self.connector_name: {self.trading_pair}}
DirectionalStrategyBase.candles = []
DirectionalStrategyBase.exchange = self.connector_name
DirectionalStrategyBase.trading_pair = self.trading_pair
self.strategy = DirectionalStrategyBase({self.connector_name: self.connector})
self.strategy.logger().setLevel(1)
self.strategy.logger().addHandler(self)
def test_start(self):
self.assertFalse(self.strategy.ready_to_trade)
self.strategy.start(Clock(ClockMode.BACKTEST), self.start_timestamp)
self.strategy.tick(self.start_timestamp + 10)
self.assertTrue(self.strategy.ready_to_trade)
def test_all_candles_ready(self):
self.assertTrue(self.strategy.all_candles_ready)
def test_is_perpetual(self):
self.assertFalse(self.strategy.is_perpetual)
def test_candles_formatted_list(self):
columns = ["timestamp", "open", "low", "high", "close", "volume"]
candles_df = pd.DataFrame(columns=columns,
data=[[self.start_timestamp, 1, 2, 3, 4, 5],
[self.start_timestamp + 1, 2, 3, 4, 5, 6]])
candles_status = self.strategy.candles_formatted_list(candles_df, columns)
self.assertTrue("timestamp" in candles_status[0])
def test_get_active_executors(self):
self.assertEqual(0, len(self.strategy.get_active_executors()))
def METHOD_NAME(self):
self.assertEqual("Market connectors are not ready.", self.strategy.format_status())
@patch("hummingbot.strategy.directional_strategy_base.DirectionalStrategyBase.get_signal")
def test_format_status(self, signal_mock):
signal_mock.return_value = 0
self.clock.add_iterator(self.strategy)
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
position_executor_mock = MagicMock()
position_executor_mock.to_format_status = MagicMock(return_value=["mock_position_executor"])
self.strategy.stored_executors.append(position_executor_mock)
self.strategy.active_executors.append(position_executor_mock)
self.assertTrue("mock_position_executor" in self.strategy.format_status())
@patch("hummingbot.strategy.directional_strategy_base.DirectionalStrategyBase.get_signal", new_callable=MagicMock)
def test_get_position_config_signal_zero(self, signal):
signal.return_value = 0
self.assertIsNone(self.strategy.get_position_config())
@patch("hummingbot.strategy.directional_strategy_base.DirectionalStrategyBase.get_signal", new_callable=MagicMock)
def test_get_position_config_signal_positive(self, signal):
signal.return_value = 1
self.assertIsNotNone(self.strategy.get_position_config())
def test_time_between_signals_condition(self):
self.strategy.cooldown_after_execution = 10
stored_executor_mock = MagicMock()
stored_executor_mock.close_timestamp = self.start_timestamp
self.strategy.stored_executors = [stored_executor_mock]
# First scenario waiting for delay
type(self.strategy).current_timestamp = PropertyMock(return_value=self.start_timestamp + 5)
self.assertFalse(self.strategy.time_between_signals_condition)
# Second scenario delay passed
type(self.strategy).current_timestamp = PropertyMock(return_value=self.start_timestamp + 15)
self.assertTrue(self.strategy.time_between_signals_condition)
# Third scenario no stored executors
self.strategy.stored_executors = []
self.assertTrue(self.strategy.time_between_signals_condition)
def test_max_active_executors_condition(self):
self.strategy.max_executors = 1
active_executor_mock = MagicMock()
active_executor_mock.is_closed = False
self.strategy.active_executors = [active_executor_mock]
self.assertFalse(self.strategy.max_active_executors_condition)
self.strategy.active_executors = []
self.assertTrue(self.strategy.max_active_executors_condition) | null |
590 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ListSlotRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ListSlot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AdSlotType(self): # String
return self.get_query_params().get('AdSlotType')
def set_AdSlotType(self, AdSlotType): # String
self.add_query_param('AdSlotType', AdSlotType)
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_OriginSiteUserId(self): # String
return self.get_query_params().get('OriginSiteUserId')
def set_OriginSiteUserId(self, OriginSiteUserId): # String
self.add_query_param('OriginSiteUserId', OriginSiteUserId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_MediaName(self): # String
return self.get_query_params().get('MediaName')
def set_MediaName(self, MediaName): # String
self.add_query_param('MediaName', MediaName)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def METHOD_NAME(self): # String
return self.get_query_params().get('AdSlotStatus')
def set_AdSlotStatus(self, AdSlotStatus): # String
self.add_query_param('AdSlotStatus', AdSlotStatus)
def get_TenantId(self): # String
return self.get_query_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_query_param('TenantId', TenantId)
def get_AdSlotId(self): # String
return self.get_query_params().get('AdSlotId')
def set_AdSlotId(self, AdSlotId): # String
self.add_query_param('AdSlotId', AdSlotId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_AdSlotCorporateStatus(self): # String
return self.get_query_params().get('AdSlotCorporateStatus')
def set_AdSlotCorporateStatus(self, AdSlotCorporateStatus): # String
self.add_query_param('AdSlotCorporateStatus', AdSlotCorporateStatus)
def get_EndCreateTime(self): # Long
return self.get_query_params().get('EndCreateTime')
def set_EndCreateTime(self, EndCreateTime): # Long
self.add_query_param('EndCreateTime', EndCreateTime)
def get_Business(self): # String
return self.get_query_params().get('Business')
def set_Business(self, Business): # String
self.add_query_param('Business', Business)
def get_MediaId(self): # String
return self.get_query_params().get('MediaId')
def set_MediaId(self, MediaId): # String
self.add_query_param('MediaId', MediaId)
def get_Environment(self): # String
return self.get_query_params().get('Environment')
def set_Environment(self, Environment): # String
self.add_query_param('Environment', Environment)
def get_StartCreateTime(self): # Long
return self.get_query_params().get('StartCreateTime')
def set_StartCreateTime(self, StartCreateTime): # Long
self.add_query_param('StartCreateTime', StartCreateTime)
def get_UserSite(self): # String
return self.get_query_params().get('UserSite')
def set_UserSite(self, UserSite): # String
self.add_query_param('UserSite', UserSite)
def get_AdSlotName(self): # String
return self.get_query_params().get('AdSlotName')
def set_AdSlotName(self, AdSlotName): # String
self.add_query_param('AdSlotName', AdSlotName) | null |
591 | from unittest import TestCase
from pcs_test.tier1.cib_resource.common import get_cib_resources
from pcs_test.tools.cib import get_assert_pcs_effect_mixin
from pcs_test.tools.misc import get_test_resource as rc
from pcs_test.tools.misc import (
get_tmp_file,
write_data_to_tmpfile,
write_file_to_tmpfile,
)
from pcs_test.tools.pcs_runner import PcsRunner
class OperationAdd(TestCase, get_assert_pcs_effect_mixin(get_cib_resources)):
empty_cib = rc("cib-empty.xml")
def METHOD_NAME(self):
self.temp_cib = get_tmp_file("tier1_cib_resource_operation_add")
self.pcs_runner = PcsRunner(self.temp_cib.name)
write_data_to_tmpfile(self.fixture_cib_cache(), self.temp_cib)
def tearDown(self):
self.temp_cib.close()
def fixture_cib_cache(self):
if not hasattr(self.__class__, "cib_cache"):
self.__class__.cib_cache = self.fixture_cib()
return self.__class__.cib_cache
def fixture_cib(self):
write_file_to_tmpfile(self.empty_cib, self.temp_cib)
self.assert_pcs_success(
"resource create --no-default-ops R ocf:heartbeat:Dummy".split()
)
# add to cib:
# <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
# <operations>
# <op id="R-monitor-interval-60s" interval="60s"
# name="monitor"
# />
# </operations>
# </primitive>
self.temp_cib.seek(0)
return self.temp_cib.read()
def test_base_add(self):
self.assert_effect(
"resource op add R start interval=20s".split(),
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
<op id="R-monitor-interval-10s" interval="10s"
name="monitor" timeout="20s"
/>
<op id="R-start-interval-20s" interval="20s"
name="start"
/>
</operations>
</primitive>
</resources>""",
)
def test_add_with_OCF_CHECK_LEVEL(self):
# pylint: disable=invalid-name
self.assert_effect(
(
"resource op add R start interval=20s OCF_CHECK_LEVEL=1 "
"description=test-description"
).split(),
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
<op id="R-monitor-interval-10s" interval="10s"
name="monitor" timeout="20s"
/>
<op description="test-description" name="start"
id="R-start-interval-20s" interval="20s"
>
<instance_attributes
id="params-R-start-interval-20s"
>
<nvpair
id="R-start-interval-20s-OCF_CHECK_LEVEL-1"
name="OCF_CHECK_LEVEL" value="1"
/>
</instance_attributes>
</op>
</operations>
</primitive>
</resources>""",
)
def test_can_multiple_operation_add(self):
self.assert_effect(
"resource op add R start interval=20s".split(),
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
<op id="R-monitor-interval-10s" interval="10s"
name="monitor" timeout="20s"
/>
<op id="R-start-interval-20s" interval="20s"
name="start"
/>
</operations>
</primitive>
</resources>""",
)
self.assert_effect(
"resource op add R stop interval=30s".split(),
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
<op id="R-monitor-interval-10s" interval="10s"
name="monitor" timeout="20s"
/>
<op id="R-start-interval-20s" interval="20s"
name="start"
/>
<op id="R-stop-interval-30s" interval="30s"
name="stop"
/>
</operations>
</primitive>
</resources>""",
)
def test_id_specified(self):
self.assert_effect(
"resource op add R start timeout=30 id=abcd".split(),
"""<resources>
<primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
<operations>
<op id="R-monitor-interval-10s" interval="10s"
name="monitor" timeout="20s"
/>
<op id="abcd" interval="0s" name="start" timeout="30" />
</operations>
</primitive>
</resources>""",
)
def test_invalid_id(self):
self.assert_pcs_fail_regardless_of_force(
"resource op add R start timeout=30 id=ab#cd".split(),
"Error: invalid operation id 'ab#cd', '#' is not a valid"
" character for a operation id\n",
)
def test_duplicate_id(self):
self.assert_pcs_fail_regardless_of_force(
"resource op add R start timeout=30 id=R".split(),
"Error: id 'R' is already in use, please specify another one\n",
)
def test_unknown_option(self):
self.assert_pcs_fail(
"resource op add R start timeout=30 requires=quorum".split(),
(
"Error: requires is not a valid op option (use --force to "
"override)\n"
),
) | null |
592 | import gdb
import gdb.xmethod
import gdb.types
def is_wasm_pointer_type(type):
return type.name is not None and type.name.startswith('__wasm_pointer_t')
def is_wasm_reference_type(type):
return type.name is not None and type.name.startswith('__wasm_reference_t')
def is_wasm_rvalue_reference_type(type):
return type.name is not None and type.name.startswith('__wasm_rvalue_reference_t')
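# A wasm pointer here is a 32-bit offset into linear memory; these helpers assume
# the linear-memory base lives in the rsi register, so native address = base + offset.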
def translate_address(ptr, type=None):
if type is None:
type = ptr.type.strip_typedefs().template_argument(0)
frame = gdb.selected_frame()
linear_memory_base = frame.read_register('rsi')
native_address = linear_memory_base + ptr["__address"].cast(linear_memory_base.type)
return native_address.reinterpret_cast(type.pointer())
class WasmPointerWorker_deref(gdb.xmethod.XMethodWorker):
def __init__(self, class_type):
gdb.xmethod.XMethodWorker.__init__(self)
self.class_type = class_type
def get_arg_types(self):
return None
def get_result_type(self, obj):
return obj.type.template_argument(0)
def __call__(self, obj):
return translate_address(obj, self.class_type.template_argument(0)).dereference()
class WasmPointerWorker_arrow(gdb.xmethod.XMethodWorker):
def __init__(self, class_type):
gdb.xmethod.XMethodWorker.__init__(self)
self.class_type = class_type
def get_arg_types(self):
return None
def get_result_type(self, obj):
return obj.type.template_argument(0)
def __call__(self, obj):
return translate_address(obj, self.class_type.template_argument(0))
class WasmPointer_deref(gdb.xmethod.XMethod):
def __init__(self):
gdb.xmethod.XMethod.__init__(self, 'operator*')
def get_worker(self, method_name, class_type):
if method_name == 'operator*':
return WasmPointerWorker_deref(class_type)
class WasmPointer_arrow(gdb.xmethod.XMethod):
def __init__(self):
gdb.xmethod.XMethod.__init__(self, 'operator->')
def get_worker(self, method_name, class_type):
if method_name == 'operator->':
return WasmPointerWorker_arrow(class_type)
class WasmPointerMatcher(gdb.xmethod.XMethodMatcher):
def __init__(self):
gdb.xmethod.XMethodMatcher.__init__(self, "__wasm_pointer_t")
self.methods = [WasmPointer_deref(), WasmPointer_arrow()]
def match(self, class_type, method_name):
class_type = class_type.strip_typedefs()
if not is_wasm_pointer_type(class_type):
return None
result = []
for method in self.methods:
if method.enabled:
worker = method.get_worker(method_name, class_type)
if worker is not None:
result.append(worker)
return result
class WasmPointerTypePrinterImpl(object):
def recognize(self, type):
if is_wasm_pointer_type(type):
return str(type.template_argument(0).pointer())
elif is_wasm_reference_type(type):
return str(type.template_argument(0).reference())
elif is_wasm_rvalue_reference_type(type):
return str(type.template_argument(0)) + " &&"
class WasmPointerTypePrinter(gdb.types.TypePrinter):
def __init__(self):
gdb.types.TypePrinter.__init__(self, "__wasm_pointer_t")
def instantiate(self):
return WasmPointerTypePrinterImpl()
class WasmPointerPrinter(object):
"""Print a wasm pointer"""
def __init__(self, val):
self.val = val
def to_string(self):
return str(self.val["__address"])
def native_pretty_printer(val):
if is_wasm_pointer_type(val.type.strip_typedefs()):
return WasmPointerPrinter(val)
return None
if hasattr(gdb, "Parameter"):
class HideNative(gdb.Parameter):
"""Controls whether native frames and functions are visible"""
def __init__(self, name):
            super(HideNative, self).__init__(name, gdb.COMMAND_BREAKPOINTS, gdb.PARAM_BOOLEAN)
self.value = True
else:
class HideNative(object):
def __init__(self, name):
self.value = True
hide_native = HideNative("hide-native")
def is_wasm_frame(frame):
sal = frame.find_sal()
if (not sal.is_valid()):
return False
return sal.symtab is None or not sal.symtab.objfile.is_file
class WasmFilter:
def __init__(self):
self.name = "wasm-only"
self.priority = 100
self.enabled = True
def METHOD_NAME(self, frame_iter):
if hide_native.value:
return (x for x in frame_iter if is_wasm_frame(x.inferior_frame()))
else:
return frame_iter
wasm_filter = WasmFilter()
def is_wasm_address(pc):
sal = gdb.current_progspace().find_pc_line(pc)
if (not sal.is_valid()):
return False
return sal.symtab is None or not sal.symtab.objfile.is_file
def disable_native_breakpoints(breakpoint):
if hide_native.value and breakpoint.visible:
for loc in breakpoint.locations:
if loc.enabled and not is_wasm_address(loc.address):
loc.enabled = False
def register(objfile):
gdb.xmethod.register_xmethod_matcher(objfile, WasmPointerMatcher())
gdb.types.register_type_printer(objfile, WasmPointerTypePrinter())
objfile.pretty_printers.append(native_pretty_printer)
objfile.frame_filters[wasm_filter.name] = wasm_filter
if hasattr(gdb, "Parameter"):
gdb.events.breakpoint_created.connect(disable_native_breakpoints)
gdb.events.breakpoint_modified.connect(disable_native_breakpoints)
gdb.execute("set breakpoint pending on") | null |
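# Registration sketch (an addition for illustration, not part of the
# original script): GDB auto-load scripts typically register their
# helpers against the objfile currently being loaded, falling back to
# future objfiles when the script is sourced manually.
objfile = gdb.current_objfile()
if objfile is not None:
    register(objfile)
else:
    gdb.events.new_objfile.connect(lambda event: register(event.new_objfile))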
593 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import re
import textwrap
def escape_path(word):
return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
break_on_hyphens=False):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None, implicit_outputs=None, pool=None, dyndep=None):
outputs = as_list(outputs)
out_outputs = [escape_path(x) for x in outputs]
all_inputs = [escape_path(x) for x in as_list(inputs)]
if implicit:
implicit = [escape_path(x) for x in as_list(implicit)]
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = [escape_path(x) for x in as_list(order_only)]
all_inputs.append('||')
all_inputs.extend(order_only)
if implicit_outputs:
implicit_outputs = [escape_path(x)
for x in as_list(implicit_outputs)]
out_outputs.append('|')
out_outputs.extend(implicit_outputs)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if pool is not None:
self._line(' pool = %s' % pool)
if dyndep is not None:
self._line(' dyndep = %s' % dyndep)
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if (space < 0 or
self._count_dollars_before_index(text, space) % 2 == 0):
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if (space < 0 or
self._count_dollars_before_index(text, space) % 2 == 0):
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def close(self):
self.output.close()
def as_list(input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
def expand(string, vars, local_vars={}):
"""Expand a string containing $vars as Ninja would.
Note: doesn't handle the full Ninja variable syntax, but it's enough
to make configure.py's use of it work.
"""
def METHOD_NAME(m):
var = m.group(1)
if var == '$':
return '$'
return local_vars.get(var, vars.get(var, ''))
return re.sub(r'\$(\$|\w*)', METHOD_NAME, string) | null |
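# Usage sketch (illustrative; the file name, rule, and sources are
# assumptions chosen for the example): emit a minimal build.ninja with
# one variable, one rule, and one build edge.
if __name__ == '__main__':
    with open('build.ninja', 'w') as f:
        n = Writer(f)
        n.comment('generated by the ninja_syntax usage example')
        n.variable('cflags', '-O2 -Wall')
        n.rule('cc', command='gcc $cflags -c $in -o $out',
               description='CC $out')
        n.build('hello.o', 'cc', inputs='hello.c')
        n.default('hello.o')
    # expand() resolves $vars the way Ninja would, e.g.:
    # expand('$foo/$$bar', {'foo': 'x'}) == 'x/$bar'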
594 | """Representation of the Experiment Detail Page."""
from pages.base import Base
from pypom import Region
from selenium.webdriver.common.by import By
class DetailPage(Base):
URL_TEMPLATE = "{experiment_url}"
_begin_signoffs_btn_locator = (By.CSS_SELECTOR, ".proceed-status-color")
_confirm_ship_btn_locator = (By.CSS_SELECTOR, ".proceed-status-color")
_edit_branches_btn_locator = (By.CSS_SELECTOR, "#branches-edit-btn")
_required_checklist_locator = (By.CSS_SELECTOR, ".checkbox")
_save_signoffs_btn_locator = (By.CSS_SELECTOR, "form .btn-success")
_send_to_normandy_btn_locator = (By.CSS_SELECTOR, ".btn-danger")
_page_wait_locator = (By.CSS_SELECTOR, "body.page-detail-view")
def wait_for_page_to_load(self):
self.wait.until(
lambda _: self.find_element(*self._page_wait_locator).is_displayed()
)
return self
@property
def objective_section(self):
return self.ObjectivesRegion(self)
@property
def analysis_section(self):
return self.AnalysisRegion(self)
@property
def required_checklist_section(self):
elements = self.find_elements(*self._required_checklist_locator)
return [self.RequiredChecklist(self, el) for el in elements]
@property
def begin_signoffs_button(self):
element = self.find_element(*self._begin_signoffs_btn_locator)
assert "Begin Sign-Offs" in element.text
return element
@property
def ready_to_ship_button(self):
element = self.find_element(*self._confirm_ship_btn_locator)
assert "Confirm Ready to Ship" in element.text
return element
@property
def send_to_normandy_button(self):
return self.find_element(*self._send_to_normandy_btn_locator)
@property
def save_sign_offs_button(self):
return self.find_element(*self._save_signoffs_btn_locator)
def click_edit(self):
self.find_element(*self._edit_branches_btn_locator).click()
from pages.experiment_design import DesignPage
return DesignPage(self.driver, self.base_url).wait_for_page_to_load()
class ObjectivesRegion(Region):
_edit_btn_locator = (By.CSS_SELECTOR, "#objectives-edit-btn")
_detail_locator = (By.CSS_SELECTOR, "#objectives-section-detail")
def click_edit(self):
self.find_element(*self._edit_btn_locator).click()
from pages.experiment_objective_and_analysis import ObjectiveAndAnalysisPage
return ObjectiveAndAnalysisPage(
self.driver, self.page.base_url
).wait_for_page_to_load()
@property
def text(self):
element = self.find_element(*self._detail_locator)
return element.text
class AnalysisRegion(Region):
_edit_btn_locator = (By.CSS_SELECTOR, "#analysis-edit-btn")
_detail_locator = (
By.CSS_SELECTOR,
"#analysis-section-detail > #analysis-content",
)
_survey_checkbox_locator = (
By.CSS_SELECTOR,
"#analysis-section-detail > #analysis-survey-required",
)
_survey_urls_locator = (
By.CSS_SELECTOR,
"#analysis-section-detail > #analysis-survey-urls",
)
_survey_launch_instructions_locator = (
By.CSS_SELECTOR,
"#analysis-section-detail > #analysis-survey-launch-instructions",
)
def click_edit(self):
self.find_element(*self._edit_btn_locator).click()
from pages.experiment_objective_and_analysis import ObjectiveAndAnalysisPage
return ObjectiveAndAnalysisPage(
self.driver, self.page.base_url
).wait_for_page_to_load()
@property
def text(self):
element = self.find_element(*self._detail_locator)
return element.text
@property
    def survey_required(self):
element = self.find_element(*self._survey_checkbox_locator)
return element.text
@property
def survey_urls(self):
element = self.find_element(*self._survey_urls_locator)
return element.text
@property
def survey_launch_instructions(self):
element = self.find_element(*self._survey_launch_instructions_locator)
return element.get_attribute("textContent")
class RequiredChecklist(Region):
_checkbox_locator = (By.CSS_SELECTOR, "input")
_checklist_item_label = (By.CSS_SELECTOR, "label")
@property
def label(self):
return self.find_element(*self._checklist_item_label).text
@property
def METHOD_NAME(self):
return self.find_element(*self._checkbox_locator) | null |
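# Usage sketch (an assumption based on typical pypom page objects;
# `selenium`, `base_url`, and `experiment_url` are hypothetical test
# fixtures, not names defined in this module):
#
#   page = DetailPage(
#       selenium, base_url, experiment_url=experiment_url
#   ).open()
#   page.objective_section.click_edit()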
595 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkuis.endpoint import endpoint_data
class CreateUisRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Uis', '2018-08-21', 'CreateUis','uis')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_BandwidthType(self):
return self.get_query_params().get('BandwidthType')
def set_BandwidthType(self,BandwidthType):
self.add_query_param('BandwidthType',BandwidthType)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_Description(self):
return self.get_query_params().get('Description')
def METHOD_NAME(self,Description):
self.add_query_param('Description',Description)
def get_ServiceRegion(self):
return self.get_query_params().get('ServiceRegion')
def set_ServiceRegion(self,ServiceRegion):
self.add_query_param('ServiceRegion',ServiceRegion)
def get_Duration(self):
return self.get_query_params().get('Duration')
def set_Duration(self,Duration):
self.add_query_param('Duration',Duration)
def get_UisProtocol(self):
return self.get_query_params().get('UisProtocol')
def set_UisProtocol(self,UisProtocol):
self.add_query_param('UisProtocol',UisProtocol)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_AccessType(self):
return self.get_query_params().get('AccessType')
def set_AccessType(self,AccessType):
self.add_query_param('AccessType',AccessType)
def get_AutoPay(self):
return self.get_query_params().get('AutoPay')
def set_AutoPay(self,AutoPay):
self.add_query_param('AutoPay',AutoPay)
def get_ConnectionCount(self):
return self.get_query_params().get('ConnectionCount')
def set_ConnectionCount(self,ConnectionCount):
self.add_query_param('ConnectionCount',ConnectionCount)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_Bandwidth(self):
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self,Bandwidth):
self.add_query_param('Bandwidth',Bandwidth)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InternetChargeType(self):
return self.get_query_params().get('InternetChargeType')
def set_InternetChargeType(self,InternetChargeType):
self.add_query_param('InternetChargeType',InternetChargeType)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle)
def get_ConnectionBandwidth(self):
return self.get_query_params().get('ConnectionBandwidth')
def set_ConnectionBandwidth(self,ConnectionBandwidth):
		self.add_query_param('ConnectionBandwidth',ConnectionBandwidth)
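# Usage sketch (credentials, region, and values are placeholders; the
# AcsClient calling pattern is the standard one for aliyunsdkcore):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateUisRequest()
#   request.set_Name('example-uis')
#   request.set_ServiceRegion('cn-hangzhou')
#   print(client.do_action_with_exception(request))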
596 | import collections.abc
import os
import warnings
import h5py
import numpy
from ..adapters.utils import IndexersMixin
from ..iterviews import ItemsView, KeysView, ValuesView
from ..structures.core import StructureFamily
from ..utils import node_repr
from .array import ArrayAdapter
SWMR_DEFAULT = bool(int(os.getenv("TILED_HDF5_SWMR_DEFAULT", "0")))
INLINED_DEPTH = int(os.getenv("TILED_HDF5_INLINED_CONTENTS_MAX_DEPTH", "7"))
def from_dataset(dataset):
return ArrayAdapter.from_array(dataset, metadata=getattr(dataset, "attrs", {}))
class HDF5Adapter(collections.abc.Mapping, IndexersMixin):
"""
Read an HDF5 file or a group within one.
    This maps the structure of an HDF5 file onto a "Tree" of array structures.
Examples
--------
From the root node of a file given a filepath
>>> import h5py
>>> HDF5Adapter.from_file("path/to/file.h5")
From the root node of a file given an h5py.File object
>>> import h5py
>>> file = h5py.File("path/to/file.h5")
>>> HDF5Adapter.from_file(file)
From a group within a file
>>> import h5py
>>> file = h5py.File("path/to/file.h5")
    >>> HDF5Adapter(file["some_group"]["some_sub_group"])
"""
structure_family = StructureFamily.container
def __init__(
self, node, *, structure=None, metadata=None, specs=None, access_policy=None
):
self._node = node
self._access_policy = access_policy
self.specs = specs or []
self._provided_metadata = metadata or {}
super().__init__()
@classmethod
def from_file(
cls,
file,
*,
structure=None,
metadata=None,
swmr=SWMR_DEFAULT,
libver="latest",
specs=None,
access_policy=None,
):
if not isinstance(file, h5py.File):
file = h5py.File(file, "r", swmr=swmr, libver=libver)
return cls(file, metadata=metadata, specs=specs, access_policy=access_policy)
def __repr__(self):
return node_repr(self, list(self))
@property
def access_policy(self):
return self._access_policy
def structure(self):
return None
def metadata(self):
d = dict(self._node.attrs)
for k, v in list(d.items()):
# Convert any bytes to str.
if isinstance(v, bytes):
d[k] = v.decode()
d.update(self._provided_metadata)
return d
def __iter__(self):
yield from self._node
def __getitem__(self, key):
value = self._node[key]
if isinstance(value, h5py.Group):
return HDF5Adapter(value)
else:
if value.dtype == numpy.dtype("O"):
warnings.warn(
f"The dataset {key} is of object type, using a "
"Python-only feature of h5py that is not supported by "
"HDF5 in general. Read more about that feature at "
"https://docs.h5py.org/en/stable/special.html. "
"Consider using a fixed-length field instead. "
"Tiled will serve an empty placeholder, unless the "
"object is of size 1, where it will attempt to repackage "
"the data into a numpy array."
)
check_str_dtype = h5py.check_string_dtype(value.dtype)
if check_str_dtype.length is None:
dataset_names = value.file[self._node.name + "/" + key][...][()]
if value.size == 1:
arr = numpy.array(dataset_names)
return from_dataset(arr)
return from_dataset(numpy.array([]))
return from_dataset(value)
def __len__(self):
return len(self._node)
def keys(self):
return KeysView(lambda: len(self), self._keys_slice)
def values(self):
return ValuesView(lambda: len(self), self._items_slice)
def items(self):
return ItemsView(lambda: len(self), self._items_slice)
def search(self, query):
"""
Return a Tree with a subset of the mapping.
"""
raise NotImplementedError
def METHOD_NAME(self, fields=None):
if fields is not None:
raise NotImplementedError
return self
# The following two methods are used by keys(), values(), items().
def _keys_slice(self, start, stop, direction):
keys = list(self._node)
if direction < 0:
keys = reversed(keys)
return keys[start:stop]
def _items_slice(self, start, stop, direction):
items = [(key, self[key]) for key in list(self)]
if direction < 0:
items = reversed(items)
return items[start:stop]
def inlined_contents_enabled(self, depth):
return depth <= INLINED_DEPTH | null |
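# Usage sketch ("example.h5" and its contents are hypothetical):
#
#   adapter = HDF5Adapter.from_file("example.h5")
#   print(list(adapter))        # top-level group/dataset names
#   print(adapter.metadata())   # root attrs merged with provided metadata
#   arr = adapter["some_dataset"].read()  # via the wrapped ArrayAdapter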
597 | import imdb
from colorama import Fore, Style
from plugin import plugin, require
from functools import lru_cache
app = imdb.IMDb()
def main(jarvis, movie):
movie_id = search_movie(jarvis, movie)
if movie_id is None:
return None
return get_movie_by_id(movie_id)
@lru_cache(maxsize=50, typed=False)
def search_movie(jarvis, movie, all_results=False):
if movie == '':
jarvis.say("Please add movie name!", Fore.RED)
return None
results = app.search_movie(movie, results=10)
if not results:
jarvis.say("Error: Did not find movie!", Fore.RED)
return None
if not all_results:
first = results[0]
return first.movieID
return results
@lru_cache(maxsize=20, typed=False)
def get_movie_by_id(movie_id):
return app.get_movie(movie_id)
@require(network=True)
@plugin('movie cast')
def movie_cast(jarvis, movie):
    """List the cast of the given movie."""
data = main(jarvis, movie)
if data is not None:
for d in data['cast']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie director')
def movie_director(jarvis, movie):
    """List the director(s) of the given movie."""
data = main(jarvis, movie)
if data is not None:
for d in data['director']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie plot')
def movie_plot(jarvis, movie):
    """Print the plot outline and plot of the given movie."""
data = main(jarvis, movie)
if data is not None:
if 'plot outline' in data:
jarvis.say('Plot outline:', Fore.GREEN)
jarvis.say(data['plot outline'])
jarvis.say('')
if 'plot' in data:
jarvis.say('Plot:', Fore.GREEN)
for d in data['plot']:
jarvis.say(d)
@require(network=True)
@plugin('movie producer')
def METHOD_NAME(jarvis, movie):
    """List the producers of the given movie."""
data = main(jarvis, movie)
if data is not None:
for d in data['producers']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie rating')
def movie_rating(jarvis, movie):
    """Print the IMDB rating of the given movie."""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['rating']))
@require(network=True)
@plugin('movie year')
def movie_year(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['year']))
@require(network=True)
@plugin('movie runtime')
def movie_runtime(jarvis, movie):
    """Print the runtime of the given movie in minutes."""
data = main(jarvis, movie)
if data is not None:
if 'runtimes' in data:
jarvis.say(str(data['runtimes'][0]) + ' minutes')
else:
jarvis.say("No runtime data present")
@require(network=True)
@plugin('movie countries')
def movie_countries(jarvis, movie):
    """List the countries of the given movie."""
data = main(jarvis, movie)
if data is not None:
for d in data['countries']:
jarvis.say(str(d))
@require(network=True)
@plugin('movie genres')
def movie_genres(jarvis, movie):
    """List the genres of the given movie."""
data = main(jarvis, movie)
if data is not None:
for d in data['genres']:
jarvis.say(d)
@require(network=True)
@plugin('movie info')
def movie_info(jarvis, movie):
"""
Display table with various information
"""
data = main(jarvis, movie)
if data is not None:
get_movie_info(jarvis, data)
@require(network=True)
@plugin('movie search')
def movie_search(jarvis, movie):
""" search for a movie on IMDB"""
results = search_movie(jarvis, movie, all_results=True)
# if results is None or empty
if not results:
return None
# get only movies from the results, filtering out TV series, etc
movie_results = []
for item in results:
if item['kind'] == 'movie':
movie_results.append(item)
if len(movie_results) > 5:
count = 5
else:
count = len(movie_results)
jarvis.say('')
space = ' '
text = 'ID'
text += space * 3 + 'Movie title'
jarvis.say(text, Fore.GREEN)
for i in range(count):
item = movie_results[i]
text = Fore.GREEN + str(i + 1) + space * 4
text += Fore.RESET + item['smart long imdb canonical title']
jarvis.say(text)
jarvis.say('')
jarvis.say('Please enter ID to know more(q - quit):')
input_id = jarvis.input()
# If nothing is entered, just return
if input_id == '':
return None
    if input_id == 'q':
        return None
    if len(input_id) != 1 or input_id not in '123456789':
        return jarvis.say(Fore.RED + 'Please enter valid value')
    input_id = int(input_id)
    # reject IDs outside the printed list
    if input_id > count or input_id < 1:
        return jarvis.say(Fore.RED + 'Please enter id from the given list')
movie_id = movie_results[input_id - 1].movieID
data = get_movie_by_id(movie_id)
get_movie_info(jarvis, data)
def colorized_output(key, value):
"""
pretty print key value pair
"""
green_text = Fore.GREEN + "{:<14}".format(key)
normal_text = Style.RESET_ALL + ": " + str(value)
return green_text + normal_text
def get_movie_info(jarvis, data):
"""
Takes a movie attributes as input and prints them accordingly
"""
jarvis.say('')
jarvis.say(
'What type of information do you want: cast, producers, genres, etc.?')
jarvis.say('Write one after another separated by space, please:')
movie_attributes = jarvis.input()
movie_attributes = movie_attributes.split()
jarvis.say('')
for attribute in movie_attributes:
if attribute in data:
value = data[attribute]
if attribute == 'genres':
value = ', '.join(value)
if attribute == 'cast':
lst = [person['name'] for person in value]
value = ', '.join(lst[0:3])
if isinstance(value, list):
value = value[0]
jarvis.say(colorized_output(attribute.capitalize(), str(value)))
else:
jarvis.say(
colorized_output(
attribute.capitalize(),
'no information retrieved'))
# print IMDB url of the movie
movie_url = app.urls['movie_base'] + 'tt' + data.movieID
jarvis.say(colorized_output('IMDB url', movie_url))
jarvis.say('') | null |
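# Usage sketch (hypothetical session; the command names come from the
# @plugin decorators above). Inside Jarvis one would type, e.g.:
#
#   movie rating The Matrix
#   movie info Inception
#   movie search Alien      # then pick an ID from the printed list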
598 | # -*- coding: utf-8 -*-
"""
Webring plugin for Pelican
==========================
A plugin to create a webring in your web site from a list of web feeds.
"""
import re
from six.moves.urllib.request import Request, urlopen
from six.moves.urllib.error import URLError
from collections import namedtuple
import logging
from operator import attrgetter
from pelican import signals, utils
log = logging.getLogger(__name__)
try:
import feedparser
except ImportError:
log.warning('Webring Plugin: Failed to load dependency (feedparser)')
WEBRING_VERSION = '0.1'
WEBRING_FEED_URLS_STR = 'WEBRING_FEED_URLS'
WEBRING_MAX_ARTICLES_STR = 'WEBRING_MAX_ARTICLES'
WEBRING_ARTICLES_PER_FEED_STR = 'WEBRING_ARTICLES_PER_FEED'
WEBRING_SUMMARY_LENGTH_STR = 'WEBRING_SUMMARY_LENGTH'
WEBRING_CLEAN_SUMMARY_HTML_STR = 'WEBRING_CLEAN_SUMMARY_HTML'
Article = namedtuple(
'Article',
['title', 'link', 'date', 'summary',
'source_title', 'source_link', 'source_id'])
def register():
"""Signal registration."""
signals.initialized.connect(initialized)
signals.all_generators_finalized.connect(fetch_feeds)
def initialized(pelican):
from pelican.settings import DEFAULT_CONFIG
DEFAULT_CONFIG.setdefault(WEBRING_FEED_URLS_STR, [])
DEFAULT_CONFIG.setdefault(WEBRING_MAX_ARTICLES_STR, 3)
DEFAULT_CONFIG.setdefault(WEBRING_ARTICLES_PER_FEED_STR, 1)
DEFAULT_CONFIG.setdefault(WEBRING_SUMMARY_LENGTH_STR, 128)
DEFAULT_CONFIG.setdefault(WEBRING_CLEAN_SUMMARY_HTML_STR, True)
if pelican:
for name, value in DEFAULT_CONFIG.items():
if name.startswith('WEBRING'):
pelican.settings.setdefault(name, value)
def fetch_feeds(generators):
settings = get_pelican_settings(generators)
fetched_articles = []
for feed_url in settings[WEBRING_FEED_URLS_STR]:
feed_html = get_feed_html(feed_url)
if feed_html:
fetched_articles.extend(
get_feed_articles(feed_html, feed_url, settings)
)
fetched_articles = sorted(
fetched_articles, key=attrgetter('date'), reverse=True)
max_articles = settings[WEBRING_MAX_ARTICLES_STR]
if len(fetched_articles) > max_articles:
fetched_articles = fetched_articles[:max_articles]
for generator in generators:
generator.context['webring_articles'] = fetched_articles
def get_pelican_settings(generators):
"""All generators contain a reference to the Pelican settings."""
assert len(generators) > 0
return generators[0].settings
def get_feed_html(feed_url):
try:
req = Request(feed_url)
req.add_header(
'User-Agent',
(
'Webring Pelican plugin/{} '
+ '+https://github.com/pelican/pelican-plugins'
).format(WEBRING_VERSION)
)
return urlopen(req).read().decode('utf-8')
except URLError as e:
if hasattr(e, 'reason'):
            log.warning('webring plugin: failed to connect to feed url '
                        '(%s): %s', feed_url, e.reason)
if hasattr(e, 'code'):
log.warning('webring plugin: server returned %s error (%s).',
e.code, feed_url)
except ValueError as e:
log.warning('webring plugin: wrong url provided (%s).', e)
def get_feed_articles(feed_html, feed_url, settings):
parsed_feed = feedparser.parse(feed_html)
if parsed_feed.bozo:
log.warning('webring plugin: possible malformed or invalid feed (%s). '
'Error=%s', feed_url, parsed_feed.bozo_exception)
articles = []
for n, entry in enumerate(parsed_feed.entries):
if n == settings[WEBRING_ARTICLES_PER_FEED_STR]:
break
published_dt = get_entry_datetime(entry)
truncated_summary = METHOD_NAME(entry, settings)
articles.append(
Article(
title=entry.get('title', ''),
link=entry.get('link', ''),
date=published_dt,
summary=truncated_summary,
source_title=parsed_feed.feed.get('title', ''),
source_link=parsed_feed.feed.get('link', ''),
source_id=parsed_feed.feed.get('id', '')))
return articles
def get_entry_datetime(entry):
try:
return utils.get_date(entry.get('published', ''))
except ValueError:
log.warning(
'Webring Plugin: Invalid date on feed entry titled "%s"'
% entry.get('title', 'Unknown title'))
return utils.SafeDatetime.now()
def METHOD_NAME(entry, settings):
# https://stackoverflow.com/a/12982689/11441
def cleanhtml(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
summary = entry.get('description', '')
# feedparser sanitizes html by default, but it can still contain html tags.
if settings[WEBRING_CLEAN_SUMMARY_HTML_STR]:
summary = cleanhtml(summary)
if len(summary) > settings[WEBRING_SUMMARY_LENGTH_STR]:
words = summary.split()
summary = ''
for w in words:
chars_left = settings[WEBRING_SUMMARY_LENGTH_STR] - len(summary)
if chars_left > 0:
summary += w if chars_left < len(w) else w[:chars_left]
summary += ' '
else:
break
summary = summary[:len(summary)-1] + "..."
return summary | null |
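# Configuration sketch (values are placeholders; the setting names are
# the ones this plugin reads). In pelicanconf.py:
#
#   WEBRING_FEED_URLS = ['https://example.org/feeds/all.atom.xml']
#   WEBRING_MAX_ARTICLES = 3
#   WEBRING_ARTICLES_PER_FEED = 1
#   WEBRING_SUMMARY_LENGTH = 128
#   WEBRING_CLEAN_SUMMARY_HTML = True
#
# Templates can then iterate the `webring_articles` context variable.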
599 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class AddClientUserDefineRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'AddClientUserDefineRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ActionType(self): # Integer
return self.get_query_params().get('ActionType')
def set_ActionType(self, ActionType): # Integer
self.add_query_param('ActionType', ActionType)
def get_NewFilePath(self): # String
return self.get_query_params().get('NewFilePath')
def set_NewFilePath(self, NewFilePath): # String
self.add_query_param('NewFilePath', NewFilePath)
def get_Type(self): # Integer
return self.get_query_params().get('Type')
def set_Type(self, Type): # Integer
self.add_query_param('Type', Type)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
def get_RegistryKey(self): # String
return self.get_query_params().get('RegistryKey')
def set_RegistryKey(self, RegistryKey): # String
self.add_query_param('RegistryKey', RegistryKey)
def get_Cmdline(self): # String
return self.get_query_params().get('Cmdline')
def METHOD_NAME(self, Cmdline): # String
self.add_query_param('Cmdline', Cmdline)
def get_FilePath(self): # String
return self.get_query_params().get('FilePath')
def set_FilePath(self, FilePath): # String
self.add_query_param('FilePath', FilePath)
def get_Md5List(self): # String
return self.get_query_params().get('Md5List')
def set_Md5List(self, Md5List): # String
self.add_query_param('Md5List', Md5List)
def get_ParentProcPath(self): # String
return self.get_query_params().get('ParentProcPath')
def set_ParentProcPath(self, ParentProcPath): # String
self.add_query_param('ParentProcPath', ParentProcPath)
def get_ProcPath(self): # String
return self.get_query_params().get('ProcPath')
def set_ProcPath(self, ProcPath): # String
self.add_query_param('ProcPath', ProcPath)
def get_ParentCmdline(self): # String
return self.get_query_params().get('ParentCmdline')
def set_ParentCmdline(self, ParentCmdline): # String
self.add_query_param('ParentCmdline', ParentCmdline)
def get_IP(self): # String
return self.get_query_params().get('IP')
def set_IP(self, IP): # String
self.add_query_param('IP', IP)
def get_RegistryContent(self): # String
return self.get_query_params().get('RegistryContent')
def set_RegistryContent(self, RegistryContent): # String
self.add_query_param('RegistryContent', RegistryContent)
def get_PortStr(self): # String
return self.get_query_params().get('PortStr')
def set_PortStr(self, PortStr): # String
self.add_query_param('PortStr', PortStr)
def get_Port(self): # Integer
return self.get_query_params().get('Port')
def set_Port(self, Port): # Integer
self.add_query_param('Port', Port)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |