id (int64: 0–6k) | code (string: 4k–8k) | code_compressed (null)
---|---|---
id: 1,000
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class ModifyBackupPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyBackupPolicy')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_LocalLogRetentionHours(self): # String
return self.get_query_params().get('LocalLogRetentionHours')
def set_LocalLogRetentionHours(self, LocalLogRetentionHours): # String
self.add_query_param('LocalLogRetentionHours', LocalLogRetentionHours)
def get_BackupPriority(self): # Integer
return self.get_query_params().get('BackupPriority')
def set_BackupPriority(self, BackupPriority): # Integer
self.add_query_param('BackupPriority', BackupPriority)
def get_LogBackupFrequency(self): # String
return self.get_query_params().get('LogBackupFrequency')
def set_LogBackupFrequency(self, LogBackupFrequency): # String
self.add_query_param('LogBackupFrequency', LogBackupFrequency)
def get_ArchiveBackupKeepCount(self): # Integer
return self.get_query_params().get('ArchiveBackupKeepCount')
def set_ArchiveBackupKeepCount(self, ArchiveBackupKeepCount): # Integer
self.add_query_param('ArchiveBackupKeepCount', ArchiveBackupKeepCount)
def get_BackupLog(self): # String
return self.get_query_params().get('BackupLog')
def set_BackupLog(self, BackupLog): # String
self.add_query_param('BackupLog', BackupLog)
def get_BackupInterval(self): # String
return self.get_query_params().get('BackupInterval')
def set_BackupInterval(self, BackupInterval): # String
self.add_query_param('BackupInterval', BackupInterval)
def get_HighSpaceUsageProtection(self): # String
return self.get_query_params().get('HighSpaceUsageProtection')
    def set_HighSpaceUsageProtection(self, HighSpaceUsageProtection): # String
self.add_query_param('HighSpaceUsageProtection', HighSpaceUsageProtection)
def get_LogBackupLocalRetentionNumber(self): # Integer
return self.get_query_params().get('LogBackupLocalRetentionNumber')
def set_LogBackupLocalRetentionNumber(self, LogBackupLocalRetentionNumber): # Integer
self.add_query_param('LogBackupLocalRetentionNumber', LogBackupLocalRetentionNumber)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_EnableBackupLog(self): # String
return self.get_query_params().get('EnableBackupLog')
def set_EnableBackupLog(self, EnableBackupLog): # String
self.add_query_param('EnableBackupLog', EnableBackupLog)
def get_BackupPolicyMode(self): # String
return self.get_query_params().get('BackupPolicyMode')
def set_BackupPolicyMode(self, BackupPolicyMode): # String
self.add_query_param('BackupPolicyMode', BackupPolicyMode)
def get_PreferredBackupPeriod(self): # String
return self.get_query_params().get('PreferredBackupPeriod')
def set_PreferredBackupPeriod(self, PreferredBackupPeriod): # String
self.add_query_param('PreferredBackupPeriod', PreferredBackupPeriod)
def get_EnableIncrementDataBackup(self): # Boolean
return self.get_query_params().get('EnableIncrementDataBackup')
def set_EnableIncrementDataBackup(self, EnableIncrementDataBackup): # Boolean
self.add_query_param('EnableIncrementDataBackup', EnableIncrementDataBackup)
def get_ReleasedKeepPolicy(self): # String
return self.get_query_params().get('ReleasedKeepPolicy')
def set_ReleasedKeepPolicy(self, ReleasedKeepPolicy): # String
self.add_query_param('ReleasedKeepPolicy', ReleasedKeepPolicy)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_CompressType(self): # String
return self.get_query_params().get('CompressType')
def set_CompressType(self, CompressType): # String
self.add_query_param('CompressType', CompressType)
def get_LocalLogRetentionSpace(self): # String
return self.get_query_params().get('LocalLogRetentionSpace')
def set_LocalLogRetentionSpace(self, LocalLogRetentionSpace): # String
self.add_query_param('LocalLogRetentionSpace', LocalLogRetentionSpace)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ArchiveBackupKeepPolicy(self): # String
return self.get_query_params().get('ArchiveBackupKeepPolicy')
def set_ArchiveBackupKeepPolicy(self, ArchiveBackupKeepPolicy): # String
self.add_query_param('ArchiveBackupKeepPolicy', ArchiveBackupKeepPolicy)
def get_PreferredBackupTime(self): # String
return self.get_query_params().get('PreferredBackupTime')
def set_PreferredBackupTime(self, PreferredBackupTime): # String
self.add_query_param('PreferredBackupTime', PreferredBackupTime)
def get_BackupRetentionPeriod(self): # String
return self.get_query_params().get('BackupRetentionPeriod')
def set_BackupRetentionPeriod(self, BackupRetentionPeriod): # String
self.add_query_param('BackupRetentionPeriod', BackupRetentionPeriod)
def get_BackupMethod(self): # String
return self.get_query_params().get('BackupMethod')
def set_BackupMethod(self, BackupMethod): # String
self.add_query_param('BackupMethod', BackupMethod)
def get_ArchiveBackupRetentionPeriod(self): # String
return self.get_query_params().get('ArchiveBackupRetentionPeriod')
def set_ArchiveBackupRetentionPeriod(self, ArchiveBackupRetentionPeriod): # String
self.add_query_param('ArchiveBackupRetentionPeriod', ArchiveBackupRetentionPeriod)
def get_Category(self): # String
return self.get_query_params().get('Category')
def set_Category(self, Category): # String
self.add_query_param('Category', Category)
def get_LogBackupRetentionPeriod(self): # String
return self.get_query_params().get('LogBackupRetentionPeriod')
def set_LogBackupRetentionPeriod(self, LogBackupRetentionPeriod): # String
        self.add_query_param('LogBackupRetentionPeriod', LogBackupRetentionPeriod)
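
# A hedged usage sketch, not part of the generated file above: it shows the
# standard aliyunsdkcore pattern of building the request, setting query
# parameters, and sending it through an AcsClient. The credentials, region,
# and instance id are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = ModifyBackupPolicyRequest()
request.set_DBInstanceId('rm-xxxxxxxxxxxxxxxx')
request.set_PreferredBackupTime('03:00Z-04:00Z')
request.set_PreferredBackupPeriod('Monday,Thursday')
request.set_BackupRetentionPeriod('7')
response = client.do_action_with_exception(request)  # raw JSON bytes
print(response)
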
id: 1,001
import json
import logging
import math
from typing import Optional
import packaging.version
from galaxy.tool_util.cwl.parser import (
tool_proxy,
ToolProxy,
)
from galaxy.tool_util.deps import requirements
from .interface import (
PageSource,
PagesSource,
ToolSource,
)
from .output_actions import ToolOutputActionGroup
from .output_objects import ToolOutput
from .stdio import (
StdioErrorLevel,
ToolStdioExitCode,
)
from .yaml import YamlInputSource
log = logging.getLogger(__name__)
class CwlToolSource(ToolSource):
language = "yaml"
def __init__(self, tool_file=None, strict_cwl_validation=True, tool_proxy: Optional[ToolProxy] = None):
self._cwl_tool_file = tool_file
self._tool_proxy = tool_proxy
self._source_path = tool_file
self._strict_cwl_validation = strict_cwl_validation
@property
def source_path(self):
return self._source_path
@property
def tool_proxy(self) -> ToolProxy:
if self._tool_proxy is None:
self._tool_proxy = tool_proxy(self._source_path, strict_cwl_validation=self._strict_cwl_validation)
return self._tool_proxy
def parse_tool_type(self):
return "cwl"
def parse_id(self):
return self.tool_proxy.galaxy_id()
def parse_name(self):
return self.tool_proxy.label() or self.parse_id()
def parse_command(self):
return "$__cwl_command"
def parse_environment_variables(self):
environment_variables = []
        # TODO: Is this even possible from here? Should this instead be
        # moved into the job?
# for environment_variable_el in environment_variables_el.findall("environment_variable"):
# definition = {
# "name": environment_variable_el.get("name"),
# "template": environment_variable_el.text,
# }
# environment_variables.append(
# definition
# )
return environment_variables
def parse_edam_operations(self):
return []
    def parse_edam_topics(self):
return []
def parse_help(self):
return self.tool_proxy.doc()
def parse_sanitize(self):
return False
def parse_strict_shell(self):
return True
def parse_stdio(self):
# TODO: remove duplication with YAML
# New format - starting out just using exit code.
exit_code_lower = ToolStdioExitCode()
exit_code_lower.range_start = -math.inf
exit_code_lower.range_end = -1
exit_code_lower.error_level = StdioErrorLevel.FATAL
exit_code_high = ToolStdioExitCode()
exit_code_high.range_start = 1
exit_code_high.range_end = math.inf
        exit_code_high.error_level = StdioErrorLevel.FATAL
return [exit_code_lower, exit_code_high], []
def parse_interpreter(self):
return None
def parse_version(self):
return "0.0.1"
def parse_description(self):
return self.tool_proxy.description()
def parse_interactivetool(self):
return []
def parse_input_pages(self):
page_source = CwlPageSource(self.tool_proxy)
return PagesSource([page_source])
def parse_outputs(self, tool):
output_instances = self.tool_proxy.output_instances()
outputs = {}
output_defs = []
for output_instance in output_instances:
output_defs.append(self._parse_output(tool, output_instance))
# TODO: parse outputs collections
for output_def in output_defs:
outputs[output_def.name] = output_def
return outputs, {}
def _parse_output(self, tool, output_instance):
name = output_instance.name
# TODO: handle filters, actions, change_format
output = ToolOutput(name)
if "File" in output_instance.output_data_type:
output.format = "_sniff_"
else:
output.format = "expression.json"
output.change_format = []
output.format_source = None
output.metadata_source = ""
output.parent = None
output.label = None
output.count = None
output.filters = []
output.tool = tool
output.hidden = ""
output.dataset_collector_descriptions = []
output.actions = ToolOutputActionGroup(output, None)
return output
def parse_requirements_and_containers(self):
containers = []
docker_identifier = self.tool_proxy.docker_identifier()
if docker_identifier:
containers.append({"type": "docker", "identifier": docker_identifier})
software_requirements = self.tool_proxy.software_requirements()
resource_requirements = self.tool_proxy.resource_requirements()
return requirements.parse_requirements_from_lists(
software_requirements=[{"name": r[0], "version": r[1], "type": "package"} for r in software_requirements],
containers=containers,
resource_requirements=resource_requirements,
)
def parse_profile(self):
return "17.09"
def parse_xrefs(self):
return []
def parse_license(self):
return None
def parse_python_template_version(self):
return packaging.version.Version("3.5")
def to_string(self):
return json.dumps(self.tool_proxy.to_persistent_representation())
class CwlPageSource(PageSource):
def __init__(self, tool_proxy):
cwl_instances = tool_proxy.input_instances()
self._input_list = list(map(self._to_input_source, cwl_instances))
def _to_input_source(self, input_instance):
as_dict = input_instance.to_dict()
return YamlInputSource(as_dict)
def parse_input_sources(self):
return self._input_list
__all__ = (
"CwlToolSource",
"tool_proxy",
)
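
# A hedged usage sketch: assumes Galaxy's tool_util (with its CWL extras) is
# installed and that "cat.cwl" is a valid CWL CommandLineTool on disk; both
# are assumptions, not part of the module above.
source = CwlToolSource(tool_file="cat.cwl")
print(source.parse_tool_type())  # "cwl" (no file parsing needed)
print(source.parse_id())         # id comes from the lazily-built tool proxy
pages = source.parse_input_pages()
for page in pages.page_sources:
    print(page.parse_input_sources())
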
id: 1,002
from __future__ import annotations
from typing_extensions import Annotated
from typing import TYPE_CHECKING, Optional
from .utils.translator import translate
from discord.ext import commands
import discord
import io
if TYPE_CHECKING:
from bot import RoboDanny
from .utils.context import Context
GUILD_ID = 81883016288276480
VOICE_ROOM_ID = 633466718035116052
GENERAL_VOICE_ID = 81883016309248000
class Funhouse(commands.Cog):
def __init__(self, bot: RoboDanny):
self.bot: RoboDanny = bot
@property
    def display_emoji(self) -> discord.PartialEmoji:
return discord.PartialEmoji(name='\N{MAPLE LEAF}')
def is_outside_voice(self, state: discord.VoiceState) -> bool:
return state.channel is None or state.channel.id != GENERAL_VOICE_ID
def is_inside_voice(self, state: discord.VoiceState) -> bool:
return state.channel is not None and state.channel.id == GENERAL_VOICE_ID
@commands.Cog.listener()
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):
if member.guild.id != GUILD_ID:
return
voice_room: Optional[discord.TextChannel] = member.guild.get_channel(VOICE_ROOM_ID) # type: ignore
if voice_room is None:
return
if self.is_outside_voice(before) and self.is_inside_voice(after):
# joined a channel
await voice_room.set_permissions(member, read_messages=True)
elif self.is_outside_voice(after) and self.is_inside_voice(before):
# left the channel
await voice_room.set_permissions(member, read_messages=None)
@commands.command(hidden=True)
async def cat(self, ctx: Context):
"""Gives you a random cat."""
async with ctx.session.get('https://api.thecatapi.com/v1/images/search') as resp:
if resp.status != 200:
return await ctx.send('No cat found :(')
js = await resp.json()
await ctx.send(embed=discord.Embed(title='Random Cat').set_image(url=js[0]['url']))
@commands.command(hidden=True)
async def dog(self, ctx: Context):
"""Gives you a random dog."""
async with ctx.session.get('https://random.dog/woof') as resp:
if resp.status != 200:
return await ctx.send('No dog found :(')
filename = await resp.text()
url = f'https://random.dog/{filename}'
filesize = ctx.guild.filesize_limit if ctx.guild else 8388608
if filename.endswith(('.mp4', '.webm')):
async with ctx.typing():
async with ctx.session.get(url) as other:
if other.status != 200:
return await ctx.send('Could not download dog video :(')
if int(other.headers['Content-Length']) >= filesize:
return await ctx.send(f'Video was too big to upload... See it here: {url} instead.')
fp = io.BytesIO(await other.read())
await ctx.send(file=discord.File(fp, filename=filename))
else:
await ctx.send(embed=discord.Embed(title='Random Dog').set_image(url=url))
@commands.command(hidden=True)
async def translate(self, ctx: Context, *, message: Annotated[Optional[str], commands.clean_content] = None):
"""Translates a message to English using Google translate."""
loop = self.bot.loop
if message is None:
reply = ctx.replied_message
if reply is not None:
message = reply.content
else:
return await ctx.send('Missing a message to translate')
try:
result = await translate(message, session=self.bot.session)
except Exception as e:
return await ctx.send(f'An error occurred: {e.__class__.__name__}: {e}')
embed = discord.Embed(title='Translated', colour=0x4284F3)
embed.add_field(name=f'From {result.source_language}', value=result.original, inline=False)
embed.add_field(name=f'To {result.target_language}', value=result.translated, inline=False)
await ctx.send(embed=embed)
async def setup(bot: RoboDanny):
    await bot.add_cog(Funhouse(bot))
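
# A minimal loading sketch, assuming the usual RoboDanny layout where this
# module lives at "cogs.funhouse" (an assumption); discord.py 2.x awaits the
# setup() coroutine above when the extension is loaded.
async def load_funhouse(bot: RoboDanny) -> None:
    await bot.load_extension('cogs.funhouse')
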
id: 1,003
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateImageRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImage', 'ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DiskDeviceMappings(self): # RepeatList
return self.get_query_params().get('DiskDeviceMapping')
def set_DiskDeviceMappings(self, DiskDeviceMapping): # RepeatList
for depth1 in range(len(DiskDeviceMapping)):
if DiskDeviceMapping[depth1].get('SnapshotId') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.SnapshotId', DiskDeviceMapping[depth1].get('SnapshotId'))
if DiskDeviceMapping[depth1].get('Size') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.Size', DiskDeviceMapping[depth1].get('Size'))
if DiskDeviceMapping[depth1].get('DiskType') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.DiskType', DiskDeviceMapping[depth1].get('DiskType'))
if DiskDeviceMapping[depth1].get('Device') is not None:
self.add_query_param('DiskDeviceMapping.' + str(depth1 + 1) + '.Device', DiskDeviceMapping[depth1].get('Device'))
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SnapshotId(self): # String
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self, SnapshotId): # String
self.add_query_param('SnapshotId', SnapshotId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_BootMode(self): # String
return self.get_query_params().get('BootMode')
def set_BootMode(self, BootMode): # String
self.add_query_param('BootMode', BootMode)
def get_ImageName(self): # String
return self.get_query_params().get('ImageName')
def set_ImageName(self, ImageName): # String
self.add_query_param('ImageName', ImageName)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_Architecture(self): # String
return self.get_query_params().get('Architecture')
def set_Architecture(self, Architecture): # String
self.add_query_param('Architecture', Architecture)
def get_DetectionStrategy(self): # String
return self.get_query_params().get('DetectionStrategy')
def set_DetectionStrategy(self, DetectionStrategy): # String
self.add_query_param('DetectionStrategy', DetectionStrategy)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
    def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_ImageFamily(self): # String
return self.get_query_params().get('ImageFamily')
def set_ImageFamily(self, ImageFamily): # String
self.add_query_param('ImageFamily', ImageFamily)
def get_ImageVersion(self): # String
return self.get_query_params().get('ImageVersion')
def set_ImageVersion(self, ImageVersion): # String
        self.add_query_param('ImageVersion', ImageVersion)
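
# A hedged usage sketch focused on the RepeatList parameters above: lists of
# dicts are flattened into indexed query parameters such as
# "DiskDeviceMapping.1.SnapshotId". All ids and credentials are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = CreateImageRequest()
request.set_InstanceId('i-xxxxxxxxxxxxxxxx')
request.set_ImageName('my-golden-image')
request.set_DiskDeviceMappings([
    {'SnapshotId': 's-xxxxxxxxxxxxxxxx', 'Device': '/dev/xvda'},
    {'SnapshotId': 's-yyyyyyyyyyyyyyyy', 'Size': '40'},
])
request.set_Tags([{'Key': 'env', 'Value': 'staging'}])
print(client.do_action_with_exception(request))
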
id: 1,004
################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2017-2022 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from io import StringIO
from django import template
from django.utils.translation import gettext as _
from creme.creme_core.auth.entity_credentials import EntityCredentials
from creme.creme_core.models import FieldsConfig
from creme.persons import get_organisation_model
from creme.persons.constants import REL_SUB_EMPLOYED_BY, REL_SUB_MANAGES
register = template.Library()
# TODO: a code per country ?
@register.filter
def persons_pretty_address(address):
is_field_hidden = FieldsConfig.objects.get_for_model(address.__class__).is_fieldname_hidden
with StringIO() as sio:
write = sio.write
addr = '' if is_field_hidden('address') else address.address
if addr:
write(addr)
po_box = '' if is_field_hidden('po_box') else address.po_box
if po_box:
if sio.tell():
write('\n')
write(po_box)
zipcode = '' if is_field_hidden('zipcode') else address.zipcode
city = '' if is_field_hidden('city') else address.city
if zipcode or city:
if sio.tell():
write('\n')
if not zipcode:
write(city)
elif not city:
write(zipcode)
else:
write(zipcode)
write(' ')
write(city)
return sio.getvalue()
@register.filter
def persons_pretty_contact(contact):
civ = contact.civility
last_name = contact.last_name.upper()
if civ and civ.shortcut:
return _('{civility} {first_name} {last_name}').format(
civility=civ.shortcut,
first_name=contact.first_name,
last_name=last_name,
)
if contact.first_name:
return _('{first_name} {last_name}').format(
first_name=contact.first_name,
last_name=last_name,
)
return last_name or ''
# NB: only used in opportunities?
@register.simple_tag
def persons_contact_first_employer(contact, user):
info = {}
managed_ids = []
employing_ids = []
for rtype_id, orga_id in contact.relations.filter(
type__in=(REL_SUB_EMPLOYED_BY, REL_SUB_MANAGES),
).values_list('type', 'object_entity'):
if rtype_id == REL_SUB_MANAGES:
managed_ids.append(orga_id)
else:
employing_ids.append(orga_id)
if managed_ids:
orga = EntityCredentials.filter(
user,
get_organisation_model().objects.filter(id__in=managed_ids, is_deleted=False),
).first()
if orga:
info['organisation'] = orga
info['as_manager'] = True
if not info and employing_ids:
orga = EntityCredentials.filter(
user,
get_organisation_model().objects.filter(id__in=employing_ids, is_deleted=False),
).first()
if orga:
info['organisation'] = orga
info['as_manager'] = False
return info
@register.simple_tag
def METHOD_NAME(form, address_fks, zip_fields=True):
if not address_fks:
return None
meta = []
grouped_fields = []
# NB: we expect that AddressesGroup injects corresponding fields in the
# same order (e.g. "city" as first for billing & shipping, then "zipcode"...)
for fk in address_fks:
prefix = f'{fk.name}-'
meta.append({
'title': fk.verbose_name,
# 'prefix': prefix,
'prefix': fk.name, # NB: JQuery |= filter already adds a hyphen
})
grouped_fields.append(
[field for field in form if field.name.startswith(prefix)]
)
return {
'grouped_meta': meta,
'grouped_fields': [*zip(*grouped_fields)] if zip_fields else grouped_fields,
    }
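
{# A hedged template usage sketch. It assumes this tag library is registered #}
{# under the name "persons_tags" (an assumption about the templatetags       #}
{# module), with `contact` and `user` provided by the view context.          #}
{% load persons_tags %}
<p>{{ contact|persons_pretty_contact }}</p>
<pre>{{ contact.billing_address|persons_pretty_address }}</pre>
{% persons_contact_first_employer contact user as employer %}
{% if employer.organisation %}<p>{{ employer.organisation }}</p>{% endif %}
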
id: 1,005
import pytest
from math import sqrt
from otx.algorithms.common.adapters.mmcv.utils import automatic_bs
from otx.algorithms.common.adapters.mmcv.utils import adapt_batch_size
from otx.algorithms.common.adapters.mmcv.utils.automatic_bs import SubDataset
DEFAULT_BS = 8
DEFAULT_LR = 0.001
TRAINSET_SIZE = 100
class MockBsSearchAlgo:
def __init__(self, train_func, default_bs: int, max_bs: int):
self.train_func = train_func
self.default_bs = default_bs
self.max_bs = max_bs
def auto_decrease_batch_size(self):
self.train_func(self.default_bs)
self.train_func(self.default_bs // 2)
return self.default_bs // 2
def find_big_enough_batch_size(self, drop_last: bool):
self.train_func(self.default_bs)
self.train_func(self.default_bs + 2)
return self.default_bs + 2
@pytest.fixture
def mock_adapt_algo_cls(mocker):
return mocker.patch.object(automatic_bs, "BsSearchAlgo", side_effect=MockBsSearchAlgo)
@pytest.fixture
def common_cfg(mocker):
mock_cfg = mocker.MagicMock()
mock_cfg.runner = {"type": "EpochRunnerWithCancel", "max_epochs": 100}
mock_cfg.custom_hooks = [
{"type": "AdaptiveTrainSchedulingHook", "enable_eval_before_run": True},
{"type": "OTXProgressHook"},
]
mock_cfg.optimizer.lr = DEFAULT_LR
return mock_cfg
def set_mock_cfg_not_action(common_cfg):
common_cfg.data.train_dataloader = {"samples_per_gpu": DEFAULT_BS}
return common_cfg
def set_mock_cfg_action(common_cfg):
common_cfg.data.videos_per_gpu = DEFAULT_BS
common_cfg.domain = "ACTION_CLASSIFICATION"
return common_cfg
@pytest.fixture
def mock_dataset(mocker):
mock_ds = [mocker.MagicMock()]
mock_ds[0].__len__.return_value = TRAINSET_SIZE
return mock_ds
@pytest.mark.parametrize("not_increase", [True, False])
@pytest.mark.parametrize("is_action_task", [True, False])
@pytest.mark.parametrize("is_iter_based_runner", [True, False])
def test_adapt_batch_size(
mocker, mock_adapt_algo_cls, common_cfg, mock_dataset, not_increase, is_action_task, is_iter_based_runner
):
# prepare
mock_train_func = mocker.MagicMock()
new_bs = DEFAULT_BS // 2 if not_increase else DEFAULT_BS + 2
max_eph_name = "max_epochs"
if is_iter_based_runner:
common_cfg.runner = {"type": "IterBasedRunnerWithCancel", "max_iters": 100}
max_eph_name = "max_iters"
if is_action_task:
mock_config = set_mock_cfg_action(common_cfg)
else:
mock_config = set_mock_cfg_not_action(common_cfg)
# execute
adapt_batch_size(mock_train_func, mock_config, mock_dataset, False, not_increase)
# check adapted batch size is applied
if is_action_task:
assert mock_config.data.videos_per_gpu == new_bs
else:
assert mock_config.data.train_dataloader["samples_per_gpu"] == new_bs
    # check learning rate is updated depending on adapted batch size
bs_change_ratio = new_bs / DEFAULT_BS
assert mock_config.optimizer.lr == pytest.approx(DEFAULT_LR * sqrt(bs_change_ratio))
# check adapt function gets proper arguments
assert mock_adapt_algo_cls.call_args.kwargs["default_bs"] == DEFAULT_BS
assert mock_adapt_algo_cls.call_args.kwargs["max_bs"] == TRAINSET_SIZE
# check length of dataset is decreased to reduce time
assert len(mock_train_func.call_args_list[0].kwargs["dataset"][0]) == DEFAULT_BS
assert len(mock_train_func.call_args_list[1].kwargs["dataset"][0]) == new_bs
# check max epoch is set as 1 to reduce time
assert mock_train_func.call_args_list[0].kwargs["cfg"].runner[max_eph_name] == 1
assert mock_train_func.call_args_list[1].kwargs["cfg"].runner[max_eph_name] == 1
# check eval before run is disabled to reduce time
assert not mock_train_func.call_args_list[0].kwargs["cfg"].custom_hooks[0]["enable_eval_before_run"]
assert not mock_train_func.call_args_list[1].kwargs["cfg"].custom_hooks[0]["enable_eval_before_run"]
# check OTXProgressHook is removed
assert len(mock_train_func.call_args_list[0].kwargs["cfg"].custom_hooks) == 1
def METHOD_NAME(mocker, common_cfg, mock_dataset):
# prepare
mock_train_func = mocker.MagicMock()
mock_config = set_mock_cfg_not_action(common_cfg)
mocker.patch.object(automatic_bs, "cuda_available", return_value=False)
# execute
adapt_batch_size(mock_train_func, mock_config, mock_dataset, False, True)
    # check train function isn't called.
mock_train_func.assert_not_called()
class TestSubDataset:
@pytest.fixture(autouse=True)
def set_up(self, mocker):
self.num_samples = 3
self.fullset = mocker.MagicMock()
self.sub_dataset = SubDataset(self.fullset, self.num_samples)
def test_init(self, mocker):
fullset = mocker.MagicMock()
subset = SubDataset(fullset, 3)
        # test for class incremental case: if the assert below fails, ClsIncrSampler cannot work properly.
assert len(subset.img_indices["new"]) / len(subset.img_indices["old"]) + 1 <= self.num_samples
@pytest.mark.parametrize("num_samples", [-1, 0])
def test_init_w_wrong_num_samples(self, mocker, num_samples):
fullset = mocker.MagicMock()
with pytest.raises(ValueError):
SubDataset(fullset, num_samples)
def test_len(self):
assert len(self.sub_dataset) == self.num_samples
def test_getitem(self):
self.sub_dataset[0]
self.fullset.__getitem__.assert_called_once_with(0)
def test_getattr(self):
self.fullset.data = "data"
assert self.sub_dataset.data == "data"
def test_flag(self):
        assert len(self.sub_dataset.flag) == self.num_samples
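
# The tests above encode the square-root learning-rate scaling rule: when the
# batch size is adapted from old_bs to new_bs, lr is rescaled by
# sqrt(new_bs / old_bs). A standalone illustration of that arithmetic:
from math import sqrt

old_bs, new_bs, old_lr = 8, 4, 0.001
new_lr = old_lr * sqrt(new_bs / old_bs)
print(new_lr)  # ~0.000707, matching the pytest.approx assertion above
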
id: 1,006
# coding=utf-8
# **************************************************************************
# *
# * Authors: Estrella Fernandez Gimenez ([email protected])
# * Carlos Oscar Sanchez Sorzano
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia, CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# **************************************************************************
import numpy as np
from pyworkflow import VERSION_2_0
from pyworkflow.protocol.params import PointerParam
from pwem import emlib
from pwem.protocols import ProtAnalysis3D
import pwem.emlib.metadata as md
from xmipp3.convert import readSetOfMicrographs, writeSetOfMicrographs
class XmippProtAnalyzeLocalCTF(ProtAnalysis3D):
"""Assigns to each micrograph a coefficient (R2) which evaluates the result of the
local defocus adjustment and displays the local defocus for all the particles in each micrograph."""
_label = 'analyze local defocus'
_lastUpdateVersion = VERSION_2_0
def __init__(self, **args):
ProtAnalysis3D.__init__(self, **args)
# --------------------------- DEFINE param functions --------------------------------------------
def _defineParams(self, form):
form.addSection(label='Input')
form.addParam('inputMics', PointerParam, label="Input micrographs",
pointerClass='SetOfMicrographs')
form.addParam('inputSet', PointerParam, label="Input images",
pointerClass='SetOfParticles', help="Set of particles with assigned local defocus")
# --------------------------- INSERT steps functions --------------------------------------------
def _insertAllSteps(self):
self._insertFunctionStep("analyzeDefocus")
self._insertFunctionStep("createOutputStep")
# --------------------------- STEPS functions ---------------------------------------------------
def analyzeDefocus(self):
"""compute R2 coefficient of each micrograph and prepare data to be later displayed in the viewer as a 3D
representation of the distribution of particles in the micrograph"""
micIds = []
particleIds = []
x = []
y = []
defocusU = []
defocusV = []
for particle in self.inputSet.get():
micIds.append(particle.getMicId())
particleIds.append(particle.getObjId())
xi, yi = particle.getCoordinate().getPosition()
x.append(xi)
y.append(yi)
defocusU.append(particle.getCTF().getDefocusU())
defocusV.append(particle.getCTF().getDefocusV())
uniqueMicIds = list(set(micIds))
self.R2 = {}
md = emlib.MetaData()
for micId in uniqueMicIds:
idx = [i for i, j in enumerate(micIds) if j == micId]
defocusUbyId = []
defocusVbyId = []
meanDefocusbyId = []
xbyId = []
ybyId = []
particleIdsbyMicId = []
for idxi in idx:
defocusUbyId.append(defocusU[idxi])
defocusVbyId.append(defocusV[idxi])
meanDefocus = (defocusU[idxi]+defocusV[idxi])/2
meanDefocusbyId.append(meanDefocus)
xbyId.append(x[idxi])
ybyId.append(y[idxi])
particleIdsbyMicId.append(particleIds[idxi])
# defocus = c*y + b*x + a = A * X; A=[x(i),y(i)]
A = np.column_stack([np.ones(len(xbyId)), xbyId, ybyId])
polynomial, _, _, _ = np.linalg.lstsq(A, meanDefocusbyId, rcond=None)
residuals = 0
for Ai, bi in zip(A, meanDefocusbyId):
residuali = bi - (Ai[0]*polynomial[0] + Ai[1]*polynomial[1] + Ai[2]*polynomial[2])
residuals += residuali*residuali
meanDefocusbyIdArray = np.asarray(meanDefocusbyId)
coefficients = np.asarray(polynomial)
den = sum((meanDefocusbyIdArray - meanDefocusbyIdArray.mean()) ** 2)
if den == 0:
self.R2[micId] = 0
else:
self.R2[micId] = 1 - residuals / den
mdBlock = emlib.MetaData()
for xi, yi, deltafi, parti in zip(xbyId, ybyId, meanDefocusbyId, particleIdsbyMicId):
objId = mdBlock.addObject()
mdBlock.setValue(emlib.MDL_ITEM_ID, parti, objId)
mdBlock.setValue(emlib.MDL_XCOOR, xi, objId)
mdBlock.setValue(emlib.MDL_YCOOR, yi, objId)
mdBlock.setValue(emlib.MDL_CTF_DEFOCUSA, deltafi, objId)
estimatedVal = coefficients[2]*yi + coefficients[1]*xi + coefficients[0]
residuali = deltafi - estimatedVal
mdBlock.setValue(emlib.MDL_CTF_DEFOCUS_RESIDUAL, residuali, objId)
mdBlock.write("mic_%d@%s" % (micId, self._getExtraPath("micrographDefoci.xmd")), emlib.MD_APPEND)
objId = md.addObject()
md.setValue(emlib.MDL_CTF_DEFOCUS_COEFS, coefficients.tolist(), objId)
md.write(self._getExtraPath("micrographCoef.xmd"), emlib.MD_APPEND)
def createOutputStep(self):
"""create as output a setOfParticles and add the columns of corresponding computed metadata"""
inputMicSet = self.inputMics.get()
fnMics = self._getExtraPath('input_mics.xmd')
writeSetOfMicrographs(inputMicSet, fnMics)
mdMics = md.MetaData(fnMics)
for objId in mdMics:
micId = mdMics.getValue(emlib.MDL_ITEM_ID, objId)
if micId in self.R2:
micR2 = float(self.R2[micId])
mdMics.setValue(emlib.MDL_CTF_DEFOCUS_R2, micR2, objId)
mdMics.write(fnMics)
outputSet = self._createSetOfMicrographs()
outputSet.copyInfo(inputMicSet)
readSetOfMicrographs(fnMics, outputSet, extraLabels=[emlib.MDL_CTF_DEFOCUS_R2])
self._defineOutputs(outputMicrographs=outputSet)
self._defineSourceRelation(self.inputSet, outputSet)
self._defineSourceRelation(inputMicSet, outputSet)
# --------------------------- INFO functions --------------------------------------------
def _summary(self):
summary = []
summary.append("Local defocus analyzed for %i particles" % self.inputSet.get().getSize())
return summary
    def _methods(self):
methods = []
methods.append("The results obtained when local CTF is calculated are analyzed here. The adjust coefficients, "
"residues and R2 are calculated for each micrograph.")
        return methods
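
# A self-contained sketch of the per-micrograph fit done in analyzeDefocus():
# mean defocus is modeled as a plane a + b*x + c*y over particle coordinates,
# and R2 measures how well that plane explains the defocus variation. The
# coordinates and defoci below are synthetic.
import numpy as np

x = np.array([10., 200., 400., 600.])
y = np.array([20., 100., 300., 500.])
defocus = 15000. + 2.0 * x + 3.0 * y  # perfectly planar synthetic defoci
A = np.column_stack([np.ones(len(x)), x, y])
coeffs, _, _, _ = np.linalg.lstsq(A, defocus, rcond=None)
residuals = defocus - A @ coeffs
r2 = 1 - (residuals ** 2).sum() / ((defocus - defocus.mean()) ** 2).sum()
print(coeffs, r2)  # ~[15000, 2, 3] and R2 ~ 1.0
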
id: 1,007
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkclickhouse.endpoint import endpoint_data
class CreateDBInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'clickhouse', '2019-11-11', 'CreateDBInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBClusterDescription(self): # String
return self.get_query_params().get('DBClusterDescription')
def set_DBClusterDescription(self, DBClusterDescription): # String
self.add_query_param('DBClusterDescription', DBClusterDescription)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_SourceDBClusterId(self): # String
return self.get_query_params().get('SourceDBClusterId')
def set_SourceDBClusterId(self, SourceDBClusterId): # String
self.add_query_param('SourceDBClusterId', SourceDBClusterId)
def get_DbNodeStorageType(self): # String
return self.get_query_params().get('DbNodeStorageType')
def set_DbNodeStorageType(self, DbNodeStorageType): # String
self.add_query_param('DbNodeStorageType', DbNodeStorageType)
def get_DBClusterCategory(self): # String
return self.get_query_params().get('DBClusterCategory')
def set_DBClusterCategory(self, DBClusterCategory): # String
self.add_query_param('DBClusterCategory', DBClusterCategory)
def get_EncryptionType(self): # String
return self.get_query_params().get('EncryptionType')
def set_EncryptionType(self, EncryptionType): # String
self.add_query_param('EncryptionType', EncryptionType)
def get_DBClusterNetworkType(self): # String
return self.get_query_params().get('DBClusterNetworkType')
def set_DBClusterNetworkType(self, DBClusterNetworkType): # String
self.add_query_param('DBClusterNetworkType', DBClusterNetworkType)
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DBClusterVersion(self): # String
return self.get_query_params().get('DBClusterVersion')
def set_DBClusterVersion(self, DBClusterVersion): # String
self.add_query_param('DBClusterVersion', DBClusterVersion)
def get_DBClusterClass(self): # String
return self.get_query_params().get('DBClusterClass')
def set_DBClusterClass(self, DBClusterClass): # String
self.add_query_param('DBClusterClass', DBClusterClass)
def get_BackupSetID(self): # String
return self.get_query_params().get('BackupSetID')
def set_BackupSetID(self, BackupSetID): # String
self.add_query_param('BackupSetID', BackupSetID)
def get_EncryptionKey(self): # String
return self.get_query_params().get('EncryptionKey')
def set_EncryptionKey(self, EncryptionKey): # String
self.add_query_param('EncryptionKey', EncryptionKey)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBNodeGroupCount(self): # String
return self.get_query_params().get('DBNodeGroupCount')
def set_DBNodeGroupCount(self, DBNodeGroupCount): # String
self.add_query_param('DBNodeGroupCount', DBNodeGroupCount)
def get_UsedTime(self): # String
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # String
self.add_query_param('UsedTime', UsedTime)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_DBNodeStorage(self): # String
return self.get_query_params().get('DBNodeStorage')
def set_DBNodeStorage(self, DBNodeStorage): # String
self.add_query_param('DBNodeStorage', DBNodeStorage)
def get_VPCId(self): # String
return self.get_query_params().get('VPCId')
def set_VPCId(self, VPCId): # String
self.add_query_param('VPCId', VPCId)
    def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
        self.add_query_param('PayType', PayType)
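
# A hedged usage sketch; the parameter values follow the setter names above
# rather than a verified API reference, so treat them as assumptions. The
# credentials and network ids are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = CreateDBInstanceRequest()
request.set_DBClusterVersion('22.8.5.29')
request.set_DBClusterCategory('Basic')
request.set_DBClusterClass('S8')
request.set_DBClusterNetworkType('vpc')
request.set_DBNodeGroupCount('1')
request.set_DBNodeStorage('100')
request.set_VPCId('vpc-xxxxxxxx')
request.set_VSwitchId('vsw-xxxxxxxx')
request.set_ZoneId('cn-hangzhou-h')
request.set_PayType('Postpaid')
print(client.do_action_with_exception(request))
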
id: 1,008
# Support for a manually controlled stepper
#
# Copyright (C) 2019-2021 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import stepper, chelper
from . import force_move
class ManualStepper:
def __init__(self, config):
self.printer = config.get_printer()
if config.get('endstop_pin', None) is not None:
self.can_home = True
self.rail = stepper.PrinterRail(
config, need_position_minmax=False, default_position_endstop=0.)
self.steppers = self.rail.get_steppers()
else:
self.can_home = False
self.rail = stepper.PrinterStepper(config)
self.steppers = [self.rail]
self.velocity = config.getfloat('velocity', 5., above=0.)
self.accel = self.homing_accel = config.getfloat('accel', 0., minval=0.)
self.next_cmd_time = 0.
# Setup iterative solver
ffi_main, ffi_lib = chelper.get_ffi()
self.trapq = ffi_main.gc(ffi_lib.trapq_alloc(), ffi_lib.trapq_free)
self.trapq_append = ffi_lib.trapq_append
self.trapq_finalize_moves = ffi_lib.trapq_finalize_moves
self.rail.setup_itersolve('cartesian_stepper_alloc', b'x')
self.rail.set_trapq(self.trapq)
# Register commands
stepper_name = config.get_name().split()[1]
gcode = self.printer.lookup_object('gcode')
gcode.register_mux_command('MANUAL_STEPPER', "STEPPER",
stepper_name, self.cmd_MANUAL_STEPPER,
desc=self.cmd_MANUAL_STEPPER_help)
def sync_print_time(self):
toolhead = self.printer.lookup_object('toolhead')
print_time = toolhead.get_last_move_time()
if self.next_cmd_time > print_time:
toolhead.dwell(self.next_cmd_time - print_time)
else:
self.next_cmd_time = print_time
def do_enable(self, enable):
self.sync_print_time()
stepper_enable = self.printer.lookup_object('stepper_enable')
if enable:
for s in self.steppers:
se = stepper_enable.lookup_enable(s.get_name())
se.motor_enable(self.next_cmd_time)
else:
for s in self.steppers:
se = stepper_enable.lookup_enable(s.get_name())
se.motor_disable(self.next_cmd_time)
self.sync_print_time()
def do_set_position(self, setpos):
self.rail.set_position([setpos, 0., 0.])
def do_move(self, movepos, speed, accel, sync=True):
self.sync_print_time()
cp = self.rail.get_commanded_position()
dist = movepos - cp
axis_r, accel_t, cruise_t, cruise_v = force_move.calc_move_time(
dist, speed, accel)
self.trapq_append(self.trapq, self.next_cmd_time,
accel_t, cruise_t, accel_t,
cp, 0., 0., axis_r, 0., 0.,
0., cruise_v, accel)
self.next_cmd_time = self.next_cmd_time + accel_t + cruise_t + accel_t
self.rail.generate_steps(self.next_cmd_time)
self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9)
toolhead = self.printer.lookup_object('toolhead')
toolhead.note_kinematic_activity(self.next_cmd_time)
if sync:
self.sync_print_time()
def do_homing_move(self, movepos, speed, accel, triggered, check_trigger):
if not self.can_home:
raise self.printer.command_error(
"No endstop for this manual stepper")
self.homing_accel = accel
pos = [movepos, 0., 0., 0.]
endstops = self.rail.get_endstops()
phoming = self.printer.lookup_object('homing')
phoming.manual_home(self, endstops, pos, speed,
triggered, check_trigger)
cmd_MANUAL_STEPPER_help = "Command a manually configured stepper"
def cmd_MANUAL_STEPPER(self, gcmd):
enable = gcmd.get_int('ENABLE', None)
if enable is not None:
self.do_enable(enable)
setpos = gcmd.get_float('SET_POSITION', None)
if setpos is not None:
self.do_set_position(setpos)
speed = gcmd.get_float('SPEED', self.velocity, above=0.)
accel = gcmd.get_float('ACCEL', self.accel, minval=0.)
homing_move = gcmd.get_int('STOP_ON_ENDSTOP', 0)
if homing_move:
movepos = gcmd.get_float('MOVE')
self.do_homing_move(movepos, speed, accel,
homing_move > 0, abs(homing_move) == 1)
elif gcmd.get_float('MOVE', None) is not None:
movepos = gcmd.get_float('MOVE')
sync = gcmd.get_int('SYNC', 1)
self.do_move(movepos, speed, accel, sync)
elif gcmd.get_int('SYNC', 0):
self.sync_print_time()
# Toolhead wrappers to support homing
def flush_step_generation(self):
self.sync_print_time()
def get_position(self):
return [self.rail.get_commanded_position(), 0., 0., 0.]
def set_position(self, newpos, homing_axes=()):
self.do_set_position(newpos[0])
def get_last_move_time(self):
self.sync_print_time()
return self.next_cmd_time
def dwell(self, delay):
self.next_cmd_time += max(0., delay)
def drip_move(self, newpos, speed, drip_completion):
self.do_move(newpos[0], speed, self.homing_accel)
def get_kinematics(self):
return self
def get_steppers(self):
return self.steppers
def calc_position(self, stepper_positions):
return [stepper_positions[self.rail.get_name()], 0., 0.]
def load_config_prefix(config):
    return ManualStepper(config)
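
# A standalone sketch of the trapezoidal timing that do_move() relies on,
# mirroring what force_move.calc_move_time computes: accelerate to the cruise
# velocity, cruise, then decelerate symmetrically, with short moves collapsing
# to a triangular profile. This is an illustration, not Klipper's exact code.
import math

def trapezoid_times(dist, speed, accel):
    dist = abs(dist)
    if not accel or not dist:
        return 0., (dist / speed if speed else 0.)
    accel_dist = speed ** 2 / (2. * accel)
    if 2. * accel_dist >= dist:
        peak = math.sqrt(dist * accel)  # never reaches cruise speed
        return peak / accel, 0.
    return speed / accel, (dist - 2. * accel_dist) / speed

print(trapezoid_times(10., 5., 100.))  # (accel_t, cruise_t) = (0.05, 1.95)
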
id: 1,009
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
import re
import warnings
from pathlib import Path
from typing import Any, List, Optional
import nats_bench
import numpy as np
import torch
import yaml
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_space import (
BayesOptSearchSpace,
EvolutionarySearchSpace,
)
class NatsbenchTssSearchSpace(EvolutionarySearchSpace, BayesOptSearchSpace):
"""Search space for NATS-Bench-TSS."""
OPS = ["none", "avg_pool_3x3", "nor_conv_1x1", "nor_conv_3x3", "skip_connect"]
try:
from xautodl.models import get_cell_based_tiny_net
except ImportError:
raise ImportError(
"xautodl installation not found. "
"Please install it using `pip install git+https://github.com/D-X-Y/AutoDL-Projects/`"
)
def __init__(self, natsbench_location: str, base_dataset: str, seed: Optional[int] = 1) -> None:
"""Initialize the search space.
Args:
natsbench_location: Path to the NATS-Bench-TSS dataset.
base_dataset: Base dataset used for training the models.
seed: Random seed.
"""
self.natsbench_location = Path(natsbench_location)
self.base_dataset = base_dataset
assert base_dataset in [
"cifar10",
"cifar100",
"ImageNet16-120",
], "`base_dataset` must be one of ['cifar10', 'cifar100', 'ImageNet16-120']"
if not self.natsbench_location.exists():
raise FileNotFoundError(
"The provided path to `natsbench_location` (" f"{self.natsbench_location.absolute()}) does not exist"
)
self.api = nats_bench.create(natsbench_location, "tss", fast_mode=True, verbose=False)
self.rng = random.Random(seed)
self.archid_pattern = re.compile("natsbench-tss-([0-9]+)")
def _get_op_list(self, string: str) -> List[str]:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# Given a string, get the list of operations
tokens = string.split("|")
ops = [t.split("~")[0] for i, t in enumerate(tokens) if i not in [0, 2, 5, 9]]
return ops
def _get_string_from_ops(self, ops: List[str]) -> str:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# Given a list of operations, get the string
strings = ["|"]
nodes = [0, 0, 1, 0, 1, 2]
for i, op in enumerate(ops):
strings.append(op + "~{}|".format(nodes[i]))
if i < len(nodes) - 1 and nodes[i + 1] == 0:
strings.append("+|")
return "".join(strings)
def model_from_natsbench_tss(self, natsbench_id: int) -> Any:
"""Get a model from NATS-Bench-TSS dataset.
Args:
natsbench_id: NATS-Bench-TSS identifier.
Returns:
Model from NATS-Bench-TSS dataset.
"""
config = self.api.get_net_config(natsbench_id, self.base_dataset)
return self.get_cell_based_tiny_net(config)
@overrides
def save_arch(self, model: ArchaiModel, path: str) -> None:
yaml.safe_dump({"archid": model.archid, **model.metadata}, open(path, "w", encoding="utf-8"))
@overrides
def load_arch(self, path: str) -> ArchaiModel:
metadata = yaml.safe_load(open(path, encoding="utf-8"))
natsbenchid = self.archid_pattern.match(metadata["archid"])
if not natsbenchid:
raise ValueError(f'Architecture {metadata["archid"]} does not belong to `NatsbenchTssSearchSpace`. ')
if metadata["dataset"] != self.base_dataset:
warnings.warn(
f'Architecture loaded from {path} was saved using a different dataset ({metadata["dataset"]})'
f" than `NatsbenchTssSearchSpace` base dataset ({self.base_dataset})"
)
idx = int(natsbenchid.group(1))
return ArchaiModel(
arch=self.model_from_natsbench_tss(idx),
archid=f"natsbench-tss-{idx}",
metadata={"dataset": self.base_dataset},
)
@overrides
def load_model_weights(self, model: ArchaiModel, path: str) -> None:
model.arch.load_state_dict(torch.load(path))
@overrides
def save_model_weights(self, model: ArchaiModel, path: str) -> None:
torch.save(model.arch.state_dict(), path)
@overrides
    def random_sample(self) -> ArchaiModel:
        idx = self.rng.randint(0, len(self.api) - 1)  # randint is inclusive on both ends
return ArchaiModel(
arch=self.model_from_natsbench_tss(idx),
archid=f"natsbench-tss-{idx}",
metadata={"dataset": self.base_dataset},
)
@overrides
def mutate(self, model: ArchaiModel) -> ArchaiModel:
"""Reused from https://github.com/naszilla/naszilla/blob/master/naszilla/nas_bench_201/cell_201.py."""
# First get the string representation of the current architecture
natsbenchid = self.archid_pattern.match(model.archid)
if not natsbenchid:
raise ValueError(f"Architecture {model.archid} does not belong to the `NatsbenchTssSearchSpace`. ")
natsbenchid = int(natsbenchid.group(1))
string_rep = self.api.get_net_config(natsbenchid, self.base_dataset)["arch_str"]
nbhd_strs = []
ops = self._get_op_list(string_rep)
for i in range(len(ops)):
available = [op for op in self.OPS if op != ops[i]]
for op in available:
new_ops = ops.copy()
new_ops[i] = op
new_arch_str = self._get_string_from_ops(new_ops)
nbhd_strs.append(new_arch_str)
# Picks one neighbor architecture as the mutation
mutation_str = random.choice(nbhd_strs)
mutation_natsbenchid = self.api.archstr2index[mutation_str]
return ArchaiModel(
arch=self.model_from_natsbench_tss(mutation_natsbenchid),
archid=f"natsbench-tss-{mutation_natsbenchid}",
metadata={"dataset": self.base_dataset},
)
@overrides
def crossover(self, arch_list: List[ArchaiModel]) -> ArchaiModel:
raise NotImplementedError
@overrides
def encode(self, arch: ArchaiModel) -> np.ndarray:
enc_dict = {
"none": [0, 0, 0, 0],
"avg_pool_3x3": [1, 0, 0, 0],
"nor_conv_1x1": [0, 1, 0, 0],
"nor_conv_3x3": [0, 0, 1, 0],
"skip_connect": [0, 0, 0, 1],
}
# Gets string repr for `arch`
natsbenchid = self.archid_pattern.match(arch.archid)
if not natsbenchid:
raise ValueError(f"Architecture {arch.archid} does not belong" " to `NatsbenchTssSearchSpace`. ")
arch_str = self.api[int(natsbenchid.group(1))]
arch_ops = re.findall(r"([^\|\~\+]+)~\d", arch_str)
        return np.hstack([np.array(enc_dict[op_name]) for op_name in arch_ops])
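
# A hedged usage sketch, assuming the NATS-Bench-TSS benchmark files were
# downloaded locally (the path below is a placeholder) and that nats_bench,
# xautodl and archai are installed.
space = NatsbenchTssSearchSpace("./NATS-tss-v1_0-3ffb9-simple", "cifar10")
model = space.random_sample()       # draw a random cell architecture
neighbor = space.mutate(model)      # swap a single operation
features = space.encode(model)      # 6 edges x 4-dim one-hot = 24-dim vector
print(model.archid, neighbor.archid, features.shape)
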
id: 1,010
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkgpdb.endpoint import endpoint_data
class DescribeDBInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'gpdb', '2016-05-03', 'DescribeDBInstances')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DBInstanceModes(self): # Array
return self.get_query_params().get('DBInstanceModes')
def set_DBInstanceModes(self, DBInstanceModes): # Array
for index1, value1 in enumerate(DBInstanceModes):
self.add_query_param('DBInstanceModes.' + str(index1 + 1), value1)
def get_DBInstanceStatuses(self): # Array
return self.get_query_params().get('DBInstanceStatuses')
def set_DBInstanceStatuses(self, DBInstanceStatuses): # Array
for index1, value1 in enumerate(DBInstanceStatuses):
self.add_query_param('DBInstanceStatuses.' + str(index1 + 1), value1)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBInstanceDescription(self): # String
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self, DBInstanceDescription): # String
self.add_query_param('DBInstanceDescription', DBInstanceDescription)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_DBInstanceIds(self): # String
return self.get_query_params().get('DBInstanceIds')
def set_DBInstanceIds(self, DBInstanceIds): # String
self.add_query_param('DBInstanceIds', DBInstanceIds)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceCategories(self): # Array
return self.get_query_params().get('DBInstanceCategories')
def set_DBInstanceCategories(self, DBInstanceCategories): # Array
for index1, value1 in enumerate(DBInstanceCategories):
self.add_query_param('DBInstanceCategories.' + str(index1 + 1), value1)
def get_InstanceDeployTypes(self): # Array
return self.get_query_params().get('InstanceDeployTypes')
def set_InstanceDeployTypes(self, InstanceDeployTypes): # Array
for index1, value1 in enumerate(InstanceDeployTypes):
self.add_query_param('InstanceDeployTypes.' + str(index1 + 1), value1)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType) | null |
1,011 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as f
from overrides import overrides
from tqdm import tqdm
from archai.discrete_search.api.predictor import MeanVar, Predictor
class PredictiveDNNEnsemble(Predictor):
"""Deep Neural Network Ensemble predictor.
Predicts the outcome of a set of expensive objectives using an ensemble of MLP models.
"""
def __init__(
self,
num_ensemble_members: Optional[int] = 5,
num_layers: Optional[int] = 5,
width: Optional[int] = 64,
lr: Optional[float] = 1e-4,
num_tr_steps: Optional[int] = 2_000,
replace_nan_value: float = -1.0,
device: Optional[str] = "cuda",
) -> None:
"""Initialize the predictor.
Args:
num_ensemble_members: Number of ensemble members.
num_layers: Number of layers of each member.
width: Number of neurons in each hidden layer.
lr: Learning rate of each ensemble member.
num_tr_steps: Number of training steps of each member.
replace_nan_value: Value used to replace NaNs (often used to represent an
unused architecture parameter). Defaults to -1.0.
device: Device to use for training.
"""
self.num_ensemble_members = num_ensemble_members
self.num_layers = num_layers
self.width = width
self.lr = lr
self.num_tr_steps = num_tr_steps
self.replace_nan_value = replace_nan_value
self.is_fit = False
self.device = device
self.X_meansd = None
self.y_meansd = None
def to_cuda(self) -> None:
"""Moves the predictor to CUDA."""
for m in self.ensemble:
m.cuda()
self.device = "cuda"
def to_cpu(self) -> None:
"""Moves the predictor to CPU."""
for m in self.ensemble:
m.cpu()
self.device = "cpu"
@overrides
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
assert len(X.shape) == 2
assert len(y.shape) == 2
_, num_features = X.shape
_, num_objectives = y.shape
X = np.nan_to_num(X, nan=self.replace_nan_value)
self.X_meansd = np.mean(X, axis=0), np.std(X, axis=0)
self.y_meansd = np.mean(y, axis=0), np.std(y, axis=0)
# Initialize ensemble models
self.ensemble = [
FFEnsembleMember(num_objectives, num_features, self.num_layers, self.width).to(self.device)
for _ in range(self.num_ensemble_members)
]
# Normalizes features and targets
X = (X.copy() - self.X_meansd[0]) / (self.X_meansd[1] + 1e-7)
y = (y.copy() - self.y_meansd[0]) / (self.y_meansd[1] + 1e-7)
Xt = torch.tensor(X, dtype=torch.float32).to(self.device)
yt = torch.tensor(y, dtype=torch.float32).to(self.device)
# TODO: should we be splitting data into
# train and val?
for member in tqdm(self.ensemble, desc="Training DNN Ensemble..."):
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.Adam(member.parameters(), lr=self.lr)
member.train()
for t in range(self.num_tr_steps):
y_pred = member(Xt)
loss = criterion(y_pred.squeeze(), yt.squeeze())
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.is_fit = True
@overrides
def predict(self, X: np.ndarray) -> MeanVar:
assert len(X.shape) == 2
assert self.is_fit, "PredictiveDNNEnsemble: predict called before fit!"
X = (X.copy() - self.X_meansd[0]) / (self.X_meansd[1] + 1e-7)
Xt = torch.tensor(X, dtype=torch.float32).to(self.device)
preds = []
with torch.no_grad():
for member in self.ensemble:
member.eval()
pred = member(Xt).to("cpu").numpy()
preds.append(pred * (self.y_meansd[1] + 1e-7) + self.y_meansd[0])
preds = np.array(preds)
return MeanVar(mean=np.mean(preds, axis=0), var=np.var(preds, axis=0))
class FFEnsembleMember(nn.Module):
"""Feedforward ensemble member."""
def __init__(
self,
num_objectives: Optional[int] = 1,
input_feat_len: Optional[int] = 128,
num_layers: Optional[int] = 10,
width: Optional[int] = 20,
) -> None:
"""Initialize the ensemble member.
Args:
num_objectives: Number of objectives.
input_feat_len: Length of input features.
num_layers: Number of layers.
width: Width of each layer.
"""
super(FFEnsembleMember, self).__init__()
self.input_feat_len = input_feat_len
self.num_layers = num_layers
self.width = width
self.linears = nn.ModuleList([nn.Linear(self.input_feat_len, width)])
self.linears.extend([nn.Linear(width, width) for i in range(1, self.num_layers - 1)])
self.output = nn.Linear(width, num_objectives)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for layer in self.linears:
x = f.relu(layer(x))
return self.output(x) | null |
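# --- Usage sketch (editor's addition, not part of the original module) ---
# A quick smoke test on random data; the shapes, the tiny training budget and
# the CPU device below are illustrative assumptions, not recommended settings.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(32, 8))  # 32 architectures, 8 encoded features
    y = rng.normal(size=(32, 2))  # 2 expensive objectives
    predictor = PredictiveDNNEnsemble(num_ensemble_members=2, num_tr_steps=10, device="cpu")
    predictor.fit(X, y)
    result = predictor.predict(X[:4])
    print(result.mean.shape, result.var.shape)  # -> (4, 2) (4, 2)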
1,012 | # Generated by Django 2.2.10 on 2020-04-10 13:22
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
from wagtail.core.rich_text import RichText
def page_to_streamfield(page):
changed = False
if page.description.raw_text and not page.description:
page.description = [('paragraph', RichText(page.description.raw_text))]
changed = True
return page, changed
def pagerevision_to_streamfield(revision_data):
changed = False
description = revision_data.get('description')
if description:
try:
json.loads(description)
except ValueError:
revision_data['description'] = json.dumps(
[{
"value": description,
"type": "paragraph"
}],
cls=DjangoJSONEncoder)
changed = True
else:
# It's already valid JSON. Leave it.
pass
return revision_data, changed
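# Illustrative example (editor's note): a revision whose 'description' holds the
# raw rich-text string '<p>Hello</p>' fails json.loads() and is rewritten to
# '[{"value": "<p>Hello</p>", "type": "paragraph"}]'; a value that already
# parses as JSON is left untouched.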
def page_to_richtext(page):
changed = False
if page.description.raw_text is None:
raw_text = ''.join([
child.value.source for child in page.description
if child.block_type == 'paragraph'
])
page.description = raw_text
changed = True
return page, changed
def pagerevision_to_richtext(revision_data):
changed = False
description = revision_data.get('description', 'definitely non-JSON string')
if description:
try:
description_data = json.loads(description)
except ValueError:
# It's not apparently a StreamField. Leave it.
pass
else:
raw_text = ''.join([
child['value'] for child in description_data
if child['type'] == 'paragraph'
])
revision_data['description'] = raw_text
changed = True
return revision_data, changed
def convert(apps, schema_editor, page_converter, pagerevision_converter):
"""
Convert RichTextField content to StreamField or vice versa.
"""
for PageModel in [apps.get_model("cms", "ProgramPage"), apps.get_model("cms", "BenefitsPage")]:
for page in PageModel.objects.all():
page, changed = page_converter(page)
if changed:
page.save()
for revision in page.revisions.all():
revision_data = json.loads(revision.content_json)
revision_data, changed = pagerevision_converter(revision_data)
if changed:
revision.content_json = json.dumps(revision_data, cls=DjangoJSONEncoder)
revision.save()
def convert_to_streamfield(apps, schema_editor):
return convert(apps, schema_editor, page_to_streamfield, pagerevision_to_streamfield)
def convert_to_richtext(apps, schema_editor):
return convert(apps, schema_editor, page_to_richtext, pagerevision_to_richtext)
class Migration(migrations.Migration):
dependencies = [
('cms', '0047_courseteam_remove_constraint'),
]
operations = [
migrations.AlterField(
model_name='benefitspage',
name='content',
field=wagtail.core.fields.StreamField([('rich_text', wagtail.core.blocks.RichTextBlock()), ('image_with_link', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(help_text='The image to display.', label='Image', required=True)), ('link', wagtail.core.blocks.URLBlock(help_text='Absolute URL to the image, like https://example.com/some_image.jpg', label='Link', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Center'), ('right', 'Right'), ('left', 'Left')], max_length=10)), ('width', wagtail.core.blocks.IntegerBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False))], blank=True, help_text='Upload an image with a clickable link'))], blank=True, help_text='The content of the benefits page'),
),
migrations.AlterField(
model_name='benefitspage',
name='description',
field=wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock(blank=True)), ('image_with_link', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(help_text='The image to display.', label='Image', required=True)), ('link', wagtail.core.blocks.URLBlock(help_text='Absolute URL to the image, like https://example.com/some_image.jpg', label='Link', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Center'), ('right', 'Right'), ('left', 'Left')], max_length=10)), ('width', wagtail.core.blocks.IntegerBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False))], blank=True, help_text='Upload an image with a clickable link'))], blank=True, help_text='The description shown on the benefits page'),
),
migrations.AlterField(
model_name='programpage',
name='description',
field=wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock(blank=True)), ('image_with_link', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(help_text='The image to display.', label='Image', required=True)), ('link', wagtail.core.blocks.URLBlock(help_text='Absolute URL to the image, like https://example.com/some_image.jpg', label='Link', required=True)), ('align', wagtail.core.blocks.ChoiceBlock(choices=[('center', 'Center'), ('right', 'Right'), ('left', 'Left')], max_length=10)), ('width', wagtail.core.blocks.IntegerBlock(required=False)), ('height', wagtail.core.blocks.IntegerBlock(required=False))], blank=True, help_text='Upload an image with a clickable link'))], blank=True, help_text='The description shown on the program page'),
),
migrations.RunPython(convert_to_streamfield, convert_to_richtext),
] | null |
1,013 | """
Fencepost-simple graph structure implementation.
"""
# Currently (2013.7.12) only used in easing the parsing of graph datatype data.
class SimpleGraphNode:
"""
Node representation.
"""
def __init__(self, index, **data):
"""
:param index: index of this node in some parent list
:type index: int
:param data: any extra data that needs to be saved
:type data: (variadic dictionary)
"""
# a bit application specific (could be 'id')
self.index = index
self.data = data
class SimpleGraphEdge:
"""
Edge representation.
"""
def __init__(self, source_index, target_index, **data):
"""
:param source_index: index of the edge's source node in some parent list
:type source_index: int
:param target_index: index of the edge's target node in some parent list
:type target_index: int
:param data: any extra data that needs to be saved
:type data: (variadic dictionary)
"""
self.source_index = source_index
self.target_index = target_index
self.data = data
class SimpleGraph:
"""
Each node is unique (by id) and stores its own index in the node list/odict.
Each edge is represented as two indices into the node list/odict.
Both nodes and edges allow storing extra information if needed.
Allows:
multiple edges between two nodes
self referential edges (an edge from a node to itself)
These graphs are not specifically directed but since source and targets on the
edges are listed - it could easily be used that way.
"""
def __init__(self, nodes=None, edges=None):
# use a dict (insertion-ordered in modern Python) so that edge indices actually match the final node list indices
self.nodes = nodes or {}
self.edges = edges or []
def add_node(self, node_id, **data):
"""
Adds a new node only if it doesn't already exist.
:param node_id: some unique identifier
:type node_id: (hashable)
:param data: any extra data that needs to be saved
:type data: (variadic dictionary)
:returns: the new node
"""
if node_id in self.nodes:
return self.nodes[node_id]
node_index = len(self.nodes)
new_node = SimpleGraphNode(node_index, **data)
self.nodes[node_id] = new_node
return new_node
def add_edge(self, source_id, target_id, **data):
"""
Adds a new edge, creating the source and target nodes first if they don't already exist.
:param source_id: the id of the source node
:type source_id: (hashable)
:param target_id: the id of the target node
:type target_id: (hashable)
:param data: any extra data that needs to be saved for the edge
:type data: (variadic dictionary)
:returns: the new edge
.. note:: although this will create new nodes if necessary, there's
no way to pass `data` to them - so if you need to associate more data with
the nodes, use `add_node` first.
"""
# adds target_id to source_id's edge list
# adding source_id and/or target_id to nodes if not there already
if source_id not in self.nodes:
self.add_node(source_id)
if target_id not in self.nodes:
self.add_node(target_id)
new_edge = SimpleGraphEdge(self.nodes[source_id].index, self.nodes[target_id].index, **data)
self.edges.append(new_edge)
return new_edge
def gen_node_dicts(self):
"""
Returns a generator that yields node dictionaries in the form:
{ 'id': <the nodes unique id>, 'data': <any additional node data> }
"""
for node_id, node in self.nodes.items():
yield {"id": node_id, "data": node.data}
def gen_edge_dicts(self):
"""
Returns a generator that yields edge dictionaries in the form::
{
'source': <the index of the source node in the graph's node list>,
'target': <the index of the target node in the graph's node list>,
'data' : <any additional edge data>
}
"""
for edge in self.edges:
yield {"source": edge.source_index, "target": edge.target_index, "data": edge.data}
def as_dict(self):
"""
Returns a dictionary of the form::
{ 'nodes': <a list of node dictionaries>, 'edges': <a list of edge dictionaries> }
"""
return {"nodes": list(self.gen_node_dicts()), "edges": list(self.gen_edge_dicts())} | null |
1,014 | # Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
"""JLAP reader."""
from __future__ import annotations
import logging
from collections import UserList
from hashlib import blake2b
from pathlib import Path
from typing import Iterable, Iterator
log = logging.getLogger(__name__)
DIGEST_SIZE = 32  # 32 bytes (256 bits), comfortably above a 160-bit 'for security' minimum
DEFAULT_IV = b"\0" * DIGEST_SIZE
def keyed_hash(data: bytes, key: bytes):
"""Keyed hash."""
return blake2b(data, key=key, digest_size=DIGEST_SIZE)
def line_and_pos(lines: Iterable[bytes], pos=0) -> Iterator[tuple[int, bytes]]:
r"""
:param lines: iterator over input split by '\n', with '\n' removed.
:param pos: initial position
"""
for line in lines:
yield pos, line
pos += len(line) + 1
class JLAP(UserList):
@classmethod
def from_lines(cls, lines: Iterable[bytes], iv: bytes, pos=0, verify=True):
r"""
:param lines: iterator over input split by b'\n', with b'\n' removed
:param pos: initial position
:param iv: initialization vector (first line of .jlap stream, hex
decoded). Ignored if pos==0.
:param verify: assert last line equals computed checksum of previous
line. Useful for writing new .jlap files if False.
:raises ValueError: if trailing and computed checksums do not match
:return: list of (offset, line, checksum)
"""
# save initial iv in case there were no new lines
buffer: list[tuple[int, str, str]] = [(-1, iv.hex(), iv.hex())]
initial_pos = pos
for pos, line in line_and_pos(lines, pos=pos):
if pos == 0:
iv = bytes.fromhex(line.decode("utf-8"))
buffer = [(0, iv.hex(), iv.hex())]
else:
iv = keyed_hash(line, iv).digest()
buffer.append((pos, line.decode("utf-8"), iv.hex()))
log.debug("%d bytes read", pos - initial_pos) # maybe + length of last line
if verify:
if buffer[-1][1] != buffer[-2][-1]:
raise ValueError("checksum mismatch")
else:
log.info("Checksum OK")
return cls(buffer)
@classmethod
def from_path(cls, path: Path | str, verify=True):
# in binary mode, line separator is hardcoded as \n
with Path(path).open("rb") as p:
return cls.from_lines(
(line.rstrip(b"\n") for line in p), b"", verify=verify
)
def add(self, line: str):
"""
Add line to buffer, following checksum rules.
Buffer must not be empty.
(Remember to pop trailing checksum and possibly trailing metadata line, if
appending to a complete jlap file)
Less efficient than creating a new buffer from many lines and our last iv,
and extending.
:return: self
"""
if "\n" in line:
raise ValueError("\\n not allowed in line")
pos, last_line, iv = self[-1]
# include last line's utf-8 encoded length, plus 1 in pos?
pos += len(last_line.encode("utf-8")) + 1
self.extend(
JLAP.from_lines(
(line.encode("utf-8"),), bytes.fromhex(iv), pos, verify=False
)[1:]
)
return self
def terminate(self):
"""
Add trailing checksum to buffer.
:return: self
"""
_, _, iv = self[-1]
self.add(iv)
return self
def write(self, path: Path):
"""Write buffer to path."""
with Path(path).open("w", encoding="utf-8", newline="\n") as p:
return p.write("\n".join(b[1] for b in self))
@property
def METHOD_NAME(self):
"""All lines except the first, and last two."""
return self[1:-2]
@property
def penultimate(self):
"""Next-to-last line. Should contain the footer."""
return self[-2]
@property
def last(self):
"""Last line. Should contain the trailing checksum."""
return self[-1] | null |
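if __name__ == "__main__":
    # Usage sketch (editor's addition): build a tiny .jlap buffer in memory,
    # append one payload line and terminate with a trailing checksum. The
    # payload content is made up for illustration.
    first_line = DEFAULT_IV.hex().encode("utf-8")  # line 0 carries the hex iv
    jlap = JLAP.from_lines((first_line,), iv=b"", verify=False)
    jlap.add('{"to": "abc", "from": "def", "patch": []}')
    jlap.terminate()
    for offset, line, checksum in jlap:
        print(offset, checksum[:16], line[:40])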
1,015 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for file_utils."""
import os
import time
from unittest import mock
from absl.testing import flagsaver
from etils import epath
import pytest
from tensorflow_datasets import testing
from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import file_utils
from tensorflow_datasets.core.utils import read_config
def test_default_data_dir():
data_dir = file_utils.get_default_data_dir(given_data_dir=None)
assert data_dir
assert isinstance(data_dir, str)
def test_list_dataset_variants_with_configs(mock_fs: testing.MockFs):
data_dir = epath.Path('/a')
dataset_dir = data_dir / 'ds'
configs_and_versions = {
'x': ['1.0.0', '1.0.1'],
'y': ['2.0.0'],
}
for config, versions in configs_and_versions.items():
for version in versions:
mock_fs.add_file(dataset_dir / config / version / 'dataset_info.json')
mock_fs.add_file(dataset_dir / config / version / 'features.json')
references = sorted(
file_utils.list_dataset_variants(
dataset_name='my_ds', dataset_dir=dataset_dir
)
)
assert references == [
naming.DatasetReference(
dataset_name='my_ds', config='x', version='1.0.0', data_dir=data_dir
),
naming.DatasetReference(
dataset_name='my_ds', config='x', version='1.0.1', data_dir=data_dir
),
naming.DatasetReference(
dataset_name='my_ds', config='y', version='2.0.0', data_dir=data_dir
),
]
def test_list_dataset_variants_with_configs_no_versions(
mock_fs: testing.MockFs,
):
data_dir = epath.Path('/a')
dataset_dir = data_dir / 'ds'
configs_and_versions = {
'x': ['1.0.0', '1.0.1'],
'y': ['2.0.0'],
}
for config, versions in configs_and_versions.items():
for version in versions:
mock_fs.add_file(dataset_dir / config / version / 'dataset_info.json')
mock_fs.add_file(dataset_dir / config / version / 'features.json')
references = sorted(
file_utils.list_dataset_variants(
dataset_name='my_ds', dataset_dir=dataset_dir, include_versions=False
)
)
assert references == [
naming.DatasetReference(
dataset_name='my_ds', config='x', data_dir=data_dir
),
naming.DatasetReference(
dataset_name='my_ds', config='y', data_dir=data_dir
),
]
def test_list_dataset_variants_without_configs(mock_fs: testing.MockFs):
data_dir = epath.Path('/a')
dataset_dir = data_dir / 'ds'
# Version 1.0.0 doesn't have features.json, because it was generated with an
# old version of TFDS.
mock_fs.add_file(dataset_dir / '1.0.0' / 'dataset_info.json')
mock_fs.add_file(dataset_dir / '1.0.1' / 'dataset_info.json')
mock_fs.add_file(dataset_dir / '1.0.1' / 'features.json')
# List dirs including datasets generated by old TFDS versions.
references = sorted(
file_utils.list_dataset_variants(
dataset_name='my_ds',
dataset_dir=dataset_dir,
include_versions=True,
include_old_tfds_version=True,
)
)
assert references == [
naming.DatasetReference(
dataset_name='my_ds', version='1.0.0', data_dir=data_dir
),
naming.DatasetReference(
dataset_name='my_ds', version='1.0.1', data_dir=data_dir
),
]
# List dirs excluding datasets generated by old TFDS versions.
references = sorted(
file_utils.list_dataset_variants(
dataset_name='my_ds',
dataset_dir=dataset_dir,
include_versions=True,
include_old_tfds_version=False,
)
)
assert references == [
naming.DatasetReference(
dataset_name='my_ds', version='1.0.1', data_dir=data_dir
)
]
def test_list_datasets_in_data_dir(mock_fs: testing.MockFs):
data_dir = epath.Path('/a')
mock_fs.add_file(data_dir / 'ds1/config1/1.0.0/dataset_info.json')
mock_fs.add_file(data_dir / 'ds1/config1/1.0.0/features.json')
mock_fs.add_file(data_dir / 'ds1/config1/2.0.0/dataset_info.json')
mock_fs.add_file(data_dir / 'ds1/config1/2.0.0/features.json')
mock_fs.add_file(data_dir / 'ds1/config2/1.0.0/dataset_info.json')
mock_fs.add_file(data_dir / 'ds1/config2/1.0.0/features.json')
mock_fs.add_file(data_dir / 'ds2/1.0.0/dataset_info.json')
mock_fs.add_file(data_dir / 'ds2/1.0.0/features.json')
# The following are problematic and should thus be ignored.
mock_fs.add_file(
os.path.join(data_dir, 'invalid-name/1.0.0/features.json'), content='x'
)
mock_fs.add_file(
os.path.join(data_dir, 'invalid_version1/1.a.b/features.json'),
content='x',
)
mock_fs.add_file(
os.path.join(data_dir, 'invalid_version2/1.2.3.4/features.json'),
content='x',
)
references = sorted(
file_utils.list_datasets_in_data_dir(data_dir=epath.Path(data_dir))
)
data_dir = epath.Path('/a')
assert references == [
naming.DatasetReference(
dataset_name='ds1',
config='config1',
version='1.0.0',
data_dir=data_dir,
),
naming.DatasetReference(
dataset_name='ds1',
config='config1',
version='2.0.0',
data_dir=data_dir,
),
naming.DatasetReference(
dataset_name='ds1',
config='config2',
version='1.0.0',
data_dir=data_dir,
),
naming.DatasetReference(
dataset_name='ds2', version='1.0.0', data_dir=data_dir
),
]
def test_list_datasets_in_data_dir_with_namespace(mock_fs: testing.MockFs):
namespace = 'ns'
data_dir = epath.Path('/a')
mock_fs.add_file(data_dir / 'ds1/config1/1.0.0/dataset_info.json')
mock_fs.add_file(data_dir / 'ds1/config1/1.0.0/features.json')
references = sorted(
file_utils.list_datasets_in_data_dir(
data_dir=epath.Path(data_dir),
namespace=namespace,
include_configs=True,
include_versions=True,
)
)
data_dir = epath.Path('/a')
assert references == [
naming.DatasetReference(
dataset_name='ds1',
namespace=namespace,
config='config1',
version='1.0.0',
data_dir=data_dir,
),
]
@pytest.mark.parametrize(
['filename', 'result'],
[
('abc', False),
('dataset_info.json', True),
('features.json', True),
('mnist-test.tfrecord-00000-of-00001', True),
('mnist-test.arrayrecord-00000-of-00001', True),
],
)
def test_looks_like_a_tfds_file(filename, result):
assert file_utils._looks_like_a_tfds_file(filename) == result
@pytest.mark.parametrize(
['path', 'glob_result', 'expected'],
[
('/a/*', ['/a/b', '/a/c'], ['/a/b', '/a/c']),
('/a/b', None, ['/a/b']),
('a/*', None, ['a/*']),
('/a/b@*', None, ['/a/b@*']),
],
)
def test_expand_glob(path, glob_result, expected):
with mock.patch.object(epath, 'Path') as mock_epath:
mock_epath.return_value.expanduser.return_value = path
mock_epath.return_value.glob.return_value = glob_result
actual = file_utils.expand_glob(path)
if glob_result is not None:
mock_epath.return_value.glob.assert_called_once_with(path[1:])
else:
mock_epath.return_value.glob.assert_not_called()
actual = [os.fspath(p) for p in actual]
assert actual == expected
if __name__ == '__main__':
testing.test_main() | null |
1,016 | import pytest
from unittest import mock
from operator import attrgetter
from django.core.management import call_command
from osf_tests.factories import (
PreprintProviderFactory,
PreprintFactory,
ProjectFactory,
RegistrationProviderFactory,
RegistrationFactory,
)
@pytest.mark.django_db
class TestRecatalogMetadata:
@pytest.fixture
def mock_update_share_task(self):
with mock.patch('osf.management.commands.recatalog_metadata.task__update_share') as _shmock:
yield _shmock
@pytest.fixture
def preprint_provider(self):
return PreprintProviderFactory()
@pytest.fixture
def preprints(self, preprint_provider):
return sorted_by_id([
PreprintFactory(provider=preprint_provider)
for _ in range(7)
])
@pytest.fixture
def registration_provider(self):
return RegistrationProviderFactory()
@pytest.fixture
def registrations(self, registration_provider):
return sorted_by_id([
RegistrationFactory(provider=registration_provider)
for _ in range(7)
])
@pytest.fixture
def projects(self, registrations):
return sorted_by_id([
ProjectFactory()
for _ in range(7)
] + [
registration.registered_from
for registration in registrations
])
@pytest.fixture
def files(self, preprints):
_files = sorted_by_id([
preprint.primary_file
for preprint in preprints
])
for _file in _files:
_file.get_guid(create=True)
return _files
@pytest.fixture
def users(self, preprints, registrations, projects):
return sorted_by_id(list(set([
project.creator
for project in projects
] + [
registration.creator
for registration in registrations
] + [
preprint.creator
for preprint in preprints
])))
def test_recatalog_metadata(self, mock_update_share_task, preprint_provider, preprints, registration_provider, registrations, projects, files, users):
# test preprints
call_command(
'recatalog_metadata',
'--preprints',
'--providers',
preprint_provider._id,
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(preprints)
mock_update_share_task.reset_mock()
# test registrations
call_command(
'recatalog_metadata',
'--registrations',
'--providers',
registration_provider._id,
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(registrations)
mock_update_share_task.reset_mock()
# test projects
call_command(
'recatalog_metadata',
'--projects',
'--all-providers',
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(projects)
mock_update_share_task.reset_mock()
# test files
call_command(
'recatalog_metadata',
'--files',
'--all-providers',
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(files)
mock_update_share_task.reset_mock()
# test users
call_command(
'recatalog_metadata',
'--users',
'--all-providers',
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(users)
mock_update_share_task.reset_mock()
# test chunking
call_command(
'recatalog_metadata',
'--registrations',
'--all-providers',
f'--start-id={registrations[1].id}',
'--chunk-size=3',
'--chunk-count=1',
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(registrations[1:4])
mock_update_share_task.reset_mock()
# slightly different chunking
call_command(
'recatalog_metadata',
'--registrations',
'--all-providers',
f'--start-id={registrations[2].id}',
'--chunk-size=2',
'--chunk-count=2',
)
assert mock_update_share_task.apply_async.mock_calls == expected_apply_async_calls(registrations[2:6])
###
# local utils
def expected_apply_async_calls(items):
return [
mock.call(kwargs={
'guid': _item.guids.values_list('_id', flat=True).first(),
'is_backfill': True,
})
for _item in items
]
def sorted_by_id(things_with_ids):
return sorted(
things_with_ids,
key=attrgetter('id')
) | null |
1,017 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.utils import timezone
from nose.tools import * # noqa
from tests.base import OsfTestCase
from osf.models import NodeLog
from osf_tests.factories import RegistrationFactory, UserFactory
from scripts.embargo_registrations import main
class TestRetractRegistrations(OsfTestCase):
def setUp(self):
super(TestRetractRegistrations, self).setUp()
self.user = UserFactory()
self.registration = RegistrationFactory(creator=self.user)
self.registration.embargo_registration(
self.user,
timezone.now() + timedelta(days=10)
)
self.registration.save()
def test_new_embargo_should_be_unapproved(self):
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.embargo_end_date)
main(dry_run=False)
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.embargo_end_date)
def test_should_not_activate_pending_embargo_less_than_48_hours_old(self):
self.registration.embargo.initiation_date = timezone.now() - timedelta(hours=47)
self.registration.embargo.save()
assert_false(self.registration.embargo_end_date)
main(dry_run=False)
self.registration.embargo.refresh_from_db()
self.registration.refresh_from_db()
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.embargo_end_date)
def test_should_activate_pending_embargo_that_is_48_hours_old(self):
self.registration.embargo.initiation_date = timezone.now() - timedelta(hours=48)
self.registration.embargo.save()
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.embargo_end_date)
main(dry_run=False)
self.registration.embargo.refresh_from_db()
self.registration.refresh_from_db()
assert_true(self.registration.is_embargoed)
assert_true(self.registration.embargo_end_date)
def test_should_activate_pending_embargo_more_than_48_hours_old(self):
self.registration.embargo.initiation_date = timezone.now() - timedelta(days=365)
self.registration.embargo.save()
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.embargo_end_date)
main(dry_run=False)
self.registration.embargo.refresh_from_db()
self.registration.refresh_from_db()
assert_true(self.registration.is_embargoed)
assert_false(self.registration.is_pending_embargo)
assert_true(self.registration.embargo_end_date)
def test_embargo_past_end_date_should_be_completed(self):
self.registration.embargo.accept()
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
self.registration.embargo.end_date = timezone.now() - timedelta(days=1)
self.registration.embargo.save()
assert_false(self.registration.is_public)
main(dry_run=False)
self.registration.embargo.refresh_from_db()
self.registration.refresh_from_db()
assert_true(self.registration.is_public)
assert_false(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
assert_equal(self.registration.embargo.state, 'completed')
def test_embargo_before_end_date_should_not_be_completed(self):
self.registration.embargo.accept()
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
self.registration.embargo.end_date = timezone.now() + timedelta(days=1)
self.registration.embargo.save()
assert_false(self.registration.is_public)
main(dry_run=False)
self.registration.embargo.refresh_from_db()
assert_false(self.registration.is_public)
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
def test_embargo_approval_adds_to_parent_projects_log(self):
assert_false(
self.registration.registered_from.logs.filter(
action=NodeLog.EMBARGO_APPROVED
).exists()
)
self.registration.embargo.initiation_date = timezone.now() - timedelta(days=365)
self.registration.embargo.save()
main(dry_run=False)
assert_true(
self.registration.registered_from.logs.filter(
action=NodeLog.EMBARGO_APPROVED
).exists()
)
def test_embargo_completion_adds_to_parent_projects_log(self):
assert_false(
self.registration.registered_from.logs.filter(
action=NodeLog.EMBARGO_COMPLETED
).exists()
)
self.registration.embargo.accept()
self.registration.embargo.end_date = timezone.now() - timedelta(days=1)
self.registration.embargo.save()
main(dry_run=False)
assert_true(
self.registration.registered_from.logs.filter(
action=NodeLog.EMBARGO_COMPLETED
).exists()
) | null |
1,018 | """
Accessible models can be read and copied but not modified or deleted.
Owned models can be modified and deleted.
"""
from typing import (
Any,
Optional,
Type,
TYPE_CHECKING,
)
from galaxy import (
exceptions,
model,
)
if TYPE_CHECKING:
from sqlalchemy.orm import Query
class AccessibleManagerMixin:
"""
A security interface to check if a User can read/view an item.
This can also be thought of as 'read but not modify' privileges.
"""
# declare what we are using from base ModelManager
model_class: Type[Any]
def by_id(self, id: int):
...
# don't want to override by_id since consumers will also want to fetch w/o any security checks
def is_accessible(self, item: "Query", user: model.User, **kwargs: Any) -> bool:
"""
Return True if the item is accessible to user.
"""
# override in subclasses
raise exceptions.NotImplemented("Abstract interface Method")
def get_accessible(self, id: int, user: model.User, **kwargs: Any) -> "Query":
"""
Return the item with the given id if it's accessible to user,
otherwise raise an error.
:raises exceptions.ItemAccessibilityException:
"""
item = self.by_id(id)
return self.error_unless_accessible(item, user, **kwargs)
def error_unless_accessible(self, item: "Query", user, **kwargs):
"""
Raise an error if the item is NOT accessible to user, otherwise return the item.
:raises exceptions.ItemAccessibilityException:
"""
if self.is_accessible(item, user, **kwargs):
return item
raise exceptions.ItemAccessibilityException(f"{self.model_class.__name__} is not accessible by user")
# TODO:?? are these even useful?
def list_accessible(self, user, **kwargs):
"""
Return a list of items accessible to the user, raising an error if ANY
are inaccessible.
:raises exceptions.ItemAccessibilityException:
"""
raise exceptions.NotImplemented("Abstract interface Method")
# NOTE: this will be a large, inefficient list if filters are not passed in kwargs
# items = ModelManager.list( self, trans, **kwargs )
# return [ self.error_unless_accessible( trans, item, user ) for item in items ]
def filter_accessible(self, user, **kwargs):
"""
Return a list of items accessible to the user.
"""
raise exceptions.NotImplemented("Abstract interface Method")
# NOTE: this will be a large, inefficient list if filters are not passed in kwargs
# items = ModelManager.list( self, trans, **kwargs )
# return filter( lambda item: self.is_accessible( trans, item, user ), items )
class OwnableManagerMixin:
"""
A security interface to check if a User is an item's owner.
Some resources are associated with the User that created or imported them
and these Users can be considered the models' owner.
This can also be thought of as write/edit privileges.
"""
# declare what we are using from base ModelManager
model_class: Type[Any]
def by_id(self, id: int):
...
def is_owner(self, item: model.Base, user: Optional[model.User], **kwargs: Any) -> bool:
"""
Return True if user owns the item.
"""
# override in subclasses
raise exceptions.NotImplemented("Abstract interface Method")
def get_owned(self, id: int, user: Optional[model.User], **kwargs: Any) -> Any:
"""
Return the item with the given id if owned by the user,
otherwise raise an error.
:raises exceptions.ItemOwnershipException:
"""
item = self.by_id(id)
return self.error_unless_owner(item, user, **kwargs)
def error_unless_owner(self, item, user: Optional[model.User], **kwargs: Any):
"""
Raise an error if the item is NOT owned by user, otherwise return the item.
:raises exceptions.ItemAccessibilityException:
"""
if self.is_owner(item, user, **kwargs):
return item
raise exceptions.ItemOwnershipException(f"{self.model_class.__name__} is not owned by user")
def list_owned(self, user, **kwargs):
"""
Return a list of items owned by the user, raising an error if ANY
are not.
:raises exceptions.ItemAccessibilityException:
"""
raise exceptions.NotImplemented("Abstract interface Method")
# just alias to by_user (easier/same thing)
# return self.by_user( trans, user, **kwargs )
def filter_owned(self, user, **kwargs):
"""
Return a list of items owned by the user.
"""
# just alias to list_owned
return self.list_owned(user, **kwargs)
def get_mutable(self, id: int, user: Optional[model.User], **kwargs: Any) -> Any:
"""
Return the item with the given id if the user can mutate it,
otherwise raise an error. The user must be the owner of the item.
:raises exceptions.ItemOwnershipException:
"""
item = self.get_owned(id, user, **kwargs)
self.error_unless_mutable(item)
return item
def error_unless_mutable(self, item):
"""
Raise an error if the item is NOT mutable.
Items purged or archived are considered immutable.
:raises exceptions.ItemImmutableException:
"""
if getattr(item, "purged", False) or getattr(item, "archived", False):
raise exceptions.ItemImmutableException(f"{self.model_class.__name__} is immutable") | null |
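# --- Editor's sketch (not part of Galaxy): a minimal manager combining both
# mixins. The model class, the `user_id` ownership rule and the `published`
# accessibility rule are illustrative assumptions, not Galaxy's actual policy.
class ExampleItemManager(AccessibleManagerMixin, OwnableManagerMixin):
    model_class = model.History

    def is_owner(self, item, user: Optional[model.User], **kwargs: Any) -> bool:
        # owned by the user that created/imported the item
        return user is not None and item.user_id == user.id

    def is_accessible(self, item, user: model.User, **kwargs: Any) -> bool:
        # readable when published, or when the caller owns it
        return bool(getattr(item, "published", False)) or self.is_owner(item, user, **kwargs)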
1,019 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateSnapshotRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateSnapshot','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_InstantAccess(self): # Boolean
return self.get_query_params().get('InstantAccess')
def set_InstantAccess(self, InstantAccess): # Boolean
self.add_query_param('InstantAccess', InstantAccess)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_SnapshotName(self): # String
return self.get_query_params().get('SnapshotName')
def set_SnapshotName(self, SnapshotName): # String
self.add_query_param('SnapshotName', SnapshotName)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_InstantAccessRetentionDays(self): # Integer
return self.get_query_params().get('InstantAccessRetentionDays')
def set_InstantAccessRetentionDays(self, InstantAccessRetentionDays): # Integer
self.add_query_param('InstantAccessRetentionDays', InstantAccessRetentionDays)
def get_StorageLocationArn(self): # String
return self.get_query_params().get('StorageLocationArn')
def set_StorageLocationArn(self, StorageLocationArn): # String
self.add_query_param('StorageLocationArn', StorageLocationArn)
def get_DiskId(self): # String
return self.get_query_params().get('DiskId')
def set_DiskId(self, DiskId): # String
self.add_query_param('DiskId', DiskId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_RetentionDays(self): # Integer
return self.get_query_params().get('RetentionDays')
def set_RetentionDays(self, RetentionDays): # Integer
self.add_query_param('RetentionDays', RetentionDays)
def get_Category(self): # String
return self.get_query_params().get('Category')
def set_Category(self, Category): # String
self.add_query_param('Category', Category) | null |
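# Usage sketch (editor's addition, not part of the generated SDK): sending the
# request through AcsClient; the region, credentials and disk id are placeholders.
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = CreateSnapshotRequest()
#     request.set_DiskId('d-xxxxxxxxxxxx')
#     request.set_SnapshotName('nightly-backup')
#     print(client.do_action_with_exception(request))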
1,020 | ## @file
# This is an XML API that uses a syntax similar to XPath, but it is written in
# standard python so that no extra python packages are required to use it.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
'''
XmlRoutines
'''
##
# Import Modules
#
import xml.dom.minidom
import re
import codecs
from Logger.ToolError import PARSER_ERROR
import Logger.Log as Logger
## Create an element of XML
#
# @param Name           Tag name of the element to create
# @param String         Text content of the element ('' or None for none)
# @param NodeList       Child nodes, or [Key, Value] pairs to turn into text children
# @param AttributeList  [Key, Value] pairs to set as attributes
#
def CreateXmlElement(Name, String, NodeList, AttributeList):
Doc = xml.dom.minidom.Document()
Element = Doc.createElement(Name)
if String != '' and String is not None:
Element.appendChild(Doc.createTextNode(String))
for Item in NodeList:
if isinstance(Item, type([])):
Key = Item[0]
Value = Item[1]
if Key != '' and Key is not None and Value != '' and Value is not None:
Node = Doc.createElement(Key)
Node.appendChild(Doc.createTextNode(Value))
Element.appendChild(Node)
else:
Element.appendChild(Item)
for Item in AttributeList:
Key = Item[0]
Value = Item[1]
if Key != '' and Key is not None and Value != '' and Value is not None:
Element.setAttribute(Key, Value)
return Element
## Get a list of XML nodes using XPath style syntax.
#
# Return a list of XML DOM nodes from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlList(Dom, String):
if String is None or String == "" or Dom is None or Dom == "":
return []
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Nodes = [Dom]
Index = 0
End = len(TagList) - 1
while Index <= End:
ChildNodes = []
for Node in Nodes:
if Node.nodeType == Node.ELEMENT_NODE and Node.tagName == \
TagList[Index]:
if Index < End:
ChildNodes.extend(Node.childNodes)
else:
ChildNodes.append(Node)
Nodes = ChildNodes
ChildNodes = []
Index += 1
return Nodes
## Get a single XML node using XPath style syntax.
#
# Return a single XML DOM node from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM node.
# @param String An XPath style path.
#
def XmlNode(Dom, String):
if String is None or String == "" or Dom is None or Dom == "":
return None
if Dom.nodeType == Dom.DOCUMENT_NODE:
Dom = Dom.documentElement
if String[0] == "/":
String = String[1:]
TagList = String.split('/')
Index = 0
End = len(TagList) - 1
ChildNodes = [Dom]
while Index <= End:
for Node in ChildNodes:
if Node.nodeType == Node.ELEMENT_NODE and \
Node.tagName == TagList[Index]:
if Index < End:
ChildNodes = Node.childNodes
else:
return Node
break
Index += 1
return None
## Get a single XML element using XPath style syntax.
#
# Return a single XML element from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement(Dom, String):
try:
return XmlNode(Dom, String).firstChild.data.strip()
except BaseException:
return ""
## Get a single XML element using XPath style syntax.
#
# Similar with XmlElement, but do not strip all the leading and tailing space
# and newline, instead just remove the newline and spaces introduced by
# toprettyxml()
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElement2(Dom, String):
try:
HelpStr = XmlNode(Dom, String).firstChild.data
gRemovePrettyRe = re.compile(r"""(?:(\n *) )(.*)\1""", re.DOTALL)
HelpStr = re.sub(gRemovePrettyRe, r"\2", HelpStr)
return HelpStr
except BaseException:
return ""
## Get a single XML element of the current node.
#
# Return a single XML element specified by the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlElementData(Dom):
try:
return Dom.firstChild.data.strip()
except BaseException:
return ""
## Get a list of XML elements using XPath style syntax.
#
# Return a list of XML elements from the root Dom specified by XPath String.
# If the input Dom or String is not valid, then an empty list is returned.
#
# @param Dom The root XML DOM object.
# @param String An XPath style path.
#
def XmlElementList(Dom, String):
return list(map(XmlElementData, XmlList(Dom, String)))
## Get the XML attribute of the current node.
#
# Return a single XML attribute named Attribute from the current root Dom.
# If the input Dom or Attribute is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
# @param Attribute The name of Attribute.
#
def XmlAttribute(Dom, Attribute):
try:
return Dom.getAttribute(Attribute)
except BaseException:
return ''
## Get the XML node name of the current node.
#
# Return a single XML node name from the current root Dom.
# If the input Dom is not valid, then an empty string is returned.
#
# @param Dom The root XML DOM object.
#
def XmlNodeName(Dom):
try:
return Dom.nodeName.strip()
except BaseException:
return ''
## Parse an XML file.
#
# Parse the input XML file named FileName and return a XML DOM it stands for.
# If the input File is not a valid XML file, then an empty string is returned.
#
# @param FileName The XML file name.
#
def XmlParseFile(FileName):
    XmlFile = None
    try:
        XmlFile = codecs.open(FileName, 'rb')
        Dom = xml.dom.minidom.parse(XmlFile)
        XmlFile.close()
        return Dom
    except BaseException as XExcept:
        # guard against a NameError when codecs.open itself raised
        if XmlFile is not None:
            XmlFile.close()
        Logger.Error('\nUPT', PARSER_ERROR, XExcept, File=FileName, RaiseError=True)
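if __name__ == '__main__':
    # Usage sketch (editor's addition): exercise the XPath-style helpers on an
    # in-memory document; the sample XML is made up for illustration.
    SampleDom = xml.dom.minidom.parseString(
        '<Root><Item Name="A">1</Item><Item Name="B">2</Item></Root>'
    )
    print(XmlElementList(SampleDom, '/Root/Item'))                 # ['1', '2']
    print(XmlAttribute(XmlNode(SampleDom, '/Root/Item'), 'Name'))  # A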
1,021 | #!/usr/bin/env python
"""Build a mulled images for all recent conda recipe updates that don't have existing images.
Examples:
Build mulled images for recent bioconda changes with:
mulled-build-channel build
Build, test, and publish images with the follow command:
mulled-build-channel all
See recent changes that would be built with:
mulled-build-channel list
"""
import os
import subprocess
import sys
import time
import requests
from ._cli import arg_parser
from .mulled_build import (
add_build_arguments,
args_to_mull_targets_kwds,
build_target,
conda_versions,
get_affected_packages,
mull_targets,
)
from .util import (
quay_versions,
version_sorted,
)
def _fetch_repo_data(args):
repo_data = args.repo_data
channel = args.channel
if not os.path.exists(repo_data):
platform_tag = "osx-64" if sys.platform == "darwin" else "linux-64"
subprocess.check_call(
[
"wget",
"--quiet",
f"https://conda.anaconda.org/{channel}/{platform_tag}/repodata.json.bz2",
"-O",
f"{repo_data}.bz2",
]
)
subprocess.check_call(["bzip2", "-d", f"{repo_data}.bz2"])
return repo_data
def _new_versions(quay, conda):
"""Calculate the versions that are in conda but not on quay.io."""
sconda = set(conda)
squay = set(quay) if quay else set()
return sconda - squay # sconda.symmetric_difference(squay)
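# Illustrative example (editor's note): with quay = ["1.0"] and
# conda = ["1.0", "1.1"], _new_versions(quay, conda) returns {"1.1"} --
# the versions published to conda that have no image on quay.io yet.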
def run_channel(args, build_last_n_versions=1):
"""Build list of involucro commands (as shell snippet) to run."""
session = requests.session()
for pkg_name, pkg_tests in get_affected_packages(args):
repo_data = _fetch_repo_data(args)
c = conda_versions(pkg_name, repo_data)
# only package the most recent N versions
c = version_sorted(c)[:build_last_n_versions]
if not args.force_rebuild:
time.sleep(1)
q = quay_versions(args.namespace, pkg_name, session)
versions = _new_versions(q, c)
else:
versions = c
for tag in versions:
target = build_target(pkg_name, tag=tag)
targets = [target]
mull_targets(targets, test=pkg_tests, **args_to_mull_targets_kwds(args))
def get_pkg_names(args):
"""Print package names that would be affected."""
print("\n".join(pkg_name for pkg_name, pkg_tests in get_affected_packages(args)))
def add_channel_arguments(parser):
"""Add arguments only used if running mulled over a whole conda channel."""
parser.add_argument(
"--repo-data",
dest="repo_data",
required=True,
help='Published repository data. If you want to build all containers for bioconda, this parameter needs to be set to "bioconda"',
)
parser.add_argument(
"--diff-hours",
dest="diff_hours",
default="25",
help="If finding all recently changed recipes, use this number of hours.",
)
parser.add_argument("--recipes-dir", dest="recipes_dir", default="./bioconda-recipes")
parser.add_argument(
"--force-rebuild", dest="force_rebuild", action="store_true", help="Rebuild package even if already published."
)
def main(argv=None):
"""Main entry-point for the CLI tool."""
parser = arg_parser(argv, globals())
add_channel_arguments(parser)
add_build_arguments(parser)
parser.add_argument("command", metavar="COMMAND", help="Command (list, build-and-test, build, all)")
parser.add_argument(
"--targets", dest="targets", default=None, help="Build a single container with specific package(s)."
)
parser.add_argument(
"--repository-name",
dest="repository_name",
default=None,
help="Name of a single container (leave blank to auto-generate based on packages).",
)
args = parser.parse_args()
if args.command == "list":
get_pkg_names(args)
else:
run_channel(args)
__all__ = ("main",)
if __name__ == "__main__":
main() | null |
1,022 | from typing import List, Dict, Optional
from boa3.internal.neo.vm.VMCode import VMCode
class VMCodeMap:
def __init__(self):
self._vm_code_list: List[VMCode] = []
self._vm_code_addresses: List[int] = []
# optimization so it's not needed to iterate over everything in search of targets
self._vm_code_with_target: List[VMCode] = []
def __len__(self) -> int:
return self._vm_code_list.__len__()
def clear(self):
self._vm_code_addresses.clear()
self._vm_code_list.clear()
def get_code_map(self) -> Dict[int, VMCode]:
size = len(self)
return {self._vm_code_addresses[index]: self._vm_code_list[index] for index in range(size)}
def get_code_list(self) -> List[VMCode]:
return self._vm_code_list
def get_code_with_target_list(self) -> List[VMCode]:
return self._vm_code_with_target
def get_bytecode_size(self) -> int:
if len(self) < 1:
return 0
return self._vm_code_addresses[-1] + self._vm_code_list[-1].size
def insert_code(self, vm_code: VMCode, has_target: bool = False):
if vm_code not in self._vm_code_list:
self._vm_code_addresses.append(self.get_bytecode_size())
self._vm_code_list.append(vm_code)
if has_target:
self._vm_code_with_target.append(vm_code)
def get_code(self, address: int) -> Optional[VMCode]:
try:
index = self._vm_code_addresses.index(address)
except ValueError:
# the address is not in the list
if address >= self.get_bytecode_size():
# the address is not in the bytecode
return None
# if the address is not the start of an instruction, gets the last instruction before the given address
code_address = 0
for addr in self._vm_code_addresses:
if addr > address:
break
code_address = addr
index = self._vm_code_addresses.index(code_address)
return self._vm_code_list[index]
def get_start_address(self, vm_code: VMCode) -> int:
try:
index = self._vm_code_list.index(vm_code)
return self._vm_code_addresses[index]
except ValueError:
return 0
def get_end_address(self, vm_code: VMCode) -> int:
try:
index = self._vm_code_list.index(vm_code) + 1
if index == len(self._vm_code_list):
return self.get_bytecode_size()
else:
return self._vm_code_addresses[index] - 1
except ValueError:
return 0
def get_addresses(self, start_address: int, end_address: int) -> List[int]:
if start_address > end_address:
start_address, end_address = end_address, start_address
addresses = []
for address in range(start_address, end_address + 1):
if address in self._vm_code_addresses:
addresses.append(address)
return addresses
def get_addresses_from_codes(self, codes: List[VMCode]) -> List[int]:
if len(codes) < 1:
return []
addresses = []
for vm_code in codes:
try:
index = self._vm_code_list.index(vm_code)
addresses.append(self._vm_code_addresses[index])
except ValueError:
continue
return addresses
def get_opcodes(self, addresses: List[int]) -> List[VMCode]:
codes = []
for address in sorted(addresses):
try:
index = self._vm_code_addresses.index(address)
codes.append(self._vm_code_list[index])
except ValueError:
# address not in list
continue
return codes
def update_addresses(self, start_address: int = 0):
next_address = -1
final_size = len(self._vm_code_list)
if len(self._vm_code_addresses) > final_size:
self._vm_code_addresses = self._vm_code_addresses[:final_size]
for index in range(final_size):
address = self._vm_code_addresses[index]
if address >= start_address:
if next_address < 0:
if index > 0:
new_address = self._vm_code_addresses[index - 1]
next_address = new_address + self._vm_code_list[index - 1].size
else:
next_address = 0
if next_address != address:
if index < len(self._vm_code_addresses):
self._vm_code_addresses[index] = next_address
else:
self._vm_code_addresses.append(next_address)
next_address += self._vm_code_list[index].size
def move_to_end(self, first_code_address: int, last_code_address: int) -> Optional[int]:
if last_code_address < first_code_address:
return
if (len(self._vm_code_addresses) > 0 and
last_code_address == self._vm_code_addresses[-1]):
# there's nothing to change if it's moving all the codes
return
first_index = -1
last_index = 0
for index, address in enumerate(self._vm_code_addresses):
if first_code_address <= address and first_index < 0:
first_index = index
elif address > last_code_address:
last_index = index
break
if first_index >= 0:
# if the first index was not set, there's nothing to move
if last_index < first_index:
last_index = len(self._vm_code_addresses)
self._vm_code_list[first_index:] = (self._vm_code_list[last_index:] +
self._vm_code_list[first_index:last_index])
self.update_addresses(first_code_address)
index = self.get_bytecode_size()
return index
def remove_opcodes_by_addresses(self, addresses: List[int]):
was_changed = False
# reversed so we only need to update addresses once after all are removed
for code_address in sorted(addresses, reverse=True):
try:
index = self._vm_code_addresses.index(code_address)
code = self._vm_code_list.pop(index)
was_changed = True
self._vm_code_with_target.remove(code)
except ValueError:
# don't stop the loop if an address is not found
continue
if was_changed:
self.update_addresses(min(addresses)) | null |
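# Illustrative note (editor's addition, sizes are made up): inserting three
# codes of sizes 1, 3 and 2 yields start addresses [0, 1, 4]; removing the
# middle one via remove_opcodes_by_addresses([1]) triggers update_addresses(),
# which re-derives the addresses as [0, 1] from the remaining codes' sizes.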
1,023 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CheckImportDataAddressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hcs-mgw', '2017-10-24', 'CheckImportDataAddress')
self.set_method('POST')
def get_InvPath(self):
return self.get_query_params().get('InvPath')
def set_InvPath(self,InvPath):
self.add_query_param('InvPath',InvPath)
def get_AccessMethod(self):
return self.get_query_params().get('AccessMethod')
def set_AccessMethod(self,AccessMethod):
self.add_query_param('AccessMethod',AccessMethod)
def get_InvAccessKeyId(self):
return self.get_query_params().get('InvAccessKeyId')
def set_InvAccessKeyId(self,InvAccessKeyId):
self.add_query_param('InvAccessKeyId',InvAccessKeyId)
def get_AccessKeySecret(self):
return self.get_query_params().get('AccessKeySecret')
def set_AccessKeySecret(self,AccessKeySecret):
self.add_query_param('AccessKeySecret',AccessKeySecret)
def get_ListFilePath(self):
return self.get_query_params().get('ListFilePath')
def set_ListFilePath(self,ListFilePath):
self.add_query_param('ListFilePath',ListFilePath)
def get_InvDomain(self):
return self.get_query_params().get('InvDomain')
def set_InvDomain(self,InvDomain):
self.add_query_param('InvDomain',InvDomain)
def get_AccessKey(self):
return self.get_query_params().get('AccessKey')
def set_AccessKey(self,AccessKey):
self.add_query_param('AccessKey',AccessKey)
def get_AddressType(self):
return self.get_query_params().get('AddressType')
def set_AddressType(self,AddressType):
self.add_query_param('AddressType',AddressType)
def get_Direction(self):
return self.get_query_params().get('Direction')
def set_Direction(self,Direction):
self.add_query_param('Direction',Direction)
def get_Address(self):
return self.get_query_params().get('Address')
def set_Address(self,Address):
self.add_query_param('Address',Address)
def get_AccessProxy(self):
return self.get_query_params().get('AccessProxy')
def set_AccessProxy(self,AccessProxy):
self.add_query_param('AccessProxy',AccessProxy)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_AliasName(self):
return self.get_query_params().get('AliasName')
def METHOD_NAME(self,AliasName):
self.add_query_param('AliasName',AliasName)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_Appid(self):
return self.get_query_params().get('Appid')
def set_Appid(self,Appid):
self.add_query_param('Appid',Appid)
def get_InvSecretKey(self):
return self.get_query_params().get('InvSecretKey')
def set_InvSecretKey(self,InvSecretKey):
self.add_query_param('InvSecretKey',InvSecretKey)
def get_MgwRegionId(self):
return self.get_query_params().get('MgwRegionId')
def set_MgwRegionId(self,MgwRegionId):
self.add_query_param('MgwRegionId',MgwRegionId)
def get_SubAddress(self):
return self.get_query_params().get('SubAddress')
def set_SubAddress(self,SubAddress):
		self.add_query_param('SubAddress',SubAddress) | null
1,024 | # coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from evadb.catalog.catalog_type import ColumnType, NdArrayType, TableType
from evadb.catalog.models.column_catalog import ColumnCatalogEntry
from evadb.catalog.models.function_catalog import FunctionCatalogEntry
from evadb.catalog.models.function_io_catalog import FunctionIOCatalogEntry
from evadb.catalog.models.index_catalog import IndexCatalogEntry
from evadb.catalog.models.table_catalog import TableCatalogEntry
class CatalogModelsTest(unittest.TestCase):
def test_df_column(self):
df_col = ColumnCatalogEntry("name", ColumnType.TEXT, is_nullable=False)
df_col.array_dimensions = [1, 2]
df_col.table_id = 1
self.assertEqual(df_col.array_type, None)
self.assertEqual(df_col.array_dimensions, [1, 2])
self.assertEqual(df_col.is_nullable, False)
self.assertEqual(df_col.name, "name")
self.assertEqual(df_col.type, ColumnType.TEXT)
self.assertEqual(df_col.table_id, 1)
self.assertEqual(df_col.row_id, None)
def test_df_equality(self):
df_col = ColumnCatalogEntry("name", ColumnType.TEXT, is_nullable=False)
self.assertEqual(df_col, df_col)
df_col1 = ColumnCatalogEntry("name2", ColumnType.TEXT, is_nullable=False)
self.assertNotEqual(df_col, df_col1)
df_col1 = ColumnCatalogEntry("name", ColumnType.INTEGER, is_nullable=False)
self.assertNotEqual(df_col, df_col1)
df_col1 = ColumnCatalogEntry("name", ColumnType.INTEGER, is_nullable=True)
self.assertNotEqual(df_col, df_col1)
df_col1 = ColumnCatalogEntry("name", ColumnType.INTEGER, is_nullable=False)
self.assertNotEqual(df_col, df_col1)
df_col._array_dimensions = [2, 4]
df_col1 = ColumnCatalogEntry(
"name", ColumnType.INTEGER, is_nullable=False, array_dimensions=[1, 2]
)
self.assertNotEqual(df_col, df_col1)
df_col._table_id = 1
df_col1 = ColumnCatalogEntry(
"name",
ColumnType.INTEGER,
is_nullable=False,
array_dimensions=[2, 4],
table_id=2,
)
self.assertNotEqual(df_col, df_col1)
def test_table_catalog_entry_equality(self):
column_1 = ColumnCatalogEntry("frame_id", ColumnType.INTEGER, False)
column_2 = ColumnCatalogEntry("frame_label", ColumnType.INTEGER, False)
col_list = [column_1, column_2]
table_catalog_entry = TableCatalogEntry(
"name", "evadb_dataset", table_type=TableType.VIDEO_DATA, columns=col_list
)
self.assertEqual(table_catalog_entry, table_catalog_entry)
table_catalog_entry1 = TableCatalogEntry(
"name2", "evadb_dataset", table_type=TableType.VIDEO_DATA, columns=col_list
)
self.assertNotEqual(table_catalog_entry, table_catalog_entry1)
def test_function(self):
function = FunctionCatalogEntry(
"function", "fasterRCNN", "ObjectDetection", "checksum"
)
self.assertEqual(function.row_id, None)
self.assertEqual(function.impl_file_path, "fasterRCNN")
self.assertEqual(function.name, "function")
self.assertEqual(function.type, "ObjectDetection")
self.assertEqual(function.checksum, "checksum")
def test_function_hash(self):
function1 = FunctionCatalogEntry(
"function", "fasterRCNN", "ObjectDetection", "checksum"
)
function2 = FunctionCatalogEntry(
"function", "fasterRCNN", "ObjectDetection", "checksum"
)
self.assertEqual(hash(function1), hash(function2))
def test_function_equality(self):
function = FunctionCatalogEntry(
"function", "fasterRCNN", "ObjectDetection", "checksum"
)
self.assertEqual(function, function)
function2 = FunctionCatalogEntry(
"function2", "fasterRCNN", "ObjectDetection", "checksum"
)
self.assertNotEqual(function, function2)
function3 = FunctionCatalogEntry(
"function", "fasterRCNN2", "ObjectDetection", "checksum"
)
self.assertNotEqual(function, function3)
function4 = FunctionCatalogEntry(
"function2", "fasterRCNN", "ObjectDetection3", "checksum"
)
self.assertNotEqual(function, function4)
def METHOD_NAME(self):
function_io = FunctionIOCatalogEntry(
"name", ColumnType.NDARRAY, True, NdArrayType.UINT8, [2, 3], True, 1
)
self.assertEqual(function_io.row_id, None)
self.assertEqual(function_io.function_id, 1)
self.assertEqual(function_io.is_input, True)
self.assertEqual(function_io.is_nullable, True)
self.assertEqual(function_io.array_type, NdArrayType.UINT8)
self.assertEqual(function_io.array_dimensions, [2, 3])
self.assertEqual(function_io.name, "name")
self.assertEqual(function_io.type, ColumnType.NDARRAY)
def test_function_io_equality(self):
function_io = FunctionIOCatalogEntry(
"name", ColumnType.FLOAT, True, None, [2, 3], True, 1
)
self.assertEqual(function_io, function_io)
function_io2 = FunctionIOCatalogEntry(
"name2", ColumnType.FLOAT, True, None, [2, 3], True, 1
)
self.assertNotEqual(function_io, function_io2)
function_io2 = FunctionIOCatalogEntry(
"name", ColumnType.INTEGER, True, None, [2, 3], True, 1
)
self.assertNotEqual(function_io, function_io2)
function_io2 = FunctionIOCatalogEntry(
"name", ColumnType.FLOAT, False, None, [2, 3], True, 1
)
self.assertNotEqual(function_io, function_io2)
function_io2 = FunctionIOCatalogEntry(
"name", ColumnType.FLOAT, True, None, [2, 3, 4], True, 1
)
self.assertNotEqual(function_io, function_io2)
function_io2 = FunctionIOCatalogEntry(
"name", ColumnType.FLOAT, True, None, [2, 3], False, 1
)
self.assertNotEqual(function_io, function_io2)
function_io2 = FunctionIOCatalogEntry(
"name", ColumnType.FLOAT, True, None, [2, 3], True, 2
)
self.assertNotEqual(function_io, function_io2)
def test_index(self):
index = IndexCatalogEntry("index", "FaissSavePath", "HNSW")
self.assertEqual(index.row_id, None)
self.assertEqual(index.name, "index")
self.assertEqual(index.save_file_path, "FaissSavePath")
self.assertEqual(index.type, "HNSW")
def test_index_hash(self):
index1 = IndexCatalogEntry("index", "FaissSavePath", "HNSW")
index2 = IndexCatalogEntry("index", "FaissSavePath", "HNSW")
self.assertEqual(hash(index1), hash(index2))
def test_index_equality(self):
index = IndexCatalogEntry("index", "FaissSavePath", "HNSW")
self.assertEqual(index, index)
index2 = IndexCatalogEntry("index2", "FaissSavePath", "HNSW")
self.assertNotEqual(index, index2)
index3 = IndexCatalogEntry("index", "FaissSavePath3", "HNSW")
self.assertNotEqual(index, index3)
index4 = IndexCatalogEntry("index", "FaissSavePath", "HNSW4")
self.assertNotEqual(index, index4) | null |
1,025 | from __future__ import annotations
__all__ = ("Progress",)
from types import TracebackType
from typing import Optional, Type
from warnings import warn
from .._docstring import add_example
from .._utils import rand_hex
from ..session import Session, require_active_session
@add_example()
class Progress:
"""
Initialize a progress bar.
Parameters
----------
min
The value that represents the starting point of the progress bar. Must be less
than ``max``.
max
The value that represents the end of the progress bar. Must be greater than
``min``.
session
A :class:`~shiny.Session` instance. If not provided, it is inferred via
:func:`~shiny.session.get_current_session`.
"""
_style = "notification"
min: int
max: int
value: float | None
def __init__(
self, min: int = 0, max: int = 1, session: Optional[Session] = None
) -> None:
self.min = min
self.max = max
self.value = None
self._id = rand_hex(8)
self._closed = False
self._session = require_active_session(session)
msg = {"id": self._id, "style": self._style}
self._session._send_progress("open", msg)
def __enter__(self) -> "Progress":
return self
def __exit__(
self,
exctype: Optional[Type[BaseException]],
excinst: Optional[BaseException],
exctb: Optional[TracebackType],
) -> None:
self.close()
def set(
self,
value: Optional[float] = None,
message: Optional[str] = None,
detail: Optional[str] = None,
) -> None:
"""
Updates the progress panel. When called the first time, the progress panel is
displayed.
Parameters
----------
self
The object instance
value
The value at which to set the progress bar, relative to ``min`` and ``max``.
``None`` hides the progress bar, if it is currently visible.
message
The message to be displayed to the user or ``None`` to hide the current
message (if any).
detail
The detail message to be displayed to the user or ``None`` to hide the
current detail message (if any). The detail message will be shown with a
de-emphasized appearance relative to message.
"""
if self._closed:
warn("Attempting to set progress, but progress already closed.")
return None
self.value = value
        if value is not None:
# Normalize value to number between 0 and 1
value = min(1, max(0, (value - self.min) / (self.max - self.min)))
msg = {
"id": self._id,
"message": message,
"detail": detail,
"value": value,
"style": self._style,
}
self._session._send_progress(
"update", {k: v for k, v in msg.items() if v is not None}
)
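    # Worked example of the normalization above (a sketch with hypothetical
    # values): for min=0 and max=10, set(2.5) sends
    # value = min(1, max(0, (2.5 - 0) / (10 - 0))) = 0.25, i.e. a quarter-full
    # bar; any keys in msg whose value is None are dropped before sending.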
def METHOD_NAME(
self,
amount: float = 0.1,
message: Optional[str] = None,
detail: Optional[str] = None,
) -> None:
"""
Increment the progress bar.
Like ``set``, this updates the progress panel. The difference is that ``inc``
increases the progress bar by amount, instead of setting it to a specific value.
Parameters
----------
self
The object instance
amount
            The amount by which to increment the progress bar.
message
The message to be displayed to the user or ``None`` to hide the current
message (if any).
detail
The detail message to be displayed to the user or ``None`` to hide the current
detail message (if any). The detail message will be shown with a
de-emphasized appearance relative to message.
"""
if self.value is None:
self.value = self.min
value = min(self.value + amount, self.max)
self.set(value, message, detail)
def close(self) -> None:
"""
Close the progress bar. You can also use the Progress object as a context
manager, which will cause the progress bar to close on exit.
Parameters
----------
self
The object instance
Note
----
Removes the progress panel. Future calls to set and close will be ignored.
"""
if self._closed:
warn("Attempting to close progress, but progress already closed.")
return None
self._session._send_progress("close", {"id": self._id, "style": self._style})
self._closed = True | null |
1,026 | import pytest
from ethpm.exceptions import (
EthPMValidationError,
)
from ethpm.validation.manifest import (
extract_contract_types_from_deployments,
validate_manifest_against_schema,
validate_manifest_deployments,
validate_meta_object,
validate_raw_manifest_format,
)
from ethpm.validation.package import (
validate_manifest_version,
validate_package_name,
)
def test_validate_raw_manifest_configuration_validates_strict_manifests(
all_strict_manifests,
):
assert validate_raw_manifest_format(all_strict_manifests) is None
def test_validate_raw_manifest_format_invalidates_pretty_manifests(
all_pretty_manifests,
):
with pytest.raises(EthPMValidationError):
validate_raw_manifest_format(all_pretty_manifests)
@pytest.mark.parametrize(
"manifest",
(
# not alphabetical
'{"x":"y","a":"b"}',
# not UTF-8
'{"\x80":"b","c":"d"}',
# newlines
'{"a":"b",\n"c":"d"}',
'{"a":"b","c":"d"}\n',
# whitespace
'{"a":"b","c": "d"}',
),
)
def test_validate_raw_manifest_format_invalidates_invalid_manifests(tmpdir, manifest):
p = tmpdir.mkdir("invalid").join("manifest.json")
p.write(manifest)
invalid_manifest = p.read()
with pytest.raises(EthPMValidationError):
validate_raw_manifest_format(invalid_manifest)
def METHOD_NAME(all_manifests):
assert validate_manifest_against_schema(all_manifests) is None
def test_validate_manifest_invalidates(invalid_manifest):
with pytest.raises(EthPMValidationError, match="Manifest invalid for schema"):
validate_manifest_against_schema(invalid_manifest)
def test_validate_manifest_deployments_catches_missing_contract_type_references(
manifest_with_conflicting_deployments,
):
with pytest.raises(
EthPMValidationError, match="Manifest missing references to contracts"
):
validate_manifest_deployments(manifest_with_conflicting_deployments)
def test_validate_deployments_for_single_deployment(safe_math_lib_package):
assert validate_manifest_deployments(safe_math_lib_package.manifest) is None
def test_validate_deployments_without_deployment(manifest_with_no_deployments):
assert validate_manifest_deployments(manifest_with_no_deployments) is None
@pytest.mark.parametrize(
"data,expected",
(
([], set()),
([{"some": {"contractType": "one"}}], {"one"}),
(
[{"some": {"contractType": "one"}, "other": {"contractType": "two"}}],
{"one", "two"},
),
),
)
def test_extract_contract_types_from_deployments(data, expected):
actual = extract_contract_types_from_deployments(data)
assert actual == expected
def test_validate_manifest_version_validates_version_three_string():
assert validate_manifest_version("ethpm/3") is None
@pytest.mark.parametrize("version", (2, 3, "2", "3", b"3"))
def test_validate_manifest_version_invalidates_incorrect_versions(version):
with pytest.raises(EthPMValidationError):
validate_manifest_version(version)
@pytest.mark.parametrize(
"meta,extra_fields",
(
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "A Package that does things.",
"keywords": ["ethpm", "package"],
"links": {"documentation": "ipfs://Qm..."},
},
False,
),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "A Package that does things.",
"keywords": ["ethpm", "package"],
"links": {"documentation": "ipfs://Qm..."},
"x-hash": "0x...",
},
True,
),
),
)
def test_validate_meta_object_validates(meta, extra_fields):
result = validate_meta_object(meta, allow_extra_meta_fields=extra_fields)
assert result is None
@pytest.mark.parametrize(
"meta,extra_fields",
(
# With allow_extra_meta_fields=False
({"invalid": "field"}, False),
({"license": 123}, False),
({"license": "MIT", "authors": "[email protected]"}, False),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": ["description", "of", "package"],
},
False,
),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "description",
"keywords": "singlekw",
},
False,
),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "description",
"keywords": ["auth", "package"],
"links": ["ipfs://Qm"],
},
False,
),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "description",
"keywords": ["auth", "package"],
"links": {"documentation": "ipfs://Qm"},
"extra": "field",
},
False,
),
(
{
"license": "MIT",
"authors": ["[email protected]"],
"description": "description",
"keywords": ["auth", "package"],
"links": {"documentation": "ipfs://Qm"},
"x-hash": "0x",
},
False,
),
# With allow_extra_meta_fields=True
# Improperly formatted "x" field
({"license": "MIT", "extra": "field"}, True),
),
)
def test_validate_meta_object_invalidates(meta, extra_fields):
with pytest.raises(EthPMValidationError):
validate_meta_object(meta, allow_extra_meta_fields=extra_fields)
@pytest.mark.parametrize(
"package_name",
(
"valid",
"Valid",
"pkg1",
"pkg_1",
"pkg-1",
"wallet0",
"wallet_",
"wallet-",
"x" * 256,
),
)
def test_validate_package_name_with_valid_package_names(package_name):
assert validate_package_name(package_name) is None
@pytest.mark.parametrize(
"package_name",
(
"",
"0",
"_invalid",
"-invalid",
".invalid",
"wallet.bad",
"x" * 257,
),
)
def test_validate_package_name_raises_exception_for_invalid_names(package_name):
with pytest.raises(EthPMValidationError):
validate_package_name(package_name) | null |
1,027 | """Djlint test config."""
# pylint: disable=W0621,C0116
import difflib
import os
import re
import shutil
import tempfile
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, Generator, List, Optional, TextIO
import pytest
from _pytest.reports import BaseReport
from _pytest.terminal import TerminalReporter
from click.testing import CliRunner
from colorama import Fore, Style
from src.djlint import main as djlint
from src.djlint.settings import Config
class MyReporter(TerminalReporter): # type: ignore
"""Override default reporter to print more interesting details."""
def METHOD_NAME(self):
"""Override summary."""
failed = self.stats.get("failed", [])
if failed:
self.write_sep("=", "Short Test Summary")
for rep in failed:
self.write_line(f"failed {rep.nodeid}")
def summary_failures(self) -> None:
"""Override failure printer."""
if self.config.option.tbstyle != "no":
reports: List[BaseReport] = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
if self.config.option.tbstyle == "line":
for rep in reports:
line = self._getcrashline(rep)
self.write_line(line)
else:
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, red=True, bold=True)
# modified version of _outrep_summary()
# only show error if not assertion error,
# otherwise our print function shows the diff better.
if not re.search(r"AssertionError:", rep.longreprtext):
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
# self._tw.sep("-", secname)
line_content = content
if content[-1:] == "\n":
line_content = content[:-1]
self._tw.line(line_content)
# continue original code
self._handle_teardown_sections(rep.nodeid)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
vanilla_reporter = config.pluginmanager.getplugin("terminalreporter")
my_reporter = MyReporter(config)
config.pluginmanager.unregister(vanilla_reporter)
config.pluginmanager.register(my_reporter, "terminalreporter")
@pytest.fixture()
def runner() -> CliRunner:
"""Click runner for djlint tests."""
return CliRunner()
@pytest.fixture()
def tmp_file() -> Generator:
"""Create a temp file for formatting."""
# pylint: disable=R1732
tmp = tempfile.NamedTemporaryFile(delete=False)
yield tmp
tmp.close()
os.unlink(tmp.name)
def printer(expected, source, actual):
width, _ = shutil.get_terminal_size()
expected_text = "Expected"
actual_text = "Actual"
diff_text = "Diff"
source_text = "Source"
expected_width = int((width - len(expected_text) - 2) / 2)
actual_width = int((width - len(actual_text) - 2) / 2)
diff_width = int((width - len(diff_text) - 2) / 2)
source_width = int((width - len(source_text) - 2) / 2)
color = {"-": Fore.YELLOW, "+": Fore.GREEN, "@": Style.BRIGHT + Fore.BLUE}
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * source_width} {source_text} {'─' * source_width}{Style.RESET_ALL}"
)
print()
print(source)
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * expected_width} {expected_text} {'─' * expected_width}{Style.RESET_ALL}"
)
print()
print(expected)
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * actual_width} {actual_text} {'─' * actual_width}{Style.RESET_ALL}"
)
print()
print(actual)
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * diff_width} {diff_text} {'─' * diff_width}{Style.RESET_ALL}"
)
print()
for diff in list(difflib.unified_diff(expected.split("\n"), actual.split("\n")))[
2:
]:
print(f"{ color.get(diff[:1], Style.RESET_ALL)}{diff}{Style.RESET_ALL}")
def lint_printer(source, expected, actual):
width, _ = shutil.get_terminal_size()
expected_text = "Expected Rules"
actual_text = "Actual"
source_text = "Source"
expected_width = int((width - len(expected_text) - 2) / 2)
actual_width = int((width - len(actual_text) - 2) / 2)
source_width = int((width - len(source_text) - 2) / 2)
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * source_width} {source_text} {'─' * source_width}{Style.RESET_ALL}"
)
print()
print(source)
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * expected_width} {expected_text} {'─' * expected_width}{Style.RESET_ALL}"
)
print()
for x in expected:
print(
f"{Fore.RED}{Style.BRIGHT}{x['code']}{Style.RESET_ALL} {x['line']} {x['match']}"
)
print(f' {x["message"]}')
print()
print(
f"{Fore.BLUE}{Style.BRIGHT}{'─' * actual_width} {actual_text} {'─' * actual_width}{Style.RESET_ALL}"
)
print()
for x in actual:
print(
f"{Fore.RED}{Style.BRIGHT}{x['code']}{Style.RESET_ALL} {x['line']} {x['match']}"
)
print(f' {x["message"]}')
print()
if len(actual) == 0:
print(f"{Fore.YELLOW}No codes found.{Style.RESET_ALL}")
print()
else:
print(f"{Fore.YELLOW}{actual}{Style.RESET_ALL}")
print()
def write_to_file(the_file: str, the_text: bytes) -> None:
"""Shortcode for write some bytes to a file."""
with open(the_file, mode="w+b") as open_file:
open_file.write(the_text)
def reformat(
the_file: TextIO, runner: CliRunner, the_text: bytes, profile: str = "html"
) -> SimpleNamespace:
write_to_file(the_file.name, the_text)
result = runner.invoke(djlint, [the_file.name, "--profile", profile, "--reformat"])
return SimpleNamespace(
**{
"text": Path(the_file.name).read_text(encoding="utf8"),
"exit_code": result.exit_code,
}
)
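# Usage sketch (hypothetical template; the exact reformatted output is not
# asserted here): inside a test that receives the runner and tmp_file fixtures,
#
#     output = reformat(tmp_file, runner, b"<div><p>x</p></div>")
#     print(output.exit_code, output.text)
#
# reformat() writes the bytes to a real file first because djlint operates on
# file paths, then reads the rewritten file back.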
def config_builder(args: Optional[Dict] = None) -> Config:
if args:
return Config("dummy/source.html", **args)
return Config("dummy/source.html")
@pytest.fixture()
def basic_config() -> Config:
"""Return a config object with default basic options."""
return Config("dummy/source.html")
@pytest.fixture()
def django_config() -> Config:
"""Return a config object with django profile."""
return Config("dummy/source.html", profile="django")
@pytest.fixture()
def jinja_config() -> Config:
"""Return a config object with jinja."""
return Config("dummy/source.html", profile="jinja")
@pytest.fixture()
def handlebars_config() -> Config:
"""Return a config object with handlebars."""
return Config("dummy/source.html", profile="handlebars")
@pytest.fixture()
def nunjucks_config() -> Config:
"""Return a config object with nunjucks."""
return Config("dummy/source.html", profile="nunjucks") | null |
1,028 | """Test schedulers."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from otx.algorithms.segmentation.adapters.mmseg.models.schedulers import (
ConstantScalarScheduler,
PolyScalarScheduler,
StepScalarScheduler,
)
class TestSchedulers:
"""Test schedulers."""
def test_constant_scalar_scheduler(self):
"""Test constant scalar scheduler.
Learning rate should not change over time.
"""
scheduler = ConstantScalarScheduler(scale=30.0)
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 10) == 30.0
def test_constant_scalar_scheduler_invalid_scale(self):
"""Test constant scalar scheduler with invalid scale."""
with pytest.raises(AssertionError):
ConstantScalarScheduler(scale=-1.0)
@pytest.mark.xfail
def test_constant_scalar_scheduler_invalid_step(self):
"""Test constant scalar scheduler with invalid step.
        TODO: ConstantScalarScheduler should be modified to raise this error
"""
scheduler = ConstantScalarScheduler(scale=30.0)
with pytest.raises(AssertionError):
scheduler(-1, 1)
def test_poly_scalar_scheduler_by_epoch_false(self):
"""Test poly scalar scheduler."""
# By epoch is False
scheduler = PolyScalarScheduler(
start_scale=30.0,
end_scale=0.0,
num_iters=100,
power=0.9,
by_epoch=False,
)
# learning rate should decrease over time
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) < 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) == scheduler(50, 1) # as this is not by epoch
# learning rate should not change after num_iters
assert scheduler(100, 1) == 0.0
assert scheduler(101, 1) == 0.0
assert scheduler(102, 1) == 0.0
def test_poly_scalar_scheduler_by_epoch_true(self):
scheduler = PolyScalarScheduler(
start_scale=30.0,
end_scale=0.0,
num_iters=100,
power=0.9,
by_epoch=True,
)
# learning rate should decrease over time
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) < 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) != scheduler(50, 1) # as this is by epoch
# learning rate should not change after num_iters
assert scheduler(100, 1) == 0.0
assert scheduler(101, 1) == 0.0
assert scheduler(102, 1) == 0.0
def METHOD_NAME(self):
"""Test step scalar scheduler."""
# By epoch is False
scheduler = StepScalarScheduler(
scales=[30.0, 20.0, 10.0, 5.0],
num_iters=[2, 3, 4],
by_epoch=False,
)
# learning rate should decrease over time as a step function
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) == scheduler(50, 1)
assert scheduler(5, 2) == 5.0
assert scheduler(5, 0) == scheduler(10, 1)
assert scheduler(10, 1) == 5.0 # steps greater than total num_iters
def test_step_scalar_scheduler_by_epoch_true(self):
# By epoch is True
scheduler = StepScalarScheduler(
scales=[30.0, 20.0, 10.0, 5.0],
num_iters=[2, 3, 4],
by_epoch=True,
)
# learning rate should decrease over time as a step function
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(9, 5) == 30.0
assert scheduler(5, 2) == 20.0
assert scheduler(5, 2) < scheduler(10, 11) | null |
1,029 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDBInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dds', '2015-12-01', 'DescribeDBInstances','dds')
self.set_method('POST')
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ReplicationFactor(self): # String
return self.get_query_params().get('ReplicationFactor')
def set_ReplicationFactor(self, ReplicationFactor): # String
self.add_query_param('ReplicationFactor', ReplicationFactor)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def METHOD_NAME(self): # String
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # String
self.add_query_param('Expired', Expired)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def set_Engine(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBNodeType(self): # String
return self.get_query_params().get('DBNodeType')
def set_DBNodeType(self, DBNodeType): # String
self.add_query_param('DBNodeType', DBNodeType)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_DBInstanceDescription(self): # String
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self, DBInstanceDescription): # String
self.add_query_param('DBInstanceDescription', DBInstanceDescription)
def get_DBInstanceStatus(self): # String
return self.get_query_params().get('DBInstanceStatus')
def set_DBInstanceStatus(self, DBInstanceStatus): # String
self.add_query_param('DBInstanceStatus', DBInstanceStatus)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
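    # Usage sketch (hypothetical tag values): RepeatList parameters are
    # flattened into indexed query keys, so
    #     req = DescribeDBInstancesRequest()
    #     req.set_Tags([{"Key": "env", "Value": "prod"}])
    # adds the query params Tag.1.Key=env and Tag.1.Value=prod.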
def get_ExpireTime(self): # String
return self.get_query_params().get('ExpireTime')
def set_ExpireTime(self, ExpireTime): # String
self.add_query_param('ExpireTime', ExpireTime)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_ConnectionDomain(self): # String
return self.get_query_params().get('ConnectionDomain')
def set_ConnectionDomain(self, ConnectionDomain): # String
self.add_query_param('ConnectionDomain', ConnectionDomain)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceType(self): # String
return self.get_query_params().get('DBInstanceType')
def set_DBInstanceType(self, DBInstanceType): # String
self.add_query_param('DBInstanceType', DBInstanceType)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
1,030 | """Djlint html linter."""
import importlib
from pathlib import Path
from typing import Dict, List
import regex as re
from .helpers import (
inside_ignored_linter_block,
inside_ignored_rule,
overlaps_ignored_block,
)
from .settings import Config
flags = {
"re.A": re.A,
"re.ASCII": re.ASCII,
"re.I": re.I,
"re.IGNORECASE": re.IGNORECASE,
"re.M": re.M,
"re.MULTILINE": re.MULTILINE,
"re.S": re.S,
"re.DOTALL": re.DOTALL,
"re.X": re.X,
"re.VERBOSE": re.VERBOSE,
"re.L": re.L,
"re.LOCALE": re.LOCALE,
}
def build_flags(flag_list: str) -> int:
"""Build list of regex flags."""
split_flags = flag_list.split("|")
combined_flags = 0
for flag in split_flags:
combined_flags |= flags[flag.strip()]
return combined_flags
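# Example (a sketch): build_flags("re.I | re.M") ORs the named flags together,
# returning the same integer as re.I | re.M, so a rule config can specify
# several regex flags in a single string.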
def get_line(start: int, line_ends: List) -> str:
"""Get the line number and index of match."""
line = list(filter(lambda pair: pair["end"] > start, line_ends))[0]
# pylint: disable=C0209
return "%d:%d" % (line_ends.index(line) + 1, start - line["start"])
def linter(config: Config, html: str, filename: str, filepath: str) -> Dict:
"""Lint a html string."""
errors: dict = {filename: []}
# build list of line ends for file
line_ends = [
{"start": m.start(), "end": m.end()}
for m in re.finditer(r"(?:.*\n)|(?:[^\n]+$)", html)
]
ignored_rules: List[str] = []
# remove ignored rules for file
for pattern, rules in config.per_file_ignores.items():
if re.search(pattern, filepath, re.VERBOSE):
ignored_rules += [x.strip() for x in rules.split(",")]
for rule in config.linter_rules:
rule = rule["rule"]
# skip ignored rules
if rule["name"] in ignored_rules:
continue
# rule based on python module
if "python_module" in rule:
rule_module = importlib.import_module(rule["python_module"])
module_errors = rule_module.run(
rule=rule,
config=config,
html=html,
filepath=filepath,
line_ends=line_ends,
)
assert isinstance(module_errors, list), (
f"Error: {rule['name']} python_module run() should return a list of "
"dict with keys: code, line, match, message."
)
errors[filename].extend(module_errors)
# rule based on patterns
else:
for pattern in rule["patterns"]:
for match in re.finditer(
re.compile(
pattern, flags=build_flags(rule.get("flags", "re.DOTALL"))
),
html,
):
if (
overlaps_ignored_block(config, html, match) is False
and inside_ignored_rule(config, html, match, rule["name"])
is False
and inside_ignored_linter_block(config, html, match) is False
):
errors[filename].append(
{
"code": rule["name"],
"line": get_line(match.start(), line_ends),
"match": match.group().strip()[:20],
"message": rule["message"],
}
)
# remove duplicate matches
for filename, error_dict in errors.items():
unique_errors = []
for dict_ in error_dict:
if dict_ not in unique_errors:
unique_errors.append(dict_)
errors[filename] = unique_errors
return errors
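# Usage sketch (hypothetical inputs): lint a template string directly with a
# configured profile and collect errors keyed by filename:
#
#     config = Config("dummy/source.html", profile="django")
#     errors = linter(config, "<div>{{ x }}</div>", "source.html", "source.html")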
def METHOD_NAME(config: Config, this_file: Path) -> Dict:
"""Check file for formatting errors."""
filename = str(this_file)
html = this_file.read_text(encoding="utf8")
return linter(config, html, filename, this_file.as_posix()) | null |
1,031 | from __future__ import annotations
import asyncio
import traceback
from typing import List, Tuple
import aiotools
import pytest
from redis.asyncio import Redis
from redis.exceptions import ConnectionError as RedisConnectionError
from redis.exceptions import TimeoutError as RedisTimeoutError
from ai.backend.common import redis_helper
from ai.backend.common.types import HostPortPair, RedisConnectionInfo
from .docker import DockerRedisNode
from .utils import interrupt
@pytest.mark.redis
@pytest.mark.asyncio
@pytest.mark.xfail
@pytest.mark.parametrize("disruption_method", ["stop", "pause"])
async def test_blist(redis_container: tuple[str, HostPortPair], disruption_method: str) -> None:
do_pause = asyncio.Event()
paused = asyncio.Event()
do_unpause = asyncio.Event()
unpaused = asyncio.Event()
received_messages: List[str] = []
async def pop(r: RedisConnectionInfo, key: str) -> None:
try:
async with aiotools.aclosing(
redis_helper.blpop(r, key, reconnect_poll_interval=0.2),
) as agen:
async for raw_msg in agen:
msg = raw_msg.decode()
received_messages.append(msg)
except asyncio.CancelledError:
pass
except Exception:
traceback.print_exc()
addr = redis_container[1]
r = RedisConnectionInfo(
Redis.from_url(url=f"redis://{addr.host}:{addr.port}", socket_timeout=0.2),
service_name=None,
)
assert isinstance(r.client, Redis)
await r.client.delete("bl1")
pop_task = asyncio.create_task(pop(r, "bl1"))
interrupt_task = asyncio.create_task(
interrupt(
disruption_method,
DockerRedisNode("node", addr.port, redis_container[0]),
do_pause=do_pause,
do_unpause=do_unpause,
paused=paused,
unpaused=unpaused,
)
)
await asyncio.sleep(0)
for i in range(2):
print(f"pushing {i} to bl1")
await r.client.rpush("bl1", str(i))
await asyncio.sleep(0.1)
do_pause.set()
await paused.wait()
for i in range(2):
# The Redis server is dead temporarily...
if disruption_method == "stop":
with pytest.raises(RedisConnectionError):
await r.client.rpush("bl1", str(2 + i))
elif disruption_method == "pause":
with pytest.raises((asyncio.TimeoutError, RedisTimeoutError)):
await r.client.rpush("bl1", str(2 + i))
else:
raise RuntimeError("should not reach here")
await asyncio.sleep(0.1)
do_unpause.set()
await unpaused.wait()
for i in range(2):
await r.client.rpush("bl1", str(4 + i))
await asyncio.sleep(0.1)
await interrupt_task
pop_task.cancel()
await pop_task
assert pop_task.done()
all_messages = set(map(int, received_messages))
assert set(range(0, 2)) < all_messages
assert set(range(5, 6)) < all_messages # more msgs may be lost during restart
assert all_messages <= set(range(0, 6))
@pytest.mark.redis
@pytest.mark.asyncio
@pytest.mark.xfail
@pytest.mark.parametrize("disruption_method", ["stop", "pause"])
async def test_blist_with_retrying_rpush(
redis_container: Tuple[str, HostPortPair], disruption_method: str
) -> None:
do_pause = asyncio.Event()
paused = asyncio.Event()
do_unpause = asyncio.Event()
unpaused = asyncio.Event()
received_messages: List[str] = []
async def pop(r: RedisConnectionInfo, key: str) -> None:
try:
async with aiotools.aclosing(
redis_helper.blpop(r, key, reconnect_poll_interval=0.2),
) as agen:
async for raw_msg in agen:
msg = raw_msg.decode()
received_messages.append(msg)
except asyncio.CancelledError:
pass
addr = redis_container[1]
r = RedisConnectionInfo(
Redis.from_url(url=f"redis://{addr.host}:{addr.port}", socket_timeout=0.2),
service_name=None,
)
assert isinstance(r.client, Redis)
await r.client.delete("bl1")
pop_task = asyncio.create_task(pop(r, "bl1"))
interrupt_task = asyncio.create_task(
interrupt(
disruption_method,
DockerRedisNode("node", addr.port, redis_container[0]),
do_pause=do_pause,
do_unpause=do_unpause,
paused=paused,
unpaused=unpaused,
)
)
await asyncio.sleep(0)
for i in range(2):
await redis_helper.execute(r, lambda r: r.rpush("bl1", str(i)))
await asyncio.sleep(0.1)
do_pause.set()
await paused.wait()
async def METHOD_NAME():
await asyncio.sleep(2.0)
do_unpause.set()
wakeup_task = asyncio.create_task(METHOD_NAME())
for i in range(2):
await redis_helper.execute(r, lambda r: r.rpush("bl1", str(2 + i)))
await asyncio.sleep(0.1)
await wakeup_task
await unpaused.wait()
for i in range(2):
await redis_helper.execute(r, lambda r: r.rpush("bl1", str(4 + i)))
await asyncio.sleep(0.1)
await interrupt_task
pop_task.cancel()
await pop_task
assert pop_task.done()
all_messages = set(map(int, received_messages))
assert set(range(0, 2)) < all_messages
assert set(range(5, 6)) < all_messages # more msgs may be lost during restart
assert all_messages <= set(range(0, 6)) | null |
1,032 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ModifyFlowForWebRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ModifyFlowForWeb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CronExpr(self):
return self.get_query_params().get('CronExpr')
def set_CronExpr(self,CronExpr):
self.add_query_param('CronExpr',CronExpr)
def get_Periodic(self):
return self.get_query_params().get('Periodic')
def set_Periodic(self,Periodic):
self.add_query_param('Periodic',Periodic)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_AlertUserGroupBizId(self):
return self.get_query_params().get('AlertUserGroupBizId')
def set_AlertUserGroupBizId(self,AlertUserGroupBizId):
self.add_query_param('AlertUserGroupBizId',AlertUserGroupBizId)
def get_Lifecycle(self):
return self.get_query_params().get('Lifecycle')
def set_Lifecycle(self,Lifecycle):
self.add_query_param('Lifecycle',Lifecycle)
def get_HostName(self):
return self.get_query_params().get('HostName')
def set_HostName(self,HostName):
self.add_query_param('HostName',HostName)
def get_CreateCluster(self):
return self.get_query_params().get('CreateCluster')
def set_CreateCluster(self,CreateCluster):
self.add_query_param('CreateCluster',CreateCluster)
def get_EndSchedule(self):
return self.get_query_params().get('EndSchedule')
def set_EndSchedule(self,EndSchedule):
self.add_query_param('EndSchedule',EndSchedule)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_AlertConf(self):
return self.get_query_params().get('AlertConf')
def set_AlertConf(self,AlertConf):
self.add_query_param('AlertConf',AlertConf)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId)
def get_ParentFlowList(self):
return self.get_query_params().get('ParentFlowList')
def set_ParentFlowList(self,ParentFlowList):
self.add_query_param('ParentFlowList',ParentFlowList)
def METHOD_NAME(self):
return self.get_query_params().get('LogArchiveLocation')
def set_LogArchiveLocation(self,LogArchiveLocation):
self.add_query_param('LogArchiveLocation',LogArchiveLocation)
def get_AlertDingDingGroupBizId(self):
return self.get_query_params().get('AlertDingDingGroupBizId')
def set_AlertDingDingGroupBizId(self,AlertDingDingGroupBizId):
self.add_query_param('AlertDingDingGroupBizId',AlertDingDingGroupBizId)
def get_StartSchedule(self):
return self.get_query_params().get('StartSchedule')
def set_StartSchedule(self,StartSchedule):
self.add_query_param('StartSchedule',StartSchedule)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_Graph(self):
return self.get_query_params().get('Graph')
def set_Graph(self,Graph):
self.add_query_param('Graph',Graph)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Namespace(self):
return self.get_query_params().get('Namespace')
def set_Namespace(self,Namespace):
self.add_query_param('Namespace',Namespace)
def get_Status(self):
return self.get_query_params().get('Status')
def set_Status(self,Status):
self.add_query_param('Status',Status)
def get_ParentCategory(self):
return self.get_query_params().get('ParentCategory')
def set_ParentCategory(self,ParentCategory):
		self.add_query_param('ParentCategory',ParentCategory) | null
1,033 | """Tests to find local Singularity image."""
import shutil
from pathlib import Path
from typing import Any
import pytest
from cwltool.main import main
from .util import (
get_data,
get_main_output,
needs_singularity,
needs_singularity_2_6,
needs_singularity_3_or_newer,
working_directory,
)
@needs_singularity_2_6
def test_singularity_pullfolder(tmp_path: Path, monkeypatch: Any) -> None:
"""Test singularity respects SINGULARITY_PULLFOLDER."""
workdir = tmp_path / "working_dir_new"
workdir.mkdir()
with working_directory(workdir):
pullfolder = tmp_path / "pullfolder"
pullfolder.mkdir()
result_code, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/sing_pullfolder_test.cwl"),
"--message",
"hello",
],
extra_env={"SINGULARITY_PULLFOLDER": str(pullfolder)},
monkeypatch=monkeypatch,
)
print(stdout)
print(stderr)
assert result_code == 0
image = pullfolder / "debian.img"
assert image.exists()
@needs_singularity
def test_singularity_workflow(tmp_path: Path) -> None:
with working_directory(tmp_path):
error_code, _, stderr = get_main_output(
[
"--singularity",
"--default-container",
"docker.io/debian:stable-slim",
"--debug",
get_data("tests/wf/hello-workflow.cwl"),
"--usermessage",
"hello",
]
)
assert "completed success" in stderr, stderr
assert error_code == 0
def test_singularity_iwdr(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
singularity_dir = tmp_path / "singularity"
singularity_dir.mkdir()
monkeypatch.setenv("CWL_SINGULARITY_CACHE", str(singularity_dir))
result_code = main(
[
"--singularity",
"--default-container",
"docker.io/debian:stable-slim",
get_data("tests/wf/iwdr-entry.cwl"),
"--message",
"hello",
]
)
singularity_installed = bool(shutil.which("singularity"))
if singularity_installed:
assert result_code == 0
else:
assert result_code != 0
@needs_singularity
def test_singularity_incorrect_image_pull() -> None:
result_code, _, stderr = get_main_output(
[
"--singularity",
"--default-container",
"non-existant-weird-image",
get_data("tests/wf/hello-workflow.cwl"),
"--usermessage",
"hello",
]
)
assert result_code != 0
@needs_singularity
def test_singularity_local(tmp_path: Path) -> None:
workdir = tmp_path / "working_dir"
workdir.mkdir()
with working_directory(workdir):
result_code, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/sing_pullfolder_test.cwl"),
"--message",
"hello",
]
)
assert result_code == 0
@needs_singularity_2_6
def test_singularity2_docker_image_id_in_tool(tmp_path: Path) -> None:
workdir = tmp_path / "working_dir"
workdir.mkdir()
with working_directory(workdir):
result_code, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/sing_pullfolder_test.cwl"),
"--message",
"hello",
]
)
result_code1, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/debian_image_id.cwl"),
"--message",
"hello",
]
)
assert result_code1 == 0
@needs_singularity_3_or_newer
def METHOD_NAME(tmp_path: Path) -> None:
workdir = tmp_path / "working_dir"
workdir.mkdir()
with working_directory(workdir):
result_code, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/sing_pullfolder_test.cwl"),
"--message",
"hello",
]
)
assert result_code == 0
result_code1, stdout, stderr = get_main_output(
[
"--singularity",
get_data("tests/debian_image_id2.cwl"),
"--message",
"hello",
]
)
assert result_code1 == 0 | null |
1,034 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudapi.endpoint import endpoint_data
class CreateApiRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'CreateApi','apigateway')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WebSocketApiType(self): # String
return self.get_query_params().get('WebSocketApiType')
def set_WebSocketApiType(self, WebSocketApiType): # String
self.add_query_param('WebSocketApiType', WebSocketApiType)
def get_ErrorCodeSamples(self): # String
return self.get_query_params().get('ErrorCodeSamples')
def set_ErrorCodeSamples(self, ErrorCodeSamples): # String
self.add_query_param('ErrorCodeSamples', ErrorCodeSamples)
def METHOD_NAME(self): # String
return self.get_query_params().get('AppCodeAuthType')
def set_AppCodeAuthType(self, AppCodeAuthType): # String
self.add_query_param('AppCodeAuthType', AppCodeAuthType)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_DisableInternet(self): # Boolean
return self.get_query_params().get('DisableInternet')
def set_DisableInternet(self, DisableInternet): # Boolean
self.add_query_param('DisableInternet', DisableInternet)
def get_BackendId(self): # String
return self.get_query_params().get('BackendId')
def set_BackendId(self, BackendId): # String
self.add_query_param('BackendId', BackendId)
def get_ConstantParameters(self): # String
return self.get_query_params().get('ConstantParameters')
def set_ConstantParameters(self, ConstantParameters): # String
self.add_query_param('ConstantParameters', ConstantParameters)
def get_AuthType(self): # String
return self.get_query_params().get('AuthType')
def set_AuthType(self, AuthType): # String
self.add_query_param('AuthType', AuthType)
def get_AllowSignatureMethod(self): # String
return self.get_query_params().get('AllowSignatureMethod')
def set_AllowSignatureMethod(self, AllowSignatureMethod): # String
self.add_query_param('AllowSignatureMethod', AllowSignatureMethod)
def get_ServiceParameters(self): # String
return self.get_query_params().get('ServiceParameters')
def set_ServiceParameters(self, ServiceParameters): # String
self.add_query_param('ServiceParameters', ServiceParameters)
def get_FailResultSample(self): # String
return self.get_query_params().get('FailResultSample')
def set_FailResultSample(self, FailResultSample): # String
self.add_query_param('FailResultSample', FailResultSample)
def get_SystemParameters(self): # String
return self.get_query_params().get('SystemParameters')
def set_SystemParameters(self, SystemParameters): # String
self.add_query_param('SystemParameters', SystemParameters)
def get_ServiceParametersMap(self): # String
return self.get_query_params().get('ServiceParametersMap')
def set_ServiceParametersMap(self, ServiceParametersMap): # String
self.add_query_param('ServiceParametersMap', ServiceParametersMap)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_OpenIdConnectConfig(self): # String
return self.get_query_params().get('OpenIdConnectConfig')
def set_OpenIdConnectConfig(self, OpenIdConnectConfig): # String
self.add_query_param('OpenIdConnectConfig', OpenIdConnectConfig)
def get_RequestParameters(self): # String
return self.get_query_params().get('RequestParameters')
def set_RequestParameters(self, RequestParameters): # String
self.add_query_param('RequestParameters', RequestParameters)
def get_ResultDescriptions(self): # String
return self.get_query_params().get('ResultDescriptions')
def set_ResultDescriptions(self, ResultDescriptions): # String
self.add_query_param('ResultDescriptions', ResultDescriptions)
def get_Visibility(self): # String
return self.get_query_params().get('Visibility')
def set_Visibility(self, Visibility): # String
self.add_query_param('Visibility', Visibility)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_ServiceConfig(self): # String
return self.get_query_params().get('ServiceConfig')
def set_ServiceConfig(self, ServiceConfig): # String
self.add_query_param('ServiceConfig', ServiceConfig)
def get_ResultType(self): # String
return self.get_query_params().get('ResultType')
def set_ResultType(self, ResultType): # String
self.add_query_param('ResultType', ResultType)
def get_ApiName(self): # String
return self.get_query_params().get('ApiName')
def set_ApiName(self, ApiName): # String
self.add_query_param('ApiName', ApiName)
def get_ResultSample(self): # String
return self.get_query_params().get('ResultSample')
def set_ResultSample(self, ResultSample): # String
self.add_query_param('ResultSample', ResultSample)
def get_BackendEnable(self): # Boolean
return self.get_query_params().get('BackendEnable')
def set_BackendEnable(self, BackendEnable): # Boolean
self.add_query_param('BackendEnable', BackendEnable)
def get_ForceNonceCheck(self): # Boolean
return self.get_query_params().get('ForceNonceCheck')
def set_ForceNonceCheck(self, ForceNonceCheck): # Boolean
self.add_query_param('ForceNonceCheck', ForceNonceCheck)
def get_RequestConfig(self): # String
return self.get_query_params().get('RequestConfig')
def set_RequestConfig(self, RequestConfig): # String
self.add_query_param('RequestConfig', RequestConfig)
def get_ResultBodyModel(self): # String
return self.get_query_params().get('ResultBodyModel')
def set_ResultBodyModel(self, ResultBodyModel): # String
self.add_query_param('ResultBodyModel', ResultBodyModel) | null |
1,035 | def format_call(call):
if hasattr(call, "format"):
return call.format()
return call
def METHOD_NAME(name_list, call_list):
return "\n".join(
[
" {0}. '{1}': {2}".format(i, x[0], format_call(x[1]))
for i, x in enumerate(zip(name_list, call_list))
]
)
class Queue:
def __init__(self, call_list_builder=None):
if not call_list_builder:
call_list_builder = CallListBuilder()
self.__call_list = call_list_builder.calls
self.__name_list = call_list_builder.names
self.__index = 0
def take(self, type_of_call, real_call_info=None):
if self.__index >= len(self.__call_list):
raise self.__extra_call(type_of_call, real_call_info)
call = self.__call_list[self.__index]
if call.type != type_of_call:
raise self.__unexpected_type(call, type_of_call, real_call_info)
self.__index += 1
return self.__index, call
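    # Usage sketch (hypothetical call types): for a queue built from calls of
    # types "runner.cib.load" then "runner.cib.push", take("runner.cib.load")
    # returns (1, <the call>) and advances the internal index, while taking a
    # type that does not match the next queued call raises an AssertionError
    # that dumps the whole queue for context.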
def has_type(self, call_type):
return any(call.type == call_type for call in self.__call_list)
@property
def remaining(self):
return self.__call_list[self.__index :]
@property
def taken(self):
return self.__call_list[: self.__index]
def error_with_context(self, message):
return AssertionError(
"{0}\nAll calls in queue (current index={1}):\n{2}".format(
message,
self.__index,
METHOD_NAME(self.__name_list, self.__call_list),
)
)
def __unexpected_type(self, call, real_type, real_call_info):
return self.error_with_context(
(
"{0}. call was expected as '{1}' type but was '{2}' type"
"\n expected call: {3}{4}"
"\nHint: check call compatibility: for example if you use"
" env.push_cib() then runner.cib.push() will be never launched"
).format(
self.__index + 1,
call.type,
real_type,
call,
"\n real call: {0}".format(real_call_info)
if real_call_info
else "",
)
)
def __extra_call(self, type_of_call, real_call_info):
return self.error_with_context(
"No next call expected, but was ({0}):\n '{1}'".format(
type_of_call, real_call_info
)
)
class CallListBuilder:
def __init__(self):
self.__call_list = []
self.__name_list = []
@property
def calls(self):
return list(self.__call_list)
@property
def names(self):
return list(self.__name_list)
def __set(self, instead_name, name, call):
"""
Replace call that has key instead_name with new call that has key name
string name -- key of the call
Call call
string instead_name -- key of call instead of which this new call is to
be placed
"""
if instead_name not in self.__name_list:
raise self.__cannot_put("instead of", instead_name, name, call)
for i, current_name in enumerate(self.__name_list):
if current_name == instead_name:
self.__call_list[i] = call
# yes we change the name as well
self.__name_list[i] = name
return
def __append(self, name, call):
"""
Append call.
string name -- key of the call
Call call
"""
self.__name_list.append(name)
self.__call_list.append(call)
def __insert(self, before_name, name, call):
"""
Insert call before call with before_name.
string before_name -- key of call before which this new call is to be
placed
string name -- key of the call
Call call
"""
if before_name not in self.__name_list:
raise self.__cannot_put("before", before_name, name, call)
index = self.__name_list.index(before_name)
self.__name_list.insert(index, name)
self.__call_list.insert(index, call)
def remove(self, name):
"""
Remove a call with the specified name
"""
try:
index = self.__name_list.index(name)
del self.__call_list[index]
del self.__name_list[index]
except ValueError as e:
raise self.__name_not_exists(name) from e
def trim_before(self, name):
"""
Remove a call with the specified name and all calls after it from the list
"""
try:
index = self.__name_list.index(name)
self.__call_list = self.__call_list[:index]
self.__name_list = self.__name_list[:index]
except ValueError as e:
raise self.__name_not_exists(name) from e
def get(self, name):
"""
Get first call with name.
string name -- key of the call
"""
try:
return self.__call_list[self.__name_list.index(name)]
except ValueError as e:
raise self.__name_not_exists(name) from e
def place(self, name, call, before=None, instead=None):
"""
Place call into calllist.
string name -- key of the call
Call call
string before -- key of call before which this new call is to be placed
string instead -- key of call instead of which this new call is to be
placed
"""
if name and name in self.__name_list and instead != name:
raise self.__name_exists_already(name)
if before and instead:
raise self.__cannot_use_before_and_instead(
name,
call,
before,
instead,
)
if not hasattr(call, "type") or not call.type:
raise self.__type_of_call_is_not_specified(call)
if before:
self.__insert(before, name, call)
elif instead:
self.__set(instead, name, call)
else:
self.__append(name, call)
def __error_with_context(self, message):
return AssertionError(
"{0}\nCalls in the configuration call collection are:\n{1}".format(
message,
METHOD_NAME(self.__name_list, self.__call_list),
)
)
@staticmethod
def __type_of_call_is_not_specified(call):
return AssertionError(
(
"Class {0}.{1} must have the attribute 'type' with no-falsy "
"value."
).format(call.__module__, call.__class__.__name__)
)
def __name_not_exists(self, name):
return self.__error_with_context(
"Call named '{0}' does not exist.".format(name)
)
def __name_exists_already(self, name):
return self.__error_with_context(
"Name '{0}' is in this configuration already.".format(name)
)
def __cannot_use_before_and_instead(self, name, call, before, instead):
return self.__error_with_context(
(
"Args 'before' ({0}) and 'instead' ({1}) cannot be used"
" together\n '{2}': {3}"
).format(before, instead, name, call)
)
def __cannot_put(self, where_type, where_name, name, call):
return self.__error_with_context(
(
"Cannot put call named '{0}' ({1}) {2} '{3}'"
" because '{3}' does not exist."
).format(
name,
call,
where_type,
where_name,
)
) | null |
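# A minimal usage sketch of the two classes above; the Call class here is a
# hypothetical stand-in for the framework's real call objects (anything with
# a truthy `type` attribute works).
class Call:
    def __init__(self, call_type, payload):
        self.type = call_type  # Queue.take() matches on this attribute
        self.payload = payload

builder = CallListBuilder()
builder.place("load", Call("runner.cib.load", "<cib/>"))
builder.place("push", Call("runner.cib.push", "<cib/>"))
# Swap the push call for another one while keeping its position (and renaming it).
builder.place("push-v2", Call("runner.cib.push", "<cib2/>"), instead="push")

queue = Queue(builder)
index, call = queue.take("runner.cib.load")  # consumes the first expected call
assert index == 1 and call.payload == "<cib/>"
# Taking a call of the wrong type (or past the end of the list) raises an
# AssertionError whose message lists the whole configured call sequence.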
1,036 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class UpdateDataServiceApiRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'UpdateDataServiceApi')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ScriptDetails(self): # String
return self.get_body_params().get('ScriptDetails')
def set_ScriptDetails(self, ScriptDetails): # String
self.add_body_params('ScriptDetails', ScriptDetails)
def get_RequestMethod(self): # Integer
return self.get_body_params().get('RequestMethod')
def set_RequestMethod(self, RequestMethod): # Integer
self.add_body_params('RequestMethod', RequestMethod)
def get_ApiDescription(self): # String
return self.get_body_params().get('ApiDescription')
def set_ApiDescription(self, ApiDescription): # String
self.add_body_params('ApiDescription', ApiDescription)
def get_Timeout(self): # Integer
return self.get_body_params().get('Timeout')
def set_Timeout(self, Timeout): # Integer
self.add_body_params('Timeout', Timeout)
def get_ResourceGroupId(self): # Long
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # Long
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_TenantId(self): # Long
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # Long
self.add_body_params('TenantId', TenantId)
def get_Protocols(self): # String
return self.get_body_params().get('Protocols')
def set_Protocols(self, Protocols): # String
self.add_body_params('Protocols', Protocols)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def METHOD_NAME(self): # Integer
return self.get_body_params().get('ResponseContentType')
def set_ResponseContentType(self, ResponseContentType): # Integer
self.add_body_params('ResponseContentType', ResponseContentType)
def get_ApiPath(self): # String
return self.get_body_params().get('ApiPath')
def set_ApiPath(self, ApiPath): # String
self.add_body_params('ApiPath', ApiPath)
def get_WizardDetails(self): # String
return self.get_body_params().get('WizardDetails')
def set_WizardDetails(self, WizardDetails): # String
self.add_body_params('WizardDetails', WizardDetails)
def get_VisibleRange(self): # Integer
return self.get_body_params().get('VisibleRange')
def set_VisibleRange(self, VisibleRange): # Integer
self.add_body_params('VisibleRange', VisibleRange)
def get_RegistrationDetails(self): # String
return self.get_body_params().get('RegistrationDetails')
def set_RegistrationDetails(self, RegistrationDetails): # String
self.add_body_params('RegistrationDetails', RegistrationDetails)
def get_ApiId(self): # Long
return self.get_body_params().get('ApiId')
def set_ApiId(self, ApiId): # Long
self.add_body_params('ApiId', ApiId) | null |
1,037 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class RunCommandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'RunCommand','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_WorkingDir(self): # String
return self.get_query_params().get('WorkingDir')
def set_WorkingDir(self, WorkingDir): # String
self.add_query_param('WorkingDir', WorkingDir)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Frequency(self): # String
return self.get_query_params().get('Frequency')
def set_Frequency(self, Frequency): # String
self.add_query_param('Frequency', Frequency)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RepeatMode(self): # String
return self.get_query_params().get('RepeatMode')
def set_RepeatMode(self, RepeatMode): # String
self.add_query_param('RepeatMode', RepeatMode)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_KeepCommand(self): # Boolean
return self.get_query_params().get('KeepCommand')
def set_KeepCommand(self, KeepCommand): # Boolean
self.add_query_param('KeepCommand', KeepCommand)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('Timed')
def set_Timed(self, Timed): # Boolean
self.add_query_param('Timed', Timed)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Parameters(self): # Json
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameters): # Json
self.add_query_param('Parameters', Parameters)
def get_ContainerName(self): # String
return self.get_query_params().get('ContainerName')
def set_ContainerName(self, ContainerName): # String
self.add_query_param('ContainerName', ContainerName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_CommandContent(self): # String
return self.get_query_params().get('CommandContent')
def set_CommandContent(self, CommandContent): # String
self.add_query_param('CommandContent', CommandContent)
def get_Timeout(self): # Long
return self.get_query_params().get('Timeout')
def set_Timeout(self, Timeout): # Long
self.add_query_param('Timeout', Timeout)
def get_ContentEncoding(self): # String
return self.get_query_params().get('ContentEncoding')
def set_ContentEncoding(self, ContentEncoding): # String
self.add_query_param('ContentEncoding', ContentEncoding)
def get_WindowsPasswordName(self): # String
return self.get_query_params().get('WindowsPasswordName')
def set_WindowsPasswordName(self, WindowsPasswordName): # String
self.add_query_param('WindowsPasswordName', WindowsPasswordName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_EnableParameter(self): # Boolean
return self.get_query_params().get('EnableParameter')
def set_EnableParameter(self, EnableParameter): # Boolean
self.add_query_param('EnableParameter', EnableParameter)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username) | null |
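# A sketch of how these generated request classes are typically driven with
# the core SDK client; the credentials and region below are placeholders, not
# working values. The same pattern applies to the other request classes here.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = RunCommandRequest()
request.set_Type('RunShellScript')         # shell command for Linux instances
request.set_CommandContent('echo hello')
request.set_InstanceIds(['i-example'])     # RepeatList -> InstanceId.1, InstanceId.2, ...
request.set_Timeout(60)
response = client.do_action_with_exception(request)  # raises on API errors
print(response)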
1,038 | """Algorithm to find a proper batch size which is fit to current GPU device."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Dict, Tuple
import torch
from otx.algorithms.common.utils.logger import get_logger
logger = get_logger()
class BsSearchAlgo:
"""Algorithm class to find optimal batch size.
Args:
        train_func (Callable[[int], None]): Training function with a single argument that sets the batch size.
default_bs (int): Default batch size. It should be bigger than 0.
max_bs (int): Maximum batch size. It should be bigger than 0.
"""
def __init__(self, train_func: Callable[[int], None], default_bs: int, max_bs: int):
if default_bs <= 0:
raise ValueError("Batch size should be bigger than 0.")
if max_bs <= 0:
raise ValueError("train data set size should be bigger than 0.")
if max_bs < default_bs:
default_bs = max_bs
self._train_func = train_func
self._default_bs = default_bs
self._max_bs = max_bs
self._bs_try_history: Dict[int, int] = {}
_, self._total_mem = torch.cuda.mem_get_info()
self._mem_lower_bound = 0.8 * self._total_mem
self._mem_upper_bound = 0.85 * self._total_mem
def _try_batch_size(self, bs: int) -> Tuple[bool, int]:
cuda_oom = False
torch.cuda.reset_max_memory_allocated(device=None)
torch.cuda.empty_cache()
try:
self._train_func(bs)
except RuntimeError as e:
if str(e).startswith("CUDA out of memory."):
cuda_oom = True
else:
raise e
max_memory_allocated = torch.cuda.max_memory_allocated(device=None)
if not cuda_oom:
            # Record the peak memory usage observed for this batch size
self._bs_try_history[bs] = max_memory_allocated
logger.debug(
f"Adapting Batch size => bs : {bs}, CUDA_OOM : {cuda_oom}, "
f"GPU memory usage : {max_memory_allocated / self._total_mem}%"
)
torch.cuda.empty_cache()
return cuda_oom, max_memory_allocated
@staticmethod
def METHOD_NAME(val1: int, val2: int) -> int:
ret = (val1 + val2) // 2
if ret % 2 == 1:
ret += 1
return ret
def auto_decrease_batch_size(self) -> int:
"""Decrease batch size if default batch size isn't fit to current GPU device.
Returns:
int: Proper batch size possibly decreased as default value isn't fit
"""
available_bs = 0
current_bs = self._default_bs
lowest_unavailable_bs = self._default_bs + 2
while True:
cuda_oom, max_memory_allocated = self._try_batch_size(current_bs)
# If GPU memory usage is too close to limit, CUDA OOM can be raised during training
if cuda_oom or max_memory_allocated > self._mem_upper_bound:
if current_bs < lowest_unavailable_bs:
lowest_unavailable_bs = current_bs
current_bs = self.METHOD_NAME(current_bs, available_bs)
else:
available_bs = current_bs
current_bs = self.METHOD_NAME(current_bs, lowest_unavailable_bs)
if lowest_unavailable_bs - available_bs <= 2:
break
if available_bs == 0:
raise RuntimeError("Current device can't train model even with 2.")
return available_bs
def find_big_enough_batch_size(self, drop_last: bool = False) -> int:
"""Find a big enough batch size.
        This function finds a big enough batch size by training with various batch sizes.
        It estimates the next batch size to try using a linear equation fitted to the training history.
        The term "big enough" is used because the goal is not the maximum batch size but a value
        whose GPU memory usage lies between the lower and upper bound.
Args:
drop_last (bool): Whether to drop the last incomplete batch.
Raises:
RuntimeError: If training with batch size 2 can't be run, raise an error.
Returns:
int: Big enough batch size.
"""
estimated_bs = self._default_bs
# try default batch size
cuda_oom, bs_mem_usage = self._try_batch_size(estimated_bs)
if cuda_oom or bs_mem_usage > self._mem_upper_bound:
self._default_bs -= 2
if self._default_bs <= 0:
raise RuntimeError("Current device can't train model even with 2.")
return self.auto_decrease_batch_size()
# try default batch size + 2
estimated_bs += 2
if estimated_bs > self._max_bs:
return self._default_bs
cuda_oom, bs_mem_usage = self._try_batch_size(estimated_bs)
if cuda_oom or bs_mem_usage > self._mem_upper_bound:
return self._default_bs
# estimate batch size using equation
estimation_pct = 0.82
while True:
estimated_bs = self._estimate_batch_size(estimation_pct)
if estimated_bs in self._bs_try_history:
break
cuda_oom, mem_usage = self._try_batch_size(estimated_bs)
if cuda_oom:
estimation_pct -= 0.1
if estimation_pct <= 0:
estimated_bs = self._default_bs + 2
break
elif self._mem_lower_bound <= mem_usage <= self._mem_upper_bound:
break
else:
estimation_pct = 0.82
if drop_last and (self._max_bs // 2 < estimated_bs < self._max_bs):
estimated_bs = self._max_bs // 2
return estimated_bs
def _estimate_batch_size(self, estimation_pct: float) -> int:
if len(self._bs_try_history) < 2:
raise RuntimeError("At least two trials should be done without CUDA OOM to estimate batch size.")
def distance_from_bound(val):
if val[1] < self._mem_lower_bound:
                # if memory usage is the same, a higher batch size is preferred
return self._mem_lower_bound - val[1] - val[0] / 10000
elif self._mem_upper_bound < val[1]:
                # if memory usage is the same, a lower batch size is preferred
return val[1] - self._mem_upper_bound + val[0] / 10000
else:
return 0
bs_arr = sorted([(bs, mem_usage) for bs, mem_usage in self._bs_try_history.items()], key=distance_from_bound)
bs1 = bs_arr[0][0]
bs1_mem_usage = bs_arr[0][1]
        for i in range(1, len(bs_arr)):
            gradient = (bs_arr[i][1] - bs1_mem_usage) / (bs_arr[i][0] - bs1)
            b = bs1_mem_usage - gradient * bs1
            if gradient != 0:
                break
        if gradient == 0:  # all batch sizes in the history used the same GPU memory
            if bs1_mem_usage < self._mem_lower_bound:
                return bs1 + 2
            elif bs1_mem_usage > self._mem_upper_bound:
                if bs1 <= 2:
                    return 2
                return bs1 - 2
            else:
                return bs1
        estimated_bs = round(((self._total_mem * estimation_pct) - b) / (gradient * 2)) * 2
        # If estimated_bs was already tried and used more GPU memory than the upper bound,
        # set estimated_bs to 2 less than the smallest batch size that exceeded the upper bound
if estimated_bs in self._bs_try_history and self._bs_try_history[estimated_bs] > self._mem_upper_bound:
for bs, mem_usage in bs_arr:
if mem_usage > self._mem_upper_bound:
estimated_bs = bs - 2
break
if estimated_bs > self._max_bs:
estimated_bs = self._max_bs
return estimated_bs | null |
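# A minimal sketch of driving the search; train_one_epoch is a hypothetical
# training callable, and a CUDA device is assumed to be available.
def train_one_epoch(batch_size: int) -> None:
    # Run a short training pass with the given batch size; on OOM, torch
    # raises RuntimeError("CUDA out of memory. ..."), which the algorithm
    # catches in _try_batch_size().
    ...

algo = BsSearchAlgo(train_func=train_one_epoch, default_bs=32, max_bs=512)
safe_bs = algo.auto_decrease_batch_size()    # largest even batch size that fits
good_bs = algo.find_big_enough_batch_size()  # batch size targeting ~80-85% of GPU memory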
1,039 | """
Tool Parameter specific sanitizing.
"""
import logging
import string
import galaxy.util
log = logging.getLogger(__name__)
class ToolParameterSanitizer:
"""
Handles tool parameter specific sanitizing.
>>> from galaxy.util import XML
>>> sanitizer = ToolParameterSanitizer.from_element(XML(
... '''
... <sanitizer invalid_char="">
... <valid initial="string.ascii_letters"/>
... </sanitizer>
... '''))
>>> sanitizer.sanitize_param(''.join(sorted(c for c in string.printable))) == ''.join(sorted(c for c in string.ascii_letters))
True
>>> slash = chr(92)
>>> sanitizer = ToolParameterSanitizer.from_element(XML(
... '''
... <sanitizer>
... <valid initial="none">
... <add preset="string.printable"/>
    ... <remove value="&quot;"/>
... <remove value="%s"/>
... </valid>
... <mapping initial="none">
    ... <add source="&quot;" target="%s&quot;"/>
... <add source="%s" target="%s%s"/>
... </mapping>
... </sanitizer>
... ''' % (slash, slash, slash, slash, slash)))
>>> text = '%s"$rm&#!' % slash
>>> [c for c in sanitizer.sanitize_param(text)] == [slash, slash, slash, '"', '$', 'r', 'm', '&', '#', '!']
True
"""
VALID_PRESET = {"default": (f"{string.ascii_letters + string.digits} -=_.()/+*^,:?!"), "none": ""}
MAPPING_PRESET = {"default": galaxy.util.mapped_chars, "none": {}}
DEFAULT_INVALID_CHAR = "X"
# class methods
@classmethod
def METHOD_NAME(cls, elem):
"""Loads the proper filter by the type attribute of elem"""
# TODO: Add ability to generically specify a method to use for sanitizing input via specification in tool XML
rval = ToolParameterSanitizer()
rval._invalid_char = elem.get("invalid_char", cls.DEFAULT_INVALID_CHAR)
rval.sanitize = galaxy.util.string_as_bool(elem.get("sanitize", "True"))
for valid_elem in elem.findall("valid"):
rval._valid_chars = rval.get_valid_by_name(valid_elem.get("initial", "default"))
for action_elem in valid_elem:
preset = rval.get_valid_by_name(action_elem.get("preset", "none"))
valid_value = [val for val in action_elem.get("value", [])]
if action_elem.tag.lower() == "add":
for val in preset + valid_value:
if val not in rval._valid_chars:
rval._valid_chars.append(val)
elif action_elem.tag.lower() == "remove":
for val in preset + valid_value:
while val in rval._valid_chars:
rval._valid_chars.remove(val)
else:
log.debug(f"Invalid action tag in valid: {action_elem.tag}")
for mapping_elem in elem.findall("mapping"):
rval._mapped_chars = rval.get_mapping_by_name(mapping_elem.get("initial", "default"))
for action_elem in mapping_elem:
map_source = action_elem.get("source", None)
map_target = action_elem.get("target", None)
preset = rval.get_mapping_by_name(action_elem.get("preset", "none"))
if action_elem.tag.lower() == "add":
rval._mapped_chars.update(preset)
if None not in [map_source, map_target]:
rval._mapped_chars[map_source] = map_target
elif action_elem.tag.lower() == "remove":
for map_key in preset.keys():
if map_key in rval._mapped_chars:
del rval._mapped_chars[map_key]
                    if map_source is not None and map_source in rval._mapped_chars:
                        del rval._mapped_chars[map_source]
else:
log.debug(f"Invalid action tag in mapping: {action_elem.tag}")
return rval
@classmethod
def get_valid_by_name(cls, name):
rval = []
for split_name in name.split(","):
# Remove ';' (if present) and everything after it
split_name = split_name.split(";", 1)[0]
split_name = split_name.strip()
value = []
if split_name.startswith("string."):
string_constant = split_name[7:]
if string_constant in ("letters", "lowercase", "uppercase"):
split_name = f"string.ascii_{string_constant}"
try:
value = eval(split_name)
except NameError as e:
log.debug(f"Invalid string preset specified: {e}")
elif split_name in cls.VALID_PRESET:
value = cls.VALID_PRESET[split_name]
else:
log.debug(f"Invalid preset name specified: {split_name}")
rval.extend([val for val in value if val not in rval])
return rval
@classmethod
def get_mapping_by_name(cls, name):
rval = {}
for split_name in name.split(","):
split_name = split_name.strip()
if split_name in cls.MAPPING_PRESET:
rval.update(cls.MAPPING_PRESET[split_name])
else:
log.debug(f"Invalid preset name specified: {split_name}")
return rval
# end class methods
def __init__(self):
self._valid_chars = [] # List of valid characters
        self._mapped_chars = {}  # Replace a char with any number of characters
self._invalid_char = self.DEFAULT_INVALID_CHAR # Replace invalid characters with this character
        self.sanitize = True  # When False, values are simply passed back unchanged
def restore_text(self, text):
"""Restores sanitized text"""
if self.sanitize:
for key, value in self._mapped_chars.items():
text = text.replace(value, key)
return text
def sanitize_text(self, text):
"""Restricts the characters that are allowed in a text"""
if not self.sanitize:
return text
rval = []
for c in text:
if c in self._valid_chars:
rval.append(c)
elif c in self._mapped_chars:
rval.append(self._mapped_chars[c])
else:
rval.append(self._invalid_char)
return "".join(rval)
def sanitize_param(self, value):
"""Clean incoming parameters (strings or lists)"""
if not self.sanitize:
return value
if isinstance(value, str):
return self.sanitize_text(value)
elif isinstance(value, list):
return list(map(self.sanitize_text, value))
else:
raise Exception(f"Unknown parameter type ({type(value)}:{value})") | null |
1,040 | from __future__ import print_function
import json
import boto3
import os
stackoutputs = None
stackname = os.getenv('CFSTACK')
def METHOD_NAME(event, context):
print("INPUT: ",json.dumps(event))
    # check that we aren't calling this function before any documents have been returned to the client
try:
#Because "sub documents", like a sofa document that is connected to a room document, does not have a next, the in built query lambda attempts to figure out a parent document and will give the necessary information to perform room iteration
navigationToJson = event["req"]["session"]["qnabotcontext"]["navigation"]
except KeyError as k:
navigationToJson = {}
qidList = navigationToJson.get("previous",[])
    # proceed only if there are previous documents to go back to
if len(qidList) > 0:
client = boto3.client('lambda')
        # Invoke the prepackaged function that queries Elasticsearch using a document qid
temp = qidList[-1]
resp = client.invoke(
FunctionName = event["req"]["_info"]["es"]["service"]["qid"],
Payload = json.dumps({'qid':temp,'type':"qid"}),
InvocationType = "RequestResponse"
)
# Because the payload is of a streamable type object, we must explicitly read it and load JSON
response = json.loads(resp['Payload'].read())
#uncomment below if you want to see the response
#print(json.dumps(response))
        # Do not call the Lambda hook from the next item if the link points to ourselves
function_name = response.get('l', '')
if function_name != '' and function_name != 'QNA:ExamplePYTHONLambdaPrevious' and os.environ.get('AWS_LAMBDA_FUNCTION_NAME') not in function_name:
# This update will pull in standard qid content into the eventual result passed back up the stack
event = updateResult(event,response)
if "args" in response:
event["res"]["result"]["args"] = response["args"]
client = boto3.client('lambda')
targetname = response.get('l', '')
            if not targetname.startswith('arn'):
targetname = mapToArn(targetname, stackname)
lhresp = client.invoke(
FunctionName = targetname,
Payload = json.dumps(event),
InvocationType = "RequestResponse"
)
# Because the payload is of a streamable type object, we must explicitly read it and load JSON
# Next merge in results of the LambdaHook execution
event = updateLambdaHook(event,json.loads(lhresp['Payload'].read()),response)
elif 'a' in response:
# No lambda hook to call so just merge in content from the target question
event = updateResult(event,response)
        # the branches above have rewritten "previous" so it points at the redirected question that was just asked instead of "Next Question"
else:
event["res"]["session"]["qnabotcontext"]["previous"] ={"qid":qidList,"q":navigationToJson["q"]}
event["res"]["session"]["qnabotcontext"]["navigation"]={"next":navigationToJson["next"],"previous":[],"hasParent":navigationToJson["hasParent"]}
print("OUTPUT: ",json.dumps(event))
return event
# Maps a short name to the full ARN via the CloudFormation stack outputs
def mapToArn(name,stack):
res = name
global stackoutputs
if stackoutputs is None:
cf = boto3.client('cloudformation')
r = cf.describe_stacks(StackName=stack)
stack, = r['Stacks']
stackoutputs = stack['Outputs']
for o in stackoutputs:
if name == 'QNA:' + o['OutputKey']:
res = o['OutputValue']
break
return res
#update the event with the information if there is a Lambda hook
def updateLambdaHook(event,hookEvent, response):
navigationToJson = event["req"]["session"]["qnabotcontext"]["navigation"]
tempList= navigationToJson["previous"]
#shift to remove previous function name from list
tempList.pop()
if "session" not in hookEvent["res"]:
hookEvent["res"]["session"] = {}
hookEvent["res"]["session"]["qnabotcontext"]["previous"] ={"qid":response["qid"],"a":response["a"],"alt":response.get("alt",{}),"q":event["req"]["question"]}
hookEvent["res"]["session"]["qnabotcontext"]["navigation"]={"next":response["next"],"previous":tempList,"hasParent":navigationToJson["hasParent"]}
return hookEvent
#update the event with the information from the new Query
def updateResult(event, response):
event["res"]["result"] = response
event["res"]["type"] = "PlainText"
event["res"]["message"] = response["a"]
event["res"]["plainMessage"]=response["a"]
event["res"]["session"]["appContext"]["altMessages"] = response.get("alt",{})
if "outputDialogMode" not in event["req"] or event["req"]["outputDialogMode"]!="Text":
if response.get("alt",False) and "ssml" in response["alt"] and len(response["alt"]["ssml"])>0:
event["res"]["type"]="SSML"
event["res"]["message"]=response["alt"]["ssml"].replace('\n',' ')
if "r" in response:
card = response["r"]
if 'title' in card:
#making sure that the title is not empty, as we don't want to be sending empty cards
if card["title"]!="":
event["res"]["card"]["send"] = True
event["res"]["card"]["title"] = card["title"]
                event["res"]["card"]["text"] = card.get("text", "")
if 'imageUrl' in card:
event["res"]["card"]["imageUrl"] = card["imageUrl"]
if 'buttons' in card:
event["res"]["card"]["buttons"] = card["buttons"]
if 't' in response:
event["res"]["session"]["topic"] = response["t"]
navigationToJson = event["req"]["session"]["qnabotcontext"]["navigation"]
tempList= navigationToJson["previous"]
#shift to remove previous function name from list
tempList.pop()
event["res"]["session"]["qnabotcontext"]["previous"] ={"qid":response["qid"],"q":event["req"]["question"]}
event["res"]["session"]["qnabotcontext"]["navigation"]={"next":response["next"],"previous":tempList,"hasParent":navigationToJson["hasParent"]}
return event
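# An illustrative (hypothetical) minimal shape of the incoming event, showing
# only the fields the handlers above actually read; real QnABot events carry
# many more keys, and the qid Lambda name comes from the bot deployment.
example_event = {
    "req": {
        "question": "previous",
        "_info": {"es": {"service": {"qid": "qna-ESQidLambda"}}},
        "session": {
            "qnabotcontext": {
                "navigation": {
                    "next": "room.2",
                    "previous": ["room.1"],  # stack of previously visited document qids
                    "hasParent": True,
                },
            },
        },
    },
    "res": {
        "session": {"qnabotcontext": {}, "appContext": {}},
        "card": {},
    },
}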
| null |
1,041 | """Module for defining multi-label linear classification head."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
import torch.nn.functional as F
from mmcls.models.builder import HEADS
from mmcls.models.heads import MultiLabelClsHead
from mmcv.cnn import normal_init
from torch import nn
from .mixin import OTXHeadMixin
@HEADS.register_module()
class CustomMultiLabelLinearClsHead(OTXHeadMixin, MultiLabelClsHead):
"""Custom Linear classification head for multilabel task.
Args:
num_classes (int): Number of categories.
in_channels (int): Number of channels in the input feature map.
normalized (bool): Normalize input features and weights.
scale (float): positive scale parameter.
loss (dict): Config of classification loss.
"""
def __init__(
self,
num_classes,
in_channels,
normalized=False,
scale=1.0,
loss=None,
):
loss = loss if loss else dict(type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=1.0)
super().__init__(loss=loss)
if num_classes <= 0:
raise ValueError(f"num_classes={num_classes} must be a positive integer")
self.in_channels = in_channels
self.num_classes = num_classes
self.normalized = normalized
self.scale = scale
self._init_layers()
def _init_layers(self):
if self.normalized:
            self.fc = AngularLinear(self.in_channels, self.num_classes)
else:
self.fc = nn.Linear(self.in_channels, self.num_classes)
def init_weights(self):
"""Initialize weights of head."""
if isinstance(self.fc, nn.Linear):
normal_init(self.fc, mean=0, std=0.01, bias=0)
def loss(self, cls_score, gt_label, valid_label_mask=None):
"""Calculate loss for given cls_score/gt_label."""
gt_label = gt_label.type_as(cls_score)
num_samples = len(cls_score)
losses = dict()
# map difficult examples to positive ones
_gt_label = torch.abs(gt_label)
# compute loss
loss = self.compute_loss(cls_score, _gt_label, valid_label_mask=valid_label_mask, avg_factor=num_samples)
losses["loss"] = loss / self.scale
return losses
def forward(self, x):
"""Forward fuction of CustomMultiLabelLinearClsHead class."""
return self.simple_test(x)
def forward_train(self, cls_score, gt_label, **kwargs):
"""Forward_train fuction of CustomMultiLabelLinearClsHead."""
img_metas = kwargs.get("img_metas", False)
cls_score = self.pre_logits(cls_score)
gt_label = gt_label.type_as(cls_score)
cls_score = self.fc(cls_score) * self.scale
valid_batch_mask = gt_label >= 0
gt_label = gt_label[
valid_batch_mask,
].view(gt_label.shape[0], -1)
cls_score = cls_score[
valid_batch_mask,
].view(cls_score.shape[0], -1)
if img_metas:
valid_label_mask = self.METHOD_NAME(img_metas=img_metas)
valid_label_mask = valid_label_mask.to(cls_score.device)
valid_label_mask = valid_label_mask[
valid_batch_mask,
].view(valid_label_mask.shape[0], -1)
losses = self.loss(cls_score, gt_label, valid_label_mask=valid_label_mask)
else:
losses = self.loss(cls_score, gt_label)
return losses
def simple_test(self, img):
"""Test without augmentation."""
img = self.pre_logits(img)
cls_score = self.fc(img) * self.scale
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
if torch.onnx.is_in_onnx_export():
return cls_score
pred = torch.sigmoid(cls_score) if cls_score is not None else None
pred = list(pred.detach().cpu().numpy())
return pred
def METHOD_NAME(self, img_metas):
"""Get valid label mask using ignored_label."""
valid_label_mask = []
for meta in img_metas:
mask = torch.Tensor([1 for _ in range(self.num_classes)])
if "ignored_labels" in meta and meta["ignored_labels"]:
mask[meta["ignored_labels"]] = 0
valid_label_mask.append(mask)
valid_label_mask = torch.stack(valid_label_mask, dim=0)
return valid_label_mask
class AngularLinear(nn.Module):
"""Computes cos of angles between input vectors and weights vectors.
Args:
in_features (int): Number of input features.
out_features (int): Number of output cosine logits.
"""
def __init__(self, in_features, out_features):
"""Init fuction of AngularLinear class."""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(self.out_features, self.in_features))
self.weight.data.normal_().renorm_(2, 0, 1e-5).mul_(1e5)
def forward(self, x):
"""Forward fuction of AngularLinear class."""
cos_theta = F.normalize(x.view(x.shape[0], -1), dim=1).mm(F.normalize(self.weight.t(), p=2, dim=0))
return cos_theta.clamp(-1, 1) | null |
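# A quick numerical check of the cosine-logit idea: with the input rows and
# the weight rows both L2-normalized, the matrix product is a cosine
# similarity, so every output lies in [-1, 1].
import torch

layer = AngularLinear(in_features=8, out_features=3)
x = torch.randn(4, 8)
cos = layer(x)
assert cos.shape == (4, 3)
assert cos.abs().max().item() <= 1.0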
1,042 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class RunCommandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'RunCommand','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_WorkingDir(self): # String
return self.get_query_params().get('WorkingDir')
def set_WorkingDir(self, WorkingDir): # String
self.add_query_param('WorkingDir', WorkingDir)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Frequency(self): # String
return self.get_query_params().get('Frequency')
def set_Frequency(self, Frequency): # String
self.add_query_param('Frequency', Frequency)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RepeatMode(self): # String
return self.get_query_params().get('RepeatMode')
def set_RepeatMode(self, RepeatMode): # String
self.add_query_param('RepeatMode', RepeatMode)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_KeepCommand(self): # Boolean
return self.get_query_params().get('KeepCommand')
def set_KeepCommand(self, KeepCommand): # Boolean
self.add_query_param('KeepCommand', KeepCommand)
def get_Timed(self): # Boolean
return self.get_query_params().get('Timed')
def set_Timed(self, Timed): # Boolean
self.add_query_param('Timed', Timed)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Parameters(self): # Json
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameters): # Json
self.add_query_param('Parameters', Parameters)
def get_ContainerName(self): # String
return self.get_query_params().get('ContainerName')
def set_ContainerName(self, ContainerName): # String
self.add_query_param('ContainerName', ContainerName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def METHOD_NAME(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_CommandContent(self): # String
return self.get_query_params().get('CommandContent')
def set_CommandContent(self, CommandContent): # String
self.add_query_param('CommandContent', CommandContent)
def get_Timeout(self): # Long
return self.get_query_params().get('Timeout')
def set_Timeout(self, Timeout): # Long
self.add_query_param('Timeout', Timeout)
def get_ContentEncoding(self): # String
return self.get_query_params().get('ContentEncoding')
def set_ContentEncoding(self, ContentEncoding): # String
self.add_query_param('ContentEncoding', ContentEncoding)
def get_WindowsPasswordName(self): # String
return self.get_query_params().get('WindowsPasswordName')
def set_WindowsPasswordName(self, WindowsPasswordName): # String
self.add_query_param('WindowsPasswordName', WindowsPasswordName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_EnableParameter(self): # Boolean
return self.get_query_params().get('EnableParameter')
def set_EnableParameter(self, EnableParameter): # Boolean
self.add_query_param('EnableParameter', EnableParameter)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username) | null |
1,043 | # License: MIT
# Copyright © 2023 Frequenz Energy-as-a-Service GmbH
"""Actor model implementation."""
import abc
import asyncio
import logging
from ._background_service import BackgroundService
_logger = logging.getLogger(__name__)
class Actor(BackgroundService, abc.ABC):
"""A primitive unit of computation that runs autonomously.
From [Wikipedia](https://en.wikipedia.org/wiki/Actor_model), an actor is:
> [...] the basic building block of concurrent computation. In response to
> a message it receives, an actor can: make local decisions, create more actors,
> send more messages, and determine how to respond to the next message received.
> Actors may modify their own private state, but can only affect each other
> indirectly through messaging (removing the need for lock-based synchronization).
[Channels](https://github.com/frequenz-floss/frequenz-channels-python/) can be used
to implement communication between actors, as shown in the examples below.
To implement an actor, subclasses must implement the `_run()` method, which should
run the actor's logic. The `_run()` method is called by the base class when the
actor is started, and is expected to run until the actor is stopped.
If an unhandled exception is raised in the `_run()` method, the actor will be
restarted automatically. Unhandled [`BaseException`][]s will cause the actor to stop
immediately and will be re-raised.
!!! warning
As actors manage [`asyncio.Task`][] objects, a reference to them must be held
for as long as the actor is expected to be running, otherwise its tasks will be
cancelled and the actor will stop. For more information, please refer to the
[Python `asyncio`
documentation](https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task).
Example: Example of an actor receiving from two receivers
```python
from frequenz.channels import Broadcast, Receiver, Sender
from frequenz.channels.util import select, selected_from
class EchoActor(Actor):
def __init__(
self,
recv1: Receiver[bool],
recv2: Receiver[bool],
output: Sender[bool],
) -> None:
super().__init__()
self._recv1 = recv1
self._recv2 = recv2
self._output = output
async def _run(self) -> None:
async for selected in select(self._recv1, self._recv2):
if selected_from(selected, self._recv1):
await self._output.send(selected.value)
                    elif selected_from(selected, self._recv2):
await self._output.send(selected.value)
else:
assert False, "Unknown selected channel"
input_channel_1 = Broadcast[bool]("input_channel_1")
input_channel_2 = Broadcast[bool]("input_channel_2")
input_channel_2_sender = input_channel_2.new_sender()
echo_channel = Broadcast[bool]("EchoChannel")
echo_receiver = echo_channel.new_receiver()
async with EchoActor(
input_channel_1.new_receiver(),
input_channel_2.new_receiver(),
echo_channel.new_sender(),
):
await input_channel_2_sender.send(True)
print(await echo_receiver.receive())
```
Example: Example of composing two actors
```python
from frequenz.channels import Broadcast, Receiver, Sender
class Actor1(Actor):
def __init__(
self,
recv: Receiver[bool],
output: Sender[bool],
) -> None:
super().__init__()
self._recv = recv
self._output = output
async def _run(self) -> None:
async for msg in self._recv:
await self._output.send(msg)
class Actor2(Actor):
def __init__(
self,
recv: Receiver[bool],
output: Sender[bool],
) -> None:
super().__init__()
self._recv = recv
self._output = output
async def _run(self) -> None:
async for msg in self._recv:
await self._output.send(msg)
input_channel: Broadcast[bool] = Broadcast("Input to Actor1")
middle_channel: Broadcast[bool] = Broadcast("Actor1 -> Actor2 stream")
output_channel: Broadcast[bool] = Broadcast("Actor2 output")
input_sender = input_channel.new_sender()
output_receiver = output_channel.new_receiver()
async with (
Actor1(input_channel.new_receiver(), middle_channel.new_sender()),
Actor2(middle_channel.new_receiver(), output_channel.new_sender()),
):
await input_sender.send(True)
print(await output_receiver.receive())
```
"""
_restart_limit: int | None = None
"""The number of times actors can be restarted when they are stopped by unhandled exceptions.
If this is bigger than 0 or `None`, the actor will be restarted when there is an
    unhandled exception in the `_run()` method.
If `None`, the actor will be restarted an unlimited number of times.
!!! note
This is mostly used for testing purposes and shouldn't be set in production.
"""
def METHOD_NAME(self) -> None:
"""Start this actor.
If this actor is already running, this method does nothing.
"""
if self.is_running:
return
self._tasks.clear()
self._tasks.add(asyncio.create_task(self._run_loop()))
@abc.abstractmethod
async def _run(self) -> None:
"""Run this actor's logic."""
async def _run_loop(self) -> None:
"""Run this actor's task in a loop until `_restart_limit` is reached.
Raises:
asyncio.CancelledError: If this actor's `_run()` gets cancelled.
            Exception: If this actor's `_run()` raises any other `Exception` and the
                maximum number of restarts has been reached.
BaseException: If this actor's `_run()` raises any other `BaseException`.
"""
_logger.info("Actor %s: Started.", self)
n_restarts = 0
while True:
try:
await self._run()
_logger.info("Actor %s: _run() returned without error.", self)
except asyncio.CancelledError:
_logger.info("Actor %s: Cancelled.", self)
raise
except Exception: # pylint: disable=broad-except
_logger.exception("Actor %s: Raised an unhandled exception.", self)
limit_str = "∞" if self._restart_limit is None else self._restart_limit
limit_str = f"({n_restarts}/{limit_str})"
if self._restart_limit is None or n_restarts < self._restart_limit:
n_restarts += 1
_logger.info("Actor %s: Restarting %s...", self._name, limit_str)
continue
_logger.info(
"Actor %s: Maximum restarts attempted %s, bailing out...",
self,
limit_str,
)
raise
except BaseException: # pylint: disable=broad-except
_logger.exception("Actor %s: Raised a BaseException.", self)
raise
break
_logger.info("Actor %s: Stopped.", self) | null |
1,044 | from typing import (
Any,
Callable,
Dict,
List,
cast,
)
from lxml.etree import (
SubElement,
_Element,
)
from pcs.common import reports
from pcs.lib.cib import resource
from pcs.lib.cib.constraint import resource_set
from pcs.lib.cib.tools import (
find_element_by_tag_and_id,
find_unique_id,
)
from pcs.lib.errors import LibraryError
from pcs.lib.xml_tools import (
export_attributes,
find_parent,
get_root,
)
def _validate_attrib_names(attrib_names, options):
invalid_names = [
name for name in options.keys() if name not in attrib_names
]
if invalid_names:
raise LibraryError(
reports.ReportItem.error(
reports.messages.InvalidOptions(
sorted(invalid_names), sorted(attrib_names), None
)
)
)
def find_valid_resource_id(
report_processor: reports.ReportProcessor, cib, in_clone_allowed, _id
):
parent_tags = resource.clone.ALL_TAGS + [resource.bundle.TAG]
resource_element = find_element_by_tag_and_id(
sorted(parent_tags + [resource.primitive.TAG, resource.group.TAG]),
cib,
_id,
)
if resource_element.tag in parent_tags:
return resource_element.attrib["id"]
clone = find_parent(resource_element, parent_tags)
if clone is None:
return resource_element.attrib["id"]
report_msg = reports.messages.ResourceForConstraintIsMultiinstance(
resource_element.attrib["id"],
"clone" if clone.tag == "master" else clone.tag,
str(clone.attrib["id"]),
)
if in_clone_allowed:
if report_processor.report(
reports.ReportItem.warning(report_msg)
).has_errors:
raise LibraryError()
return resource_element.attrib["id"]
raise LibraryError(
reports.ReportItem.error(
report_msg,
force_code=reports.codes.FORCE,
)
)
def prepare_options(attrib_names, options, create_id_fn, validate_id):
_validate_attrib_names(attrib_names + ("id",), options)
options = options.copy()
if "id" not in options:
options["id"] = create_id_fn()
else:
validate_id(options["id"])
return options
def export_with_set(element: _Element) -> Dict[str, Any]:
return {
"resource_sets": [
resource_set.export(resource_set_item)
for resource_set_item in element.findall(".//resource_set")
],
"options": export_attributes(element),
}
def export_plain(element: _Element) -> Dict[str, Any]:
return {"options": export_attributes(element)}
def create_id(cib, type_prefix, resource_set_list):
# Create a semi-random id. We need it to be predictable (for testing), short
    # and somewhat different from other ids so that we don't spend much time in
# find_unique_id.
# Avoid using actual resource names. It makes the id very long (consider 10
# or more resources in a set constraint). Also, if a resource is deleted
# and therefore removed from the constraint, the id no longer matches the
# constraint.
resource_ids = []
for _set in resource_set_list:
resource_ids.extend(_set["ids"])
id_part = "".join([_id[0] + _id[-1] for _id in resource_ids][:3])
return find_unique_id(cib, "{0}_set_{1}".format(type_prefix, id_part))
def have_duplicate_resource_sets(element, other_element):
def get_id_set_list(element):
return [
resource_set.get_resource_id_set_list(resource_set_item)
for resource_set_item in element.findall(".//resource_set")
]
return get_id_set_list(element) == get_id_set_list(other_element)
def check_is_without_duplication(
report_processor: reports.ReportProcessor,
constraint_section: _Element,
element: _Element,
are_duplicate: Callable[[_Element, _Element], bool],
export_element: Callable[[_Element], Dict[str, Any]],
duplication_allowed: bool = False,
) -> None:
duplicate_element_list = [
duplicate_element
for duplicate_element in cast(
# The xpath method has a complicated return value, but we know our
# xpath expression returns only elements.
List[_Element],
constraint_section.xpath(
".//*[local-name()=$tag_name]", tag_name=element.tag
),
)
if (
element is not duplicate_element
and are_duplicate(element, duplicate_element)
)
]
if not duplicate_element_list:
return
if report_processor.report_list(
[
reports.ReportItem.info(
reports.messages.DuplicateConstraintsList(
element.tag,
[
export_element(duplicate_element)
for duplicate_element in duplicate_element_list
],
)
),
reports.ReportItem(
severity=reports.item.get_severity(
reports.codes.FORCE,
duplication_allowed,
),
message=reports.messages.DuplicateConstraintsExist(
[
str(duplicate.attrib["id"])
for duplicate in duplicate_element_list
]
),
),
]
).has_errors:
raise LibraryError()
def METHOD_NAME(constraint_section, tag_name, options, resource_set_list):
if not resource_set_list:
raise LibraryError(
reports.ReportItem.error(reports.messages.EmptyResourceSetList())
)
element = SubElement(constraint_section, tag_name)
element.attrib.update(options)
if tag_name == "rsc_order":
all_resource_ids = []
for resource_set_item in resource_set_list:
all_resource_ids.extend(resource_set_item["ids"])
resource_set.is_resource_in_same_group(
get_root(constraint_section), all_resource_ids
)
for resource_set_item in resource_set_list:
resource_set.create(element, resource_set_item)
return element | null |
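# A worked illustration of the semi-random id scheme in create_id(): the id
# part is built from the first and last character of up to three member ids,
# keeping it short and stable for tests (the cib argument is only consulted
# by find_unique_id to avoid collisions).
resource_set_list = [{"ids": ["resourceA", "resourceB"]}, {"ids": ["dummy1"]}]
# first+last characters: "rA", "rB", "d1"  ->  id_part == "rArBd1"
# create_id(cib, "order", resource_set_list) would return "order_set_rArBd1"
# (with a numeric suffix from find_unique_id if that id already exists).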
1,045 | # **************************************************************************
# *
# * Authors: Roberto Marabini ([email protected]), May 2013
# * Marta Martinez ([email protected])
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# **************************************************************************
import os
from distutils.spawn import find_executable
from os.path import exists
import pyworkflow.protocol.params as params
from pwem.constants import SYM_I222
from pwem.emlib.image import ImageHandler
from pwem.objects import (SetOfVolumes)
from pyworkflow.viewer import DESKTOP_TKINTER, WEB_DJANGO
from pwem.viewers import Chimera, ChimeraView, EmProtocolViewer
from xmipp3.protocols.protocol_extract_asymmetric_unit import XmippProtExtractUnit
from xmipp3.constants import (XMIPP_TO_SCIPION, XMIPP_I222)
VOLUME_SLICES = 1
VOLUME_CHIMERA = 0
class viewerXmippProtExtractUnit(EmProtocolViewer):
""" Visualize the input and output volumes of protocol XmippProtExtractUnit
by choosing Chimera (3D) or Xmipp visualizer (2D).
The axes of coordinates x, y, z will be shown by choosing Chimera"""
_label = 'viewer extract asymmetric unit'
_targets = [XmippProtExtractUnit]
_environments = [DESKTOP_TKINTER, WEB_DJANGO]
# ROB: I know that there is a nice chimera interface but it does not work
# in this case since I am interested in reading the MRC header. So I will
# use chimera as an external program
def _defineParams(self, form):
form.addSection(label='Visualization of input volume and extracted '
'asymmetric unit')
form.addParam('displayVol', params.EnumParam,
choices=['chimerax', 'slices'], default=VOLUME_CHIMERA,
display=params.EnumParam.DISPLAY_HLIST,
label='Display volume with',
help='*chimerax*: display volumes as surface with '
'ChimeraX.\n*slices*: display volumes as 2D slices '
'along z axis.\n')
def _getVisualizeDict(self):
return{
'displayVol': self._showVolumes,
}
def _validate(self):
if find_executable(Chimera.getProgram()) is None:
return ["chimerax is not available. Either install it or choose"
" option 'slices'. "]
return []
# =========================================================================
# Show Volumes
# =========================================================================
def _showVolumes(self, paramName=None):
if self.displayVol == VOLUME_CHIMERA:
return self._showVolumesChimera()
elif self.displayVol == VOLUME_SLICES:
return self.METHOD_NAME()
def _createSetOfVolumes(self):
if not exists(self.protocol._getExtraPath('tmpVolumes.sqlite')):
tmpFileName = self.protocol._getExtraPath("tmpVolumes.sqlite")
_inputVol = self.protocol.inputVolumes.get()
_outputVol = self.protocol.outputVolume
setOfVolumes = SetOfVolumes(filename=tmpFileName)
setOfVolumes.append(_inputVol)
setOfVolumes.append(_outputVol)
setOfVolumes.write()
else:
tmpFileName = self.protocol._getExtraPath('tmpVolumes.sqlite')
setOfVolumes = SetOfVolumes(filename=tmpFileName)
return setOfVolumes
def _showVolumesChimera(self):
tmpFileNameCMD = self.protocol._getExtraPath("chimera.cxc")
f = open(tmpFileNameCMD, "w")
dim = self.protocol.inputVolumes.get().getDim()[0]
sampling = self.protocol.inputVolumes.get().getSamplingRate()
tmpFileName = os.path.abspath(self.protocol._getExtraPath("axis.bild"))
Chimera.createCoordinateAxisFile(dim,
bildFileName=tmpFileName,
sampling=sampling)
f.write("open %s\n" % tmpFileName)
f.write("cofr 0,0,0\n") # set center of coordinates
_inputVol = self.protocol.inputVolumes.get()
_outputVol = self.protocol.outputVolume
inputVolFileName = os.path.abspath(ImageHandler.removeFileType(
_inputVol.getFileName()))
# input vol origin coordinates
x_input, y_input, z_input = _inputVol.getShiftsFromOrigin()
f.write("open %s\n" % inputVolFileName)
f.write("volume #2 style mesh level 0.001 voxelSize %f origin "
"%0.2f,%0.2f,%0.2f\n"
% (_inputVol.getSamplingRate(), x_input, y_input, z_input))
outputVolFileName = os.path.abspath(ImageHandler.removeFileType(
_outputVol.getFileName()))
# output vol origin coordinates
x_output, y_output, z_output = _outputVol.getShiftsFromOrigin()
f.write("open %s\n" % outputVolFileName)
f.write("volume #3 style surface level 0.001 voxelSize %f origin "
"%0.2f,%0.2f,%0.2f\n"
% (_outputVol.getSamplingRate(), x_output, y_output, z_output))
cMap = ['red', 'yellow', 'green', 'cyan', 'blue']
d = {}
innerRadius = self.protocol.innerRadius.get()
d['outerRadius'] = self.protocol.outerRadius.get() * sampling
if innerRadius < 0:
innerRadius = 0
        d['innerRadius'] = innerRadius * sampling
d['symmetry'] = Chimera.getSymmetry(XMIPP_TO_SCIPION[self.protocol.symmetryGroup.get()])
if self.protocol.symmetryGroup >= XMIPP_I222:
f.write("shape icosahedron mesh true radius %(outerRadius)d "
"orientation %(symmetry)s\n" % d)
step = (d['outerRadius'] - d['innerRadius']) / float(len(cMap) - 1)
f.write("color radial #3 center 0,0,0 palette -")
counter = 0
s = ""
for color in cMap:
s += "%d,%s:" % (d['innerRadius'] + counter * step, color)
counter += 1
f.write(s[:-1] + '\n')
f.close()
return [ChimeraView(tmpFileNameCMD)]
def METHOD_NAME(self):
setOfVolumes = self._createSetOfVolumes()
return [self.objectView(setOfVolumes)] | null |
1,046 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
import numpy as np
import sys
import os
from collections import OrderedDict
# --- utils ---
from .utils import GetVariablesOnGraph
from .utils import GetCoefficientOnGraph
class ElasticWeightConsolidation:
def __init__(
self,
_y,
_out_FI_path=None,
_iter_num=100,
_apply_function_type_list=['Convolution'],
_calc_switch=False
):
        # input
        # _y : type=nn.Variable(), The generator output
        # _out_FI_path : type=str, The path where the Fisher Information weights are saved
        # _iter_num : type=int, The number of iterations used to estimate the Fisher Information.
        # _apply_function_type_list : type=list of str, The function type names which EWC applies to.
        # _calc_switch : type=bool, Whether to recompute the Fisher Information even if a saved file exists.
# [parameters]
self.y = _y
self.out_FI_path = _out_FI_path
self.iter_num = _iter_num
self.apply_function_type_list = _apply_function_type_list
# [variables]
self.FisherInformation_val_dict = None
self.coef_dict_for_FI = OrderedDict()
self.coef_dict_on_graph = None
self.FI_save_switch = True
# [hyper parameters]
self.FI_scope = 'FisherInformation'
# [preprocessing]
self.FisherInformation_val_dict = self._get_initial_FI_dict(
_calc_switch)
self._preprocessing()
def _preprocessing(self):
# --- all coefficients ---
GCG_class = GetCoefficientOnGraph()
self.y.visit(GCG_class)
self.coef_dict_on_graph = GCG_class.variables
# --- variables which EWC applies to ---
GVG_class = GetVariablesOnGraph()
self.y.visit(GVG_class)
for key in GVG_class.variables:
var = GVG_class.variables[key]
if var.parent.info.type_name in self.apply_function_type_list:
if len(var.parent.inputs) > 1:
for in_var in var.parent.inputs[1:]:
use_var = self._get_input_node(in_var)
if use_var is not None:
self.coef_dict_for_FI[use_var.name] = use_var
    def _get_input_node(self, _var, _already_read_list=None):
        # A mutable default argument would be shared across calls,
        # so the visited list is created per call instead.
        if _already_read_list is None:
            _already_read_list = []
        if _var in self.coef_dict_on_graph.values():
            return _var
        else:
            _already_read_list.append(_var)
            if _var.parent is not None:
                for in_var in _var.parent.inputs:
                    if in_var not in _already_read_list:
                        return self._get_input_node(in_var, _already_read_list)
def __call__(self, _out_var=None):
# input
# _out_var : type=nn.Variable(), The discriminator output
# --- self ---
        # self.coef_dict_for_FI : type=OrderedDict(), The coefficient dict of the synthesis network (This needs to be on the graph.)
# output
# loss : type=nn.Variable()
# --- Calculation of the Fisher Information ---
if _out_var is not None:
temp_need_grad = self.y.need_grad
self.y.need_grad = True
if len(self.FisherInformation_val_dict) == 0:
log_likelihood_var = F.log(F.sigmoid(_out_var))
for i in range(self.iter_num):
log_likelihood_var.forward(clear_no_need_grad=True)
self._zero_grad_all()
log_likelihood_var.backward(clear_buffer=True)
self._accumulate_grads()
sys.stdout.write(
'\rFisher Information Accumulating ... {}/{}'.format(i+1, self.iter_num))
sys.stdout.flush()
print('')
for key in self.FisherInformation_val_dict:
self.FisherInformation_val_dict[key] /= self.iter_num
self.y.need_grad = temp_need_grad
# --- make loss graph ---
loss = 0
for key in self.FisherInformation_val_dict:
key_source = key.replace(self.FI_scope + '/', '')
FI_var = nn.Variable.from_numpy_array(
self.FisherInformation_val_dict[key].copy())
FI_var.name = key
coef_source_var = nn.Variable.from_numpy_array(
self.coef_dict_for_FI[key_source].d.copy())
coef_source_var.name = key.replace(
self.FI_scope + '/', 'weight_source/')
loss += F.mean(FI_var *
(self.coef_dict_for_FI[key_source] - coef_source_var)**2)
# --- save Fisher Information ---
if self.FI_save_switch:
self.METHOD_NAME()
print('[ElasticWeightConsolidation] Success!')
return loss
    def METHOD_NAME(self):
        if self.out_FI_path is not None:
            # Create the parent directory of the output file if needed.
            out_dir = os.path.dirname(self.out_FI_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)
            np.savez(self.out_FI_path.replace('.npz', ''),
                     **self.FisherInformation_val_dict)
print(
'[ElasticWeightConsolidation] Save the calculated fisher information values to...')
print('[ElasticWeightConsolidation] {}'.format(self.out_FI_path))
def _get_initial_FI_dict(self, _calc_switch):
# input
        # _calc_switch : type=bool, If True, recompute the Fisher Information even if a cached file exists.
# output
# FI_dict : type=OrderedDict(), key=parameter name, value=np.ndarray
FI_dict = OrderedDict()
if self.out_FI_path is not None and os.path.isfile(self.out_FI_path) and not _calc_switch:
FI_dict = OrderedDict(np.load(self.out_FI_path))
self.FI_save_switch = False
print('[ElasticWeightConsolidation] Load EWC weights ... {}'.format(
self.out_FI_path))
return FI_dict
def _zero_grad_all(self):
for key in self.coef_dict_for_FI:
self.coef_dict_for_FI[key].g.fill(0)
def _accumulate_grads(self):
for key in self.coef_dict_for_FI:
if self.FI_scope + '/' + key not in self.FisherInformation_val_dict:
self.FisherInformation_val_dict[self.FI_scope +
'/' + key] = self.coef_dict_for_FI[key].g.copy()
else:
self.FisherInformation_val_dict[self.FI_scope +
'/' + key] += self.coef_dict_for_FI[key].g.copy() | null |
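# --- Editor's addition: a hedged usage sketch for ElasticWeightConsolidation.
# It is not part of the original file. The toy graph, scope names, sizes, the
# output path and the ewc_lambda weight below are illustrative assumptions;
# a real setup would pass an actual generator output and discriminator output.
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

x = nn.Variable((4, 1, 4, 4))  # stand-in "generator" input
with nn.parameter_scope('toy_conv'):
    y = PF.convolution(x, 2, (3, 3), pad=(1, 1))  # EWC targets Convolution by default
with nn.parameter_scope('toy_disc'):
    d_out = PF.affine(F.reshape(y, (4, -1)), 1)   # stand-in "discriminator" output

ewc = ElasticWeightConsolidation(y, _out_FI_path='./fisher_info.npz', _iter_num=10)
ewc_loss = ewc(d_out)  # estimates the Fisher Information, then builds the penalty term
# total_loss = task_loss + ewc_lambda * ewc_loss  # ewc_lambda: hypothetical weight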
1,047 | import os
from .common import TestConfig, test_data
reference_data = \
"""
analysis_name: analysis
---
fit_grains:
do_fit: false
estimate: %(nonexistent_file)s
npdiv: 1
panel_buffer: 10
threshold: 1850
tolerance:
eta: 1
omega: 2
tth: 3
tth_max: false
---
fit_grains:
estimate: %(existing_file)s
panel_buffer: [20, 30]
tolerance:
eta: [1, 2]
omega: [3, 4]
tth: [5, 6]
tth_max: 15
---
fit_grains:
tth_max: -1
---
# cfg #4
fit_grains:
reset_exclusions: false
---
# cfg #5
fit_grains:
dmin: 0.1
tthmin: 0.2
sfacmin: 0.3
pintmin: 0.4
---
# cfg #6
fit_grains:
dmax: 1.1
tthmax: 1.2
sfacmax: 1.3
pintmax: 1.4
""" % test_data
class TestFitGrainsConfig(TestConfig):
@classmethod
def get_reference_data(cls):
return reference_data
def test_do_fit(self):
self.assertTrue(self.cfgs[0].fit_grains.do_fit)
self.assertFalse(self.cfgs[1].fit_grains.do_fit)
def test_estimate(self):
self.assertEqual(self.cfgs[0].fit_grains.estimate, None)
# nonexistent file needs to return None
self.assertEqual(
self.cfgs[1].fit_grains.estimate,
None
)
self.assertEqual(
self.cfgs[2].fit_grains.estimate,
test_data['existing_file']
)
def test_npdiv(self):
self.assertEqual(self.cfgs[0].fit_grains.npdiv, 2)
self.assertEqual(self.cfgs[1].fit_grains.npdiv, 1)
def test_threshold(self):
self.assertRaises(
RuntimeError,
getattr, self.cfgs[0].fit_grains, 'threshold'
)
self.assertEqual(self.cfgs[1].fit_grains.threshold, 1850)
def test_tth_max(self):
self.assertTrue(self.cfgs[0].fit_grains.tth_max)
self.assertFalse(self.cfgs[1].fit_grains.tth_max)
self.assertEqual(self.cfgs[2].fit_grains.tth_max, 15)
self.assertRaises(
RuntimeError,
getattr, self.cfgs[3].fit_grains, 'tth_max'
)
class TestToleranceConfig(TestConfig):
@classmethod
def get_reference_data(cls):
return reference_data
def test_eta(self):
self.assertRaises(
RuntimeError,
getattr, self.cfgs[0].fit_grains.tolerance, 'eta'
)
self.assertEqual(
self.cfgs[1].fit_grains.tolerance.eta,
[1, 1]
)
self.assertEqual(
self.cfgs[2].fit_grains.tolerance.eta,
[1, 2]
)
def test_omega(self):
self.assertRaises(
RuntimeError,
getattr, self.cfgs[0].fit_grains.tolerance, 'omega'
)
self.assertEqual(
self.cfgs[1].fit_grains.tolerance.omega,
[2, 2]
)
self.assertEqual(
self.cfgs[2].fit_grains.tolerance.omega,
[3, 4]
)
def test_tth(self):
self.assertRaises(
RuntimeError,
getattr, self.cfgs[0].fit_grains.tolerance, 'tth'
)
self.assertEqual(
self.cfgs[1].fit_grains.tolerance.tth,
[3, 3]
)
self.assertEqual(
self.cfgs[2].fit_grains.tolerance.tth,
[5, 6]
)
class TestExclusions(TestConfig):
@classmethod
def get_reference_data(cls):
return reference_data
def test_reset_exclusions(self):
for i in range(4):
self.assertTrue(self.cfgs[i].fit_grains.reset_exclusions)
for i in range(4, 7):
self.assertFalse(self.cfgs[i].fit_grains.reset_exclusions)
def METHOD_NAME(self):
ep = self.cfgs[5].fit_grains.exclusion_parameters
self.assertEqual(ep.dmin, 0.1)
self.assertEqual(ep.tthmin, 0.2)
self.assertEqual(ep.sfacmin, 0.3)
self.assertEqual(ep.pintmin, 0.4)
self.assertEqual(ep.dmax, None)
self.assertEqual(ep.tthmax, None)
self.assertEqual(ep.sfacmax, None)
self.assertEqual(ep.pintmax, None)
ep = self.cfgs[6].fit_grains.exclusion_parameters
self.assertEqual(ep.dmin, 0.1)
self.assertEqual(ep.tthmin, 0.2)
self.assertEqual(ep.sfacmin, 0.3)
self.assertEqual(ep.pintmin, 0.4)
self.assertEqual(ep.dmax, 1.1)
self.assertEqual(ep.tthmax, 1.2)
self.assertEqual(ep.sfacmax, 1.3)
self.assertEqual(ep.pintmax, 1.4) | null |
1,048 | from __future__ import annotations
import sys
from typing import Any, Callable, Iterator, List, Mapping, Optional, Sequence
from tabulate import tabulate
from ai.backend.client.cli.pagination import echo_via_pager, get_preferred_page_size, tabulate_items
from ai.backend.client.cli.pretty import print_error, print_fail
from .types import BaseOutputHandler, FieldSpec, PaginatedResult
_Item = Mapping[str, Any]
class NoItems(Exception):
pass
class ConsoleOutputHandler(BaseOutputHandler):
def print_item(
self,
item: Optional[_Item],
fields: Sequence[FieldSpec],
) -> None:
if item is None:
print_fail("No matching entry found.")
return
field_map = {f.field_name: f for f in fields}
print(
tabulate(
[
(
field_map[k].humanized_name,
field_map[k].formatter.format_console(v, field_map[k]),
)
for k, v in item.items()
if k in field_map
],
headers=("Field", "Value"),
)
)
def print_items(
self,
items: Sequence[_Item],
fields: Sequence[FieldSpec],
) -> None:
field_map = {f.field_name: f for f in fields}
for idx, item in enumerate(items):
if idx > 0:
print("-" * 20)
print(
tabulate(
[
(
field_map[k].humanized_name,
field_map[k].formatter.format_console(v, field_map[k]),
)
for k, v in item.items()
if k in field_map
],
headers=("Field", "Value"),
)
)
def print_list(
self,
items: Sequence[_Item],
fields: Sequence[FieldSpec],
*,
is_scalar: bool = False,
) -> None:
if is_scalar:
assert len(fields) == 1
if sys.stdout.isatty():
def infinite_fetch():
current_offset = 0
page_size = get_preferred_page_size()
while True:
if len(items) == 0:
raise NoItems
if is_scalar:
yield from map(
lambda v: {fields[0].field_name: v},
items[current_offset : current_offset + page_size],
)
else:
yield from items[current_offset : current_offset + page_size]
current_offset += page_size
if current_offset >= len(items):
break
try:
echo_via_pager(
tabulate_items(
infinite_fetch(),
fields,
),
)
except NoItems:
print("No matching items.")
else:
if is_scalar:
for line in tabulate_items(
map(lambda v: {fields[0].field_name: v}, items), # type: ignore
fields,
):
print(line, end="")
else:
for line in tabulate_items(
items, # type: ignore
fields,
):
print(line, end="")
def print_paginated_list(
self,
fetch_func: Callable[[int, int], PaginatedResult],
initial_page_offset: int,
page_size: Optional[int] = None,
plain=False,
) -> None:
fields: List[FieldSpec] = []
def infinite_fetch(_page_size: int) -> Iterator[_Item]:
nonlocal fields
current_offset = initial_page_offset
while True:
result = fetch_func(current_offset, _page_size)
if result.total_count == 0:
raise NoItems
current_offset += len(result.items)
if not fields:
fields.extend(result.fields)
yield from result.items
if current_offset >= result.total_count:
break
if sys.stdout.isatty() and page_size is None:
preferred_page_size = get_preferred_page_size()
try:
echo_via_pager(
tabulate_items(
infinite_fetch(preferred_page_size),
fields,
tablefmt="plain" if plain else "simple",
),
)
except NoItems:
print("No matching items.")
else:
if page_size is None:
page_size = 20
for line in tabulate_items(
infinite_fetch(page_size),
fields,
tablefmt="plain" if plain else "simple",
):
print(line, end="")
def METHOD_NAME(
self,
item: _Item,
item_name: Optional[str] = None,
action_name: Optional[str] = None,
extra_info: Mapping = {},
) -> None:
t = [
["ok", item["ok"]],
["msg", item["msg"]],
*[(k, v) for k, v in extra_info.items()],
]
if action_name is not None:
t += [["Action", action_name]]
if item_name is not None:
t += [(k, v) for k, v in item[item_name].items()]
print(
tabulate(
t,
headers=("Field", "Value"),
)
)
def print_mutation_error(
self,
error: Optional[Exception] = None,
msg: str = "Failed",
item_name: Optional[str] = None,
action_name: Optional[str] = None,
extra_info: Mapping = {},
) -> None:
t = [
["Message", msg],
]
if item_name is not None:
t += [["Item", item_name]]
if action_name is not None:
t += [["Action", action_name]]
print(
tabulate(
t,
headers=("Field", "Value"),
)
)
if error is not None:
print_error(error)
def print_error(
self,
error: Exception,
) -> None:
print_error(error)
def print_fail(
self,
message: str,
) -> None:
print_fail(message) | null |
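# --- Editor's addition: a hedged, self-contained sketch of the Field/Value
# layout that ConsoleOutputHandler builds via tabulate. The field names and
# values are made up for illustration.
from tabulate import tabulate

rows = [("Name", "demo-session"), ("Status", "RUNNING")]
print(tabulate(rows, headers=("Field", "Value")))
# Prints roughly:
# Field   Value
# ------  ------------
# Name    demo-session
# Status  RUNNING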
1,049 | import os
import IMP
import IMP.em
import IMP.test
import IMP.core
class Tests(IMP.test.TestCase):
"""Class to test EM correlation restraint"""
def load_density_maps(self):
mrw = IMP.em.MRCReaderWriter()
self.scene = IMP.em.read_map(
self.get_input_file_name("1z5s_20.imp.mrc"), mrw)
self.scene.get_header_writable().set_resolution(20.)
self.scene.update_voxel_size(3.)
self.scene.calcRMS()
def setUp(self):
"""Build test model and optimizer"""
IMP.test.TestCase.setUp(self)
self.imp_model = IMP.Model()
self.load_density_maps()
def test_crop_map(self):
"""Test the cropping functionality"""
t = 0.
cropped_scene = self.scene.get_cropped(0.)
cropped_bb = IMP.em.get_bounding_box(cropped_scene)
scene_bb = IMP.em.get_bounding_box(self.scene, t)
# check that the scene and cropped scene have the same bounding box
for i in range(2):
self.assertAlmostEqual(IMP.algebra.get_distance(
scene_bb.get_corner(i), cropped_bb.get_corner(i)), 0, 2)
cropped_scene.get_header().show()
mrw = IMP.em.MRCReaderWriter()
IMP.em.write_map(cropped_scene, "test2.mrc", mrw)
dmap3 = IMP.em.read_map("test2.mrc", IMP.em.MRCReaderWriter())
# check that the center stays in the same place
self.assertAlmostEqual(IMP.algebra.get_distance(
dmap3.get_centroid(), cropped_scene.get_centroid()), 0, 2)
self.assertAlmostEqual(IMP.algebra.get_distance(
self.scene.get_centroid(), cropped_scene.get_centroid()), 0, 2)
# check that the spacing is correct
self.assertAlmostEqual(dmap3.get_spacing(), 3, 2)
# check that the dmin and dmax are the same
self.assertAlmostEqual(
cropped_scene.get_min_value() - self.scene.get_min_value(), 0., 2)
self.assertAlmostEqual(
cropped_scene.get_max_value() - self.scene.get_max_value(), 0., 2)
os.unlink("test2.mrc")
def _test_crop_using_larger_extent(self):
"""Test the cropping functionality works when the input bb is
larger than the density"""
em_bb = IMP.em.get_bounding_box(self.scene)
larger_bb = IMP.algebra.BoundingBox3D(
em_bb.get_corner(0) - IMP.algebra.Vector3D(10, 10, 10),
em_bb.get_corner(1) + IMP.algebra.Vector3D(10, 10, 10))
cropped_scene = self.scene.get_cropped(larger_bb)
IMP.em.write_map(cropped_scene, "temp.mrc", IMP.em.MRCReaderWriter())
cropped_bb = IMP.em.get_bounding_box(cropped_scene)
scene_bb = IMP.em.get_bounding_box(self.scene)
# check that the scene and cropped scene have the same bounding box
for i in range(2):
self.assertAlmostEqual(IMP.algebra.get_distance(
scene_bb.get_corner(i),
cropped_bb.get_corner(i)), 0, 2)
def test_crop_by_particle_set(self):
"""Test the cropping functionality by inputting a particle set"""
mrw = IMP.em.MRCReaderWriter()
mh = IMP.atom.read_pdb(
self.get_input_file_name("1mbn_21-35.pdb"),
self.imp_model,
IMP.atom.BackbonePDBSelector())
ps = IMP.atom.get_leaves(mh)
ps_cent = IMP.algebra.get_centroid(
[IMP.core.XYZ(p).get_coordinates() for p in ps])
mrc = IMP.em.read_map(self.get_input_file_name("1mbn.6.eman.mrc"), mrw)
mrc.get_header_writable().set_resolution(6.0)
cropped_map = mrc.get_cropped(ps, 5.0)
c_cent = cropped_map.get_centroid()
# Centroid of the particle set used to crop and the cropped density
# should be about the same
self.assertAlmostEqual(IMP.algebra.get_distance(c_cent, ps_cent),
0.0, delta=1.0)
# The correlation between the ps and map should be better
cropped_cfs = 1-IMP.em.compute_fitting_score(ps, cropped_map)
full_cfs = 1-IMP.em.compute_fitting_score(ps, mrc)
self.assertGreater(cropped_cfs, full_cfs)
# The cropped map should have less density
self.assertLess(
IMP.em.approximate_molecular_mass(cropped_map, 0.00),
IMP.em.approximate_molecular_mass(mrc, 0.00))
inv_cropped_map = mrc.get_cropped(ps, 5.0, True)
_ = inv_cropped_map.get_centroid()
inv_cropped_cfs = 1-IMP.em.compute_fitting_score(ps, inv_cropped_map)
# The correlation of the inverse map should be the worst
self.assertGreater(cropped_cfs, inv_cropped_cfs)
# These evaluate to equal - probably a bug in compute_fitting_score
# Change to Ilan's EMFitRestraint score when implemented
# self.assertGreater(full_cfs, inv_cropped_cfs)
# Inverse cropped map should have less density
self.assertLess(
IMP.em.approximate_molecular_mass(inv_cropped_map, 0.00),
IMP.em.approximate_molecular_mass(mrc, 0.00))
# Addition of the two cropped maps should be almost identical
# to the original
inv_cropped_map.add(cropped_map)
inv_cropped_map.calcRMS()
ccc = IMP.em.get_coarse_cc_coefficient(inv_cropped_map, mrc, 0)
self.assertGreater(ccc, 0.99)
self.assertLess(ccc, 1.01)
# Test the keep_em_size flag
cropped_map_keep = mrc.get_cropped(ps, 5.0, False, True)
self.assertEqual(mrc.get_number_of_voxels(),
cropped_map_keep.get_number_of_voxels())
def METHOD_NAME(self):
"""Test the cropping functionality works when the input bb is
larger than the density"""
mh = IMP.atom.read_pdb(
self.get_input_file_name("1z5s_A_fitted.pdb"),
self.imp_model,
IMP.atom.CAlphaPDBSelector())
mh_bb = IMP.atom.get_bounding_box(mh)
_ = self.scene.get_cropped(mh_bb)
if __name__ == '__main__':
IMP.test.main() | null |
1,050 | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla_rl.hooks as H
from nnabla_rl.algorithms import MunchausenDQN, MunchausenDQNConfig
from nnabla_rl.builders import ModelBuilder, SolverBuilder, ReplayBufferBuilder
from nnabla_rl.environments.wrappers import ScreenRenderEnv, NumpyFloat32Env
from nnabla_rl.models import DiscreteQFunction
from nnabla_rl.replay_buffer import ReplayBuffer
from nnabla_rl.replay_buffers import MemoryEfficientAtariBuffer
from nnabla_rl.utils.reproductions import build_atari_env # noqa
from nnabla_rl.utils.evaluator import EpisodicEvaluator
from nnabla_rl.writers import FileWriter
def build_classic_control_env(env_name, render=False):
env = gym.make(env_name)
env = NumpyFloat32Env(env)
if render:
# render environment if render is True
env = ScreenRenderEnv(env)
return env
class ExampleClassicControlQFunction(DiscreteQFunction):
def __init__(self, scope_name: str, n_action: int):
super(ExampleClassicControlQFunction, self).__init__(scope_name)
self._n_action = n_action
def all_q(self, s: nn.Variable) -> nn.Variable:
with nn.parameter_scope(self.scope_name):
with nn.parameter_scope("affine1"):
h = PF.affine(s, n_outmaps=100)
h = F.relu(h)
with nn.parameter_scope("affine2"):
h = PF.affine(h, n_outmaps=100)
h = F.relu(h)
with nn.parameter_scope("affine3"):
h = PF.affine(h, n_outmaps=100)
h = F.relu(h)
with nn.parameter_scope("affine4"):
h = PF.affine(h, n_outmaps=self._n_action)
return h
class ExampleAtariQFunction(DiscreteQFunction):
def __init__(self, scope_name: str, n_action: int):
super(ExampleAtariQFunction, self).__init__(scope_name)
self._n_action = n_action
def all_q(self, s: nn.Variable) -> nn.Variable:
with nn.parameter_scope(self.scope_name):
with nn.parameter_scope("conv1"):
h = PF.convolution(s, 32, (8, 8), stride=(4, 4))
h = F.relu(h)
with nn.parameter_scope("conv2"):
h = PF.convolution(h, 64, (4, 4), stride=(2, 2))
h = F.relu(h)
with nn.parameter_scope("conv3"):
h = PF.convolution(h, 64, (3, 3), stride=(1, 1))
h = F.relu(h)
h = F.reshape(h, (-1, 3136))
with nn.parameter_scope("affine1"):
h = PF.affine(h, 512)
h = F.relu(h)
with nn.parameter_scope("affine2"):
h = PF.affine(h, self._n_action)
return h
class ExampleQFunctionBuilder(ModelBuilder):
def __init__(self, is_atari=False):
self._is_atari = is_atari
def METHOD_NAME(self, scope_name, env_info, algorithm_config, **kwargs):
if self._is_atari:
return ExampleAtariQFunction(scope_name, env_info.action_dim)
else:
return ExampleClassicControlQFunction(scope_name, env_info.action_dim)
class ExampleQSolverBuilder(SolverBuilder):
def build_solver(self, env_info, algorithm_config, **kwargs):
config: MunchausenDQNConfig = algorithm_config
solver = S.Adam(alpha=config.learning_rate)
return solver
class ExampleReplayBufferBuilder(ReplayBufferBuilder):
def __init__(self, is_atari=False):
self._is_atari = is_atari
def build_replay_buffer(self, env_info, algorithm_config, **kwargs):
config: MunchausenDQNConfig = algorithm_config
if self._is_atari:
return MemoryEfficientAtariBuffer(capacity=config.replay_buffer_size)
else:
return ReplayBuffer(capacity=config.replay_buffer_size)
def train():
    # nnabla-rl's reinforcement learning algorithms require an environment that implements the gym.Env interface.
    # For the details of gym.Env see: https://github.com/openai/gym
env_name = 'CartPole-v1'
train_env = build_classic_control_env(env_name)
    # The evaluation env is used only for evaluating models during the training.
    # If you do not evaluate the model during the training, this environment is not necessary.
eval_env = build_classic_control_env(env_name, render=True)
is_atari = False
learning_rate = 3e-4
start_timesteps = 5000
max_explore_steps = 10000
evaluation_timing = 10000
total_iterations = 100000
    # If you want to train on atari games, uncomment below.
    # You can change the environment name to switch the game to train on.
    # For the list of available games see: https://gym.openai.com/envs/#atari
    # Your machine needs at least 20GB of memory to run the training.
    # Adjust the replay_buffer_size through MunchausenDQNConfig if you do not have enough memory on your machine.
# env_name = 'BreakoutNoFrameskip-v4'
# train_env = build_atari_env(env_name)
# eval_env = build_atari_env(env_name, test=True, render=True)
# is_atari = True
# learning_rate = 5e-5
# start_timesteps = 50000
# max_explore_steps = 1000000
# evaluation_timing = 250000
# total_iterations = 50000000
# Will output evaluation results and model snapshots to the outdir
outdir = f'{env_name}_results'
# Writer will save the evaluation results to file.
# If you set writer=None, evaluator will only print the evaluation results on terminal.
writer = FileWriter(outdir, "evaluation_result")
evaluator = EpisodicEvaluator(run_per_evaluation=5)
# evaluate the trained model with eval_env every 5000 iterations
# change the timing to 250000 on atari games.
evaluation_hook = H.EvaluationHook(
eval_env, evaluator, timing=evaluation_timing, writer=writer)
# This will print the iteration number every 100 iteration.
# Printing iteration number is convenient for checking the training progress.
# You can change this number to any number of your choice.
iteration_num_hook = H.IterationNumHook(timing=100)
# save the trained model every 5000 iterations
# change the timing to 250000 on atari games.
save_snapshot_hook = H.SaveSnapshotHook(outdir, timing=evaluation_timing)
# Set gpu_id to -1 to train on cpu.
gpu_id = 0
config = MunchausenDQNConfig(gpu_id=gpu_id,
learning_rate=learning_rate,
start_timesteps=start_timesteps,
max_explore_steps=max_explore_steps)
m_dqn = MunchausenDQN(train_env,
config=config,
q_func_builder=ExampleQFunctionBuilder(
is_atari=is_atari),
q_solver_builder=ExampleQSolverBuilder(),
replay_buffer_builder=ExampleReplayBufferBuilder(is_atari=is_atari))
    # Set instantiated hooks to periodically run additional jobs
m_dqn.set_hooks(
hooks=[evaluation_hook, iteration_num_hook, save_snapshot_hook])
m_dqn.train(train_env, total_iterations=total_iterations)
if __name__ == '__main__':
train() | null |
1,051 | from __future__ import annotations
import pytest
import dask
from dask.local import finish_task, get_sync, sortkey, start_state_from_dask
from dask.order import order
from dask.utils_test import GetFunctionTestMixin, add, inc
fib_dask = {"f0": 0, "f1": 1, "f2": 1, "f3": 2, "f4": 3, "f5": 5, "f6": 8}
def test_start_state():
dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
result = start_state_from_dask(dsk)
expected = {
"cache": {"x": 1, "y": 2},
"dependencies": {
"w": {"y", "z"},
"x": set(),
"y": set(),
"z": {"x"},
},
"dependents": {"w": set(), "x": {"z"}, "y": {"w"}, "z": {"w"}},
"finished": set(),
"released": set(),
"running": set(),
"ready": ["z"],
"waiting": {"w": {"z"}},
"waiting_data": {"x": {"z"}, "y": {"w"}, "z": {"w"}},
}
assert result == expected
def test_start_state_looks_at_cache():
dsk = {"b": (inc, "a")}
cache = {"a": 1}
result = start_state_from_dask(dsk, cache)
assert result["dependencies"]["b"] == {"a"}
assert result["ready"] == ["b"]
def test_start_state_with_redirects():
dsk = {"x": 1, "y": "x", "z": (inc, "y")}
result = start_state_from_dask(dsk)
assert result["cache"] == {"x": 1}
def test_start_state_with_independent_but_runnable_tasks():
assert start_state_from_dask({"x": (inc, 1)})["ready"] == ["x"]
def test_start_state_with_tasks_no_deps():
dsk = {"a": [1, (inc, 2)], "b": [1, 2, 3, 4], "c": (inc, 3)}
state = start_state_from_dask(dsk)
assert list(state["cache"].keys()) == ["b"]
assert "a" in state["ready"] and "c" in state["ready"]
deps = {k: set() for k in "abc"}
assert state["dependencies"] == deps
assert state["dependents"] == deps
def test_finish_task():
dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
sortkey = order(dsk).get
state = start_state_from_dask(dsk)
state["ready"].remove("z")
state["running"] = {"z", "other-task"}
task = "z"
result = 2
state["cache"]["z"] = result
finish_task(dsk, task, state, set(), sortkey)
assert state == {
"cache": {"y": 2, "z": 2},
"dependencies": {
"w": {"y", "z"},
"x": set(),
"y": set(),
"z": {"x"},
},
"finished": {"z"},
"released": {"x"},
"running": {"other-task"},
"dependents": {"w": set(), "x": {"z"}, "y": {"w"}, "z": {"w"}},
"ready": ["w"],
"waiting": {},
"waiting_data": {"y": {"w"}, "z": {"w"}},
}
class TestGetAsync(GetFunctionTestMixin):
get = staticmethod(get_sync)
def test_get_sync_num_workers(self):
self.get({"x": (inc, "y"), "y": 1}, "x", num_workers=2)
def test_cache_options():
cache = {}
def inc2(x):
assert "y" in cache
return x + 1
with dask.config.set(cache=cache):
get_sync({"x": (inc2, "y"), "y": 1}, "x")
def test_sort_key():
L = ["x", ("x", 1), ("z", 0), ("x", 0)]
assert sorted(L, key=sortkey) == ["x", ("x", 0), ("x", 1), ("z", 0)]
def test_callback():
f = lambda x: x + 1
dsk = {"a": (f, 1)}
from dask.threaded import get
def start_callback(key, d, state):
assert key == "a" or key is None
assert d == dsk
assert isinstance(state, dict)
def end_callback(key, value, d, state, worker_id):
assert key == "a" or key is None
assert value == 2 or value is None
assert d == dsk
assert isinstance(state, dict)
get(dsk, "a", start_callback=start_callback, end_callback=end_callback)
def test_exceptions_propagate():
class MyException(Exception):
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return "My Exception!"
def f():
raise MyException(1, 2)
from dask.threaded import get
try:
get({"x": (f,)}, "x")
assert False
except MyException as e:
assert "My Exception!" in str(e)
assert "a" in dir(e)
assert e.a == 1
assert e.b == 2
def test_ordering():
L = []
def METHOD_NAME(i):
L.METHOD_NAME(i)
dsk = {("x", i): (METHOD_NAME, i) for i in range(10)}
x_keys = sorted(dsk)
dsk["y"] = (lambda *args: None, list(x_keys))
get_sync(dsk, "y")
assert L == sorted(L)
def test_complex_ordering():
da = pytest.importorskip("dask.array")
from dask.diagnostics import Callback
actual_order = []
def track_order(key, dask, state):
actual_order.METHOD_NAME(key)
x = da.random.normal(size=(20, 20), chunks=(-1, -1))
res = (x.dot(x.T) - x.mean(axis=0)).std()
dsk = dict(res.__dask_graph__())
exp_order_dict = order(dsk)
exp_order = sorted(exp_order_dict.keys(), key=exp_order_dict.get)
with Callback(pretask=track_order):
get_sync(dsk, exp_order[-1])
assert actual_order == exp_order | null |
1,052 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkslb.endpoint import endpoint_data
class UploadServerCertificateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Slb', '2014-05-15', 'UploadServerCertificate','slb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ServerCertificate(self): # String
return self.get_query_params().get('ServerCertificate')
def set_ServerCertificate(self, ServerCertificate): # String
self.add_query_param('ServerCertificate', ServerCertificate)
def get_AliCloudCertificateName(self): # String
return self.get_query_params().get('AliCloudCertificateName')
def set_AliCloudCertificateName(self, AliCloudCertificateName): # String
self.add_query_param('AliCloudCertificateName', AliCloudCertificateName)
def get_AliCloudCertificateId(self): # String
return self.get_query_params().get('AliCloudCertificateId')
def set_AliCloudCertificateId(self, AliCloudCertificateId): # String
self.add_query_param('AliCloudCertificateId', AliCloudCertificateId)
def get_PrivateKey(self): # String
return self.get_query_params().get('PrivateKey')
def set_PrivateKey(self, PrivateKey): # String
self.add_query_param('PrivateKey', PrivateKey)
def METHOD_NAME(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AliCloudCertificateRegionId(self): # String
return self.get_query_params().get('AliCloudCertificateRegionId')
def set_AliCloudCertificateRegionId(self, AliCloudCertificateRegionId): # String
self.add_query_param('AliCloudCertificateRegionId', AliCloudCertificateRegionId)
def get_ServerCertificateName(self): # String
return self.get_query_params().get('ServerCertificateName')
def set_ServerCertificateName(self, ServerCertificateName): # String
self.add_query_param('ServerCertificateName', ServerCertificateName) | null |
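# --- Editor's addition: a hedged usage sketch for the request class above.
# The credentials, region id, certificate material and names are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = UploadServerCertificateRequest()
request.set_ServerCertificate('-----BEGIN CERTIFICATE-----\n...')
request.set_PrivateKey('-----BEGIN RSA PRIVATE KEY-----\n...')
request.set_ServerCertificateName('example-cert')
response = client.do_action_with_exception(request)  # returns the raw API response
print(response)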
1,053 | # Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
"""Core conda notices logic."""
from __future__ import annotations
import logging
import time
from functools import wraps
from typing import Sequence
from ..base.constants import NOTICES_DECORATOR_DISPLAY_INTERVAL, NOTICES_FN
from ..base.context import Context, context
from ..models.channel import Channel, MultiChannel, get_channel_objs
from . import cache, fetch, views
from .types import ChannelNotice, ChannelNoticeResponse, ChannelNoticeResultSet
# Used below in type hints
ChannelName = str
ChannelUrl = str
logger = logging.getLogger(__name__)
def retrieve_notices(
limit: int | None = None,
always_show_viewed: bool = True,
silent: bool = False,
) -> ChannelNoticeResultSet:
"""
Function used for retrieving notices. This is called by the "notices" decorator as well
as the sub-command "notices"
Args:
limit: Limit the number of notices to show (defaults to None).
always_show_viewed: Whether all notices should be shown, not only the unread ones
(defaults to True).
silent: Whether to use a spinner when fetching and caching notices.
"""
channel_name_urls = get_channel_name_and_urls(get_channel_objs(context))
channel_notice_responses = fetch.get_notice_responses(
channel_name_urls, silent=silent
)
channel_notices = flatten_notice_responses(channel_notice_responses)
total_number_channel_notices = len(channel_notices)
cache_file = cache.get_notices_cache_file()
# We always want to modify the mtime attribute of the file if we are trying to retrieve notices
# This is used later in "is_channel_notices_cache_expired"
cache_file.touch()
viewed_notices = None
viewed_channel_notices = 0
if not always_show_viewed:
viewed_notices = cache.get_viewed_channel_notice_ids(
cache_file, channel_notices
)
viewed_channel_notices = len(viewed_notices)
channel_notices = filter_notices(
channel_notices, limit=limit, exclude=viewed_notices
)
return ChannelNoticeResultSet(
channel_notices=channel_notices,
viewed_channel_notices=viewed_channel_notices,
total_number_channel_notices=total_number_channel_notices,
)
def display_notices(channel_notice_set: ChannelNoticeResultSet) -> None:
"""Prints the channel notices to std out."""
views.print_notices(channel_notice_set.channel_notices)
# Updates cache database, marking displayed notices as "viewed"
cache_file = cache.get_notices_cache_file()
cache.mark_channel_notices_as_viewed(cache_file, channel_notice_set.channel_notices)
views.print_more_notices_message(
channel_notice_set.total_number_channel_notices,
len(channel_notice_set.channel_notices),
channel_notice_set.viewed_channel_notices,
)
def METHOD_NAME(func):
"""
Wrapper for "execute" entry points for subcommands.
If channel notices need to be fetched, we do that first and then
run the command normally. We then display these notices at the very
end of the command output so that the user is more likely to see them.
This ordering was specifically done to address the following bug report:
- https://github.com/conda/conda/issues/11847
Args:
func: Function to be decorated
"""
@wraps(func)
def wrapper(*args, **kwargs):
if is_channel_notices_enabled(context):
channel_notice_set = None
try:
if is_channel_notices_cache_expired():
channel_notice_set = retrieve_notices(
limit=context.number_channel_notices,
always_show_viewed=False,
silent=True,
)
except OSError as exc:
# If we encounter any OSError related error, we simply abandon
# fetching notices
logger.error(f"Unable to open cache file: {str(exc)}")
if channel_notice_set is not None:
return_value = func(*args, **kwargs)
display_notices(channel_notice_set)
return return_value
return func(*args, **kwargs)
return wrapper
def get_channel_name_and_urls(
channels: Sequence[Channel | MultiChannel],
) -> list[tuple[ChannelUrl, ChannelName]]:
"""
Return a sequence of Channel URL and name tuples.
This function handles both Channel and MultiChannel object types.
"""
channel_name_and_urls = []
for channel in channels:
name = channel.name or channel.location
for url in channel.base_urls:
full_url = url.rstrip("/")
channel_name_and_urls.append((f"{full_url}/{NOTICES_FN}", name))
return channel_name_and_urls
def flatten_notice_responses(
channel_notice_responses: Sequence[ChannelNoticeResponse],
) -> Sequence[ChannelNotice]:
return tuple(
notice
for channel in channel_notice_responses
if channel.METHOD_NAME
for notice in channel.METHOD_NAME
)
def filter_notices(
channel_notices: Sequence[ChannelNotice],
limit: int | None = None,
exclude: set[str] | None = None,
) -> Sequence[ChannelNotice]:
"""Perform filtering actions for the provided sequence of ChannelNotice objects."""
if exclude:
channel_notices = tuple(
channel_notice
for channel_notice in channel_notices
if channel_notice.id not in exclude
)
if limit is not None:
channel_notices = channel_notices[:limit]
return channel_notices
def is_channel_notices_enabled(ctx: Context) -> bool:
"""
Determines whether channel notices are enabled and therefore displayed when
invoking the `notices` command decorator.
This only happens when:
- offline is False
- number_channel_notices is greater than 0
Args:
ctx: The conda context object
"""
return ctx.number_channel_notices > 0 and not ctx.offline and not ctx.json
def is_channel_notices_cache_expired() -> bool:
"""
Checks to see if the notices cache file we use to keep track of
displayed notices is expired. This involves checking the mtime
attribute of the file. Anything older than what is specified as
the NOTICES_DECORATOR_DISPLAY_INTERVAL is considered expired.
"""
cache_file = cache.get_notices_cache_file()
cache_file_stat = cache_file.stat()
now = time.time()
seconds_since_checked = now - cache_file_stat.st_mtime
return seconds_since_checked >= NOTICES_DECORATOR_DISPLAY_INTERVAL | null |
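# --- Editor's addition: a hedged sketch of filter_notices(). It only relies
# on the ``id`` attribute of each notice, so a stand-in namedtuple is enough
# to illustrate the exclude/limit behaviour without real ChannelNotice objects.
from collections import namedtuple

_FakeNotice = namedtuple("_FakeNotice", ["id"])
notices = tuple(_FakeNotice(id=str(i)) for i in range(5))
kept = filter_notices(notices, limit=2, exclude={"0", "3"})
assert [n.id for n in kept] == ["1", "2"]  # "0" and "3" excluded, then limited to 2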
1,054 | # IODATA is an input and output module for quantum chemistry.
# Copyright (C) 2011-2019 The IODATA Development Team
#
# This file is part of IODATA.
#
# IODATA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# IODATA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
# --
"""MOL2 file format.
There are different dialects of the mol2 format. Here, compatibility with the AMBER
software suite was the main objective, so files are written with the atomic charges
used by antechamber.
"""
from typing import TextIO, Iterator, Tuple
import numpy as np
from ..docstrings import (document_load_one, document_load_many, document_dump_one,
document_dump_many)
from ..iodata import IOData
from ..periodic import sym2num, num2sym, bond2num, num2bond
from ..utils import angstrom, LineIterator
__all__ = []
PATTERNS = ['*.mol2']
@document_load_one("MOL2", ['atcoords', 'atnums', 'atcharges', 'atffparams'], ['title'])
def load_one(lit: LineIterator) -> dict:
"""Do not edit this docstring. It will be overwritten."""
molecule_found = False
while True:
try:
line = next(lit)
except StopIteration:
break
if len(line) > 1:
words = line.split()
if words[0] == "@<TRIPOS>MOLECULE":
# Found another molecule; go one line back and break
if molecule_found:
lit.back(line)
break
title = next(lit).strip()
words = next(lit).split()
natoms = int(words[0])
nbonds = int(words[1])
if words[0] == "@<TRIPOS>ATOM":
atnums, atcoords, atchgs, attypes = _load_helper_atoms(lit, natoms)
atcharges = {"mol2charges": atchgs}
atffparams = {"attypes": attypes}
result = {
'atcoords': atcoords,
'atnums': atnums,
'atcharges': atcharges,
'atffparams': atffparams,
'title': title
}
molecule_found = True
if words[0] == "@<TRIPOS>BOND":
bonds = _load_helper_bonds(lit, nbonds)
result['bonds'] = bonds
if not molecule_found:
raise lit.error("Molecule could not be read")
return result
def _load_helper_atoms(lit: LineIterator, natoms: int)\
-> Tuple[np.ndarray, np.ndarray, np.ndarray, tuple]:
"""Load element numbers, coordinates and atomic charges."""
atnums = np.empty(natoms)
atcoords = np.empty((natoms, 3))
atchgs = np.empty(natoms)
attypes = []
for i in range(natoms):
words = next(lit).split()
        # Check the first two characters of the atom name and try to
        # convert them to an element number; fall back to converting
        # only the first character.
atnum = sym2num.get(symbol, sym2num.get(symbol[0], None))
if atnum is None:
atnum = 0
            lit.warn(f'Cannot convert {words[1][:2]} to an element')
atnums[i] = atnum
attypes.append(words[5])
atcoords[i] = [float(words[2]), float(words[3]), float(words[4])]
if len(words) == 9:
atchgs[i] = float(words[8])
else:
atchgs[i] = 0.0000
atcoords = atcoords * angstrom
attypes = tuple(attypes)
return atnums, atcoords, atchgs, attypes
def _load_helper_bonds(lit: LineIterator, nbonds: int) -> Tuple[np.ndarray]:
"""Load bond information.
Each line in a bond definition has the following structure
http://chemyang.ccnu.edu.cn/ccb/server/AIMMS/mol2.pdf
bond_index atom_1 atom_2 bond_type
e.g.
1 1 2 1
This would be the first bond between atom 1 and atom 2 and a single bond
"""
bonds = np.empty((nbonds, 3))
for i in range(nbonds):
words = next(lit).split()
        # Subtract one because numbering starts at 0
bond = [
int(words[1]) - 1,
int(words[2]) - 1,
# convert mol2 bond type to integer
bond2num.get(words[3], bond2num["un"])
]
if bond is None:
bond = [0, 0, 0]
lit.warn(f'Something wrong in the bond section: {bond}')
bonds[i] = bond
return bonds
@document_load_many("MOL2", ['atcoords', 'atnums', 'atcharges', 'atffparams'], ['title'])
def load_many(lit: LineIterator) -> Iterator[dict]:
"""Do not edit this docstring. It will be overwritten."""
    # MOL2 files with more molecules are a simple concatenation of individual
    # MOL2 files, making it trivial to load many frames.
while True:
try:
yield load_one(lit)
except IOError:
return
@document_dump_one("MOL2", ['atcoords', 'atnums'], ['atcharges', 'atffparams', 'title'])
def METHOD_NAME(f: TextIO, data: IOData):
"""Do not edit this docstring. It will be overwritten."""
# The first six lines are reserved for comments
print("# Mol2 file created with Iodata", file=f)
print("\n\n\n\n\n", file=f)
print("@<TRIPOS>MOLECULE", file=f)
print(data.title or 'Created with IOData', file=f)
if data.bonds is not None:
bonds = len(data.bonds)
print(f'{data.natom:5d} {bonds:6d} {0:6d} {0:6d}', file=f)
else:
print(f'{data.natom:5d} {0:6d} {0:6d} {0:6d}', file=f)
print("@<TRIPOS>ATOM", file=f)
atcharges = data.atcharges.get('mol2charges')
attypes = data.atffparams.get('attypes')
for i in range(data.natom):
n = num2sym[data.atnums[i]]
x, y, z = data.atcoords[i] / angstrom
out1 = f'{i+1:7d} {n:2s} {x:15.4f} {y:9.4f} {z:9.4f} '
atcharge = 0.0 if atcharges is None else atcharges[i]
attype = n if attypes is None else attypes[i]
out2 = f'{attype:6s} {1:4d} XXX {atcharge:14.4f}'
print(out1 + out2, file=f)
if data.bonds is not None:
print("@<TRIPOS>BOND", file=f)
for i, bond in enumerate(data.bonds):
bondtype = num2bond.get(bond[2], "un")
print(f'{i+1:6d} {bond[0]+1:4d} {bond[1]+1:4d} {bondtype:2s}',
file=f)
@document_dump_many("MOL2", ['atcoords', 'atnums', 'atcharges'], ['title'])
def dump_many(f: TextIO, datas: Iterator[IOData]):
"""Do not edit this docstring. It will be overwritten."""
# Similar to load_many, this is relatively easy.
for data in datas:
METHOD_NAME(f, data) | null |
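# --- Editor's addition: a hedged usage sketch. It assumes this module is
# registered for *.mol2 files behind iodata's public load_one(); the snippet
# below is the minimal input accepted by the parser above, with arbitrary
# example coordinates and charge.
import iodata

mol2_text = """@<TRIPOS>MOLECULE
water
 1 0 0 0
@<TRIPOS>ATOM
 1 O 0.0000 0.0000 0.0000 O.3 1 XXX -0.8340
"""
with open("example.mol2", "w") as fh:  # load_one expects a filename
    fh.write(mol2_text)
mol = iodata.load_one("example.mol2")
print(mol.atnums, mol.atcharges["mol2charges"])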
1,055 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
from azure import (
WindowsAzureError,
MANAGEMENT_HOST,
_get_request_body,
_parse_response,
_str,
_update_request_uri_query,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicemanagement import (
AZURE_MANAGEMENT_CERTFILE,
AZURE_MANAGEMENT_SUBSCRIPTIONID,
_management_error_handler,
_parse_response_for_async_op,
_update_management_header,
)
class _ServiceManagementClient(object):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST):
self.requestid = None
self.subscription_id = subscription_id
self.cert_file = cert_file
self.host = host
if not self.cert_file:
if AZURE_MANAGEMENT_CERTFILE in os.environ:
self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
if not self.subscription_id:
if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:
self.subscription_id = os.environ[
AZURE_MANAGEMENT_SUBSCRIPTIONID]
if not self.cert_file or not self.subscription_id:
raise WindowsAzureError(
'You need to provide subscription id and certificate file')
self._httpclient = _HTTPClient(
service_instance=self, cert_file=self.cert_file)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.'''
res = type(self)(self.subscription_id, self.cert_file, self.host)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
#--Helper functions --------------------------------------------------
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _management_error_handler(ex)
return resp
def _perform_get(self, path, response_type):
request = HTTPRequest()
request.method = 'GET'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
return response
    def _perform_put(self, path, body, as_async=False):  # 'async' is a reserved word in Python 3
request = HTTPRequest()
request.method = 'PUT'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
        if as_async:
return _parse_response_for_async_op(response)
return None
    def _perform_post(self, path, body, response_type=None, as_async=False):
request = HTTPRequest()
request.method = 'POST'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
        if as_async:
return _parse_response_for_async_op(response)
return None
    def METHOD_NAME(self, path, as_async=False):
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
        if as_async:
return _parse_response_for_async_op(response)
return None
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path | null |
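# --- Editor's addition: a hedged sketch of the filter pattern described in
# with_filter() above. Subscription id and certificate path are placeholders.
def logging_filter(request, next_filter):
    print('>>', request.method, request.path)  # pre-processing
    response = next_filter(request)            # hand off to the next stage
    print('<<', response.status)               # post-processing
    return response

# svc = _ServiceManagementClient('<subscription-id>', '<path/to/cert.pem>')
# svc = svc.with_filter(logging_filter)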
1,056 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateQualityRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateQualityRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Trend(self): # String
return self.get_body_params().get('Trend')
def set_Trend(self, Trend): # String
self.add_body_params('Trend', Trend)
def get_BlockType(self): # Integer
return self.get_body_params().get('BlockType')
def set_BlockType(self, BlockType): # Integer
self.add_body_params('BlockType', BlockType)
def get_PropertyType(self): # String
return self.get_body_params().get('PropertyType')
def METHOD_NAME(self, PropertyType): # String
self.add_body_params('PropertyType', PropertyType)
def get_EntityId(self): # Long
return self.get_body_params().get('EntityId')
def set_EntityId(self, EntityId): # Long
self.add_body_params('EntityId', EntityId)
def get_RuleName(self): # String
return self.get_body_params().get('RuleName')
def set_RuleName(self, RuleName): # String
self.add_body_params('RuleName', RuleName)
def get_Checker(self): # Integer
return self.get_body_params().get('Checker')
def set_Checker(self, Checker): # Integer
self.add_body_params('Checker', Checker)
def get_Operator(self): # String
return self.get_body_params().get('Operator')
def set_Operator(self, Operator): # String
self.add_body_params('Operator', Operator)
def get_Property(self): # String
return self.get_body_params().get('Property')
def set_Property(self, Property): # String
self.add_body_params('Property', Property)
def get_WarningThreshold(self): # String
return self.get_body_params().get('WarningThreshold')
def set_WarningThreshold(self, WarningThreshold): # String
self.add_body_params('WarningThreshold', WarningThreshold)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_MethodName(self): # String
return self.get_body_params().get('MethodName')
def set_MethodName(self, MethodName): # String
self.add_body_params('MethodName', MethodName)
def get_ProjectName(self): # String
return self.get_body_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_body_params('ProjectName', ProjectName)
def get_RuleType(self): # Integer
return self.get_body_params().get('RuleType')
def set_RuleType(self, RuleType): # Integer
self.add_body_params('RuleType', RuleType)
def get_TemplateId(self): # Integer
return self.get_body_params().get('TemplateId')
def set_TemplateId(self, TemplateId): # Integer
self.add_body_params('TemplateId', TemplateId)
def get_ExpectValue(self): # String
return self.get_body_params().get('ExpectValue')
def set_ExpectValue(self, ExpectValue): # String
self.add_body_params('ExpectValue', ExpectValue)
def get_WhereCondition(self): # String
return self.get_body_params().get('WhereCondition')
def set_WhereCondition(self, WhereCondition): # String
self.add_body_params('WhereCondition', WhereCondition)
def get_CriticalThreshold(self): # String
return self.get_body_params().get('CriticalThreshold')
def set_CriticalThreshold(self, CriticalThreshold): # String
self.add_body_params('CriticalThreshold', CriticalThreshold)
def get_Comment(self): # String
return self.get_body_params().get('Comment')
def set_Comment(self, Comment): # String
self.add_body_params('Comment', Comment)
def get_PredictType(self): # Integer
return self.get_body_params().get('PredictType')
def set_PredictType(self, PredictType): # Integer
self.add_body_params('PredictType', PredictType) | null |
1,057 | # Copyright cocotb contributors
# Licensed under the Revised BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-3-Clause
"""
Tests of cocotb.test functionality
* expect_error
* expect_fail
* timeout
"""
from collections.abc import Coroutine
import pytest
from common import MyBaseException, MyException
import cocotb
from cocotb.triggers import NullTrigger, Timer
@cocotb.test(expect_error=NameError)
async def test_error(dut):
"""Error in the test"""
await Timer(100, "ns")
fail # noqa
@cocotb.test()
async def test_tests_are_tests(dut):
"""
Test that things annotated with cocotb.test are tests
"""
assert isinstance(test_tests_are_tests, cocotb.test)
# just to be sure...
@cocotb.test(expect_fail=True)
async def test_async_test_can_fail(dut):
assert False
@cocotb.test()
async def test_immediate_test(dut):
"""Test that tests can return immediately"""
return
@cocotb.test(expect_fail=True)
async def test_assertion_is_failure(dut):
assert False
@cocotb.test(expect_error=MyException)
async def test_expect_particular_exception(dut):
raise MyException()
@cocotb.test(expect_error=(MyException, ValueError))
async def test_expect_exception_list(dut):
raise MyException()
@cocotb.test(
expect_error=cocotb.result.SimTimeoutError, timeout_time=1, timeout_unit="ns"
)
async def test_timeout_testdec_fail(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=100, timeout_unit="ns")
async def test_timeout_testdec_pass(dut):
await Timer(10, "ns")
@cocotb.test(timeout_time=10, timeout_unit="ns")
async def test_timeout_testdec_simultaneous(dut):
try:
await cocotb.triggers.with_timeout(
Timer(1, "ns"), timeout_time=1, timeout_unit="ns"
)
except cocotb.result.SimTimeoutError:
pass
else:
assert False, "Expected a Timeout"
# Whether this test fails or passes depends on the behavior of the
# scheduler, simulator, and the implementation of the timeout function.
# CAUTION: THIS MAY CHANGE
# these tests should run in definition order, not lexicographic order
last_ordered_test = None
@cocotb.test()
async def METHOD_NAME(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 3
assert val is None
@cocotb.test()
async def test_ordering_2(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 2
assert val == 3
@cocotb.test()
async def test_ordering_1(dut):
global last_ordered_test
val, last_ordered_test = last_ordered_test, 1
assert val == 2
@cocotb.test()
class TestClass(Coroutine):
def __init__(self, dut):
self._coro = self.run(dut)
async def run(self, dut):
pass
def send(self, value):
self._coro.send(value)
def throw(self, exception):
self._coro.throw(exception)
def __await__(self):
yield from self._coro.__await__()
@cocotb.test()
async def test_empty_docstring(dut) -> None:
""""""
@cocotb.test(expect_fail=True)
async def test_pytest_raises_fail(dut):
with pytest.raises(AssertionError):
assert True
@cocotb.test(expect_fail=True)
async def test_pytest_warns_fail(dut):
def test_func():
pass
with pytest.warns(RuntimeWarning):
test_func()
@cocotb.test(expect_fail=True)
async def test_pytest_deprecated_call_fail(dut):
def test_func():
pass
with pytest.deprecated_call():
test_func()
@cocotb.test(expect_fail=True)
async def test_pytest_raises_fail_in_task(dut):
async def test_func():
with pytest.raises(AssertionError):
assert True
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_fail=True)
async def test_pytest_warns_fail_in_task(dut):
def inner_func():
pass
async def test_func():
with pytest.warns(RuntimeWarning):
inner_func()
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_fail=True)
async def test_pytest_deprecated_call_fail_in_task(dut):
def inner_func():
pass
async def test_func():
with pytest.deprecated_call():
inner_func()
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test(expect_error=MyBaseException)
async def test_base_exception_expect_fail(dut):
raise MyBaseException
@cocotb.test(expect_error=MyBaseException)
async def test_base_exception_in_task_expect_fail(dut):
async def test_func():
raise MyBaseException
cocotb.start_soon(test_func())
await NullTrigger()
@cocotb.test
async def test_without_parenthesis(dut):
pass | null |
1,058 | from pathlib import Path
from typing import Callable
import pytest
from pydoctor import model
testpackages = Path(__file__).parent / 'testpackages'
def processPackage(packname: str, systemcls: Callable[[], model.System] = model.System) -> model.System:
system = systemcls()
builder = system.systemBuilder(system)
builder.addModule(testpackages / packname)
builder.buildModules()
return system
def test_relative_import() -> None:
system = processPackage("relativeimporttest")
cls = system.allobjects['relativeimporttest.mod1.C']
assert isinstance(cls, model.Class)
assert cls.bases == ['relativeimporttest.mod2.B']
def test_package_docstring() -> None:
system = processPackage("relativeimporttest")
assert system.allobjects['relativeimporttest'].docstring == "DOCSTRING"
def METHOD_NAME() -> None:
# well, basically the test is that this doesn't explode:
system = processPackage("modnamedafterbuiltin")
# but let's test _something_
dict_class = system.allobjects['modnamedafterbuiltin.mod.Dict']
assert isinstance(dict_class, model.Class)
assert dict_class.baseobjects == [None]
def test_nestedconfusion() -> None:
system = processPackage("nestedconfusion")
A = system.allobjects['nestedconfusion.mod.nestedconfusion.A']
assert isinstance(A, model.Class)
C = system.allobjects['nestedconfusion.mod.C']
assert A.baseobjects[0] is C
def test_importingfrompackage() -> None:
system = processPackage("importingfrompackage")
system.getProcessedModule('importingfrompackage.mod')
submod = system.allobjects['importingfrompackage.subpack.submod']
assert isinstance(submod, model.Module)
assert submod.state is model.ProcessingState.PROCESSED
def test_allgames() -> None:
"""
Test reparenting of documentables.
    A name defined in module 1 but included in the __all__ of module 2
    (which imports it) should end up in the documentation of module 2.
"""
system = processPackage("allgames")
mod1 = system.allobjects['allgames.mod1']
assert isinstance(mod1, model.Module)
mod2 = system.allobjects['allgames.mod2']
assert isinstance(mod2, model.Module)
# InSourceAll is not moved into mod2, but NotInSourceAll is.
assert 'InSourceAll' in mod1.contents
assert 'NotInSourceAll' in mod2.contents
# Source paths must be unaffected by the move, so that error messages
# point to the right source code.
moved = mod2.contents['NotInSourceAll']
assert isinstance(moved, model.Class)
assert moved.source_path is not None
assert moved.source_path.parts[-2:] == ('allgames', 'mod1.py')
assert moved.parentMod is mod2
assert moved.parentMod.source_path is not None
assert moved.parentMod.source_path.parts[-2:] == ('allgames', 'mod2.py')
def test_cyclic_imports() -> None:
"""
Test whether names are resolved correctly when we have import cycles.
The test package contains module 'a' that defines class 'A' and module 'b'
that defines class 'B'; each module imports the other. Since the test data
is symmetrical, we will at some point be importing a module that has not
been fully processed yet, no matter which module gets processed first.
"""
system = processPackage('cyclic_imports')
mod_a = system.allobjects['cyclic_imports.a']
assert mod_a.expandName('B') == 'cyclic_imports.b.B'
mod_b = system.allobjects['cyclic_imports.b']
assert mod_b.expandName('A') == 'cyclic_imports.a.A'
def test_package_module_name_clash() -> None:
"""
When a module and a package have the same full name, the package wins.
"""
system = processPackage('package_module_name_clash')
pack = system.allobjects['package_module_name_clash.pack']
assert 'package' == pack.contents.popitem()[0]
def test_reparented_module() -> None:
"""
A module that is imported in a package as a different name and exported
in that package under the new name via C{__all__} is presented using the
new name.
"""
system = processPackage('reparented_module')
mod = system.allobjects['reparented_module.module']
top = system.allobjects['reparented_module']
assert mod.fullName() == 'reparented_module.module'
assert top.resolveName('module') is top.contents['module']
assert top.resolveName('module.f') is mod.contents['f']
    # The module's old name is not in allobjects
assert 'reparented_module.mod' not in system.allobjects
    # But it can still be resolved via its old name
assert top.resolveName('mod') is top.contents['module']
def test_reparenting_follows_aliases() -> None:
"""
Test for https://github.com/twisted/pydoctor/issues/505
Reparenting process follows aliases.
"""
system = processPackage('reparenting_follows_aliases')
    # reparenting_follows_aliases.main: imports MyClass from ._myotherthing and re-exports it in its __all__ variable.
    # reparenting_follows_aliases._mything: defines class MyClass.
    # reparenting_follows_aliases._myotherthing: imports class MyClass from ._mything, but does not export it.
# Test that we do not get KeyError
klass = system.allobjects['reparenting_follows_aliases.main.MyClass']
    # Test that older names still resolve to the reparented object
top = system.allobjects['reparenting_follows_aliases']
myotherthing = top.contents['_myotherthing']
mything = top.contents['_mything']
assert isinstance(mything, model.Module)
assert isinstance(myotherthing, model.Module)
assert mything._localNameToFullName('MyClass') == 'reparenting_follows_aliases.main.MyClass'
assert myotherthing._localNameToFullName('MyClass') == 'reparenting_follows_aliases._mything.MyClass'
    assert system.find_object('reparenting_follows_aliases._mything.MyClass') == klass
# This part of the test cannot pass for now since we don't recursively resolve aliases.
# See https://github.com/twisted/pydoctor/pull/414 and https://github.com/twisted/pydoctor/issues/430
try:
assert system.find_object('reparenting_follows_aliases._myotherthing.MyClass') == klass
assert myotherthing.resolveName('MyClass') == klass
assert mything.resolveName('MyClass') == klass
assert top.resolveName('_myotherthing.MyClass') == klass
assert top.resolveName('_mything.MyClass') == klass
except (AssertionError, LookupError):
return
else:
        raise AssertionError("Congratulations!")
@pytest.mark.parametrize('modname', ['reparenting_crash','reparenting_crash_alt'])
def test_reparenting_crash(modname: str) -> None:
"""
Test for https://github.com/twisted/pydoctor/issues/513
"""
system = processPackage(modname)
mod = system.allobjects[modname]
assert isinstance(mod.contents[modname], model.Class)
assert isinstance(mod.contents['reparented_func'], model.Function)
assert isinstance(mod.contents[modname].contents['reparented_func'], model.Function) | null |
1,059 | import pytest
import py, sys, os
pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
def test_waitfinish_removes_tempdir():
ff = py.process.ForkedFunc(boxf1)
assert ff.tempdir.check()
ff.waitfinish()
assert not ff.tempdir.check()
def test_tempdir_gets_gc_collected(monkeypatch):
monkeypatch.setattr(os, 'fork', lambda: os.getpid())
ff = py.process.ForkedFunc(boxf1)
assert ff.tempdir.check()
ff.__del__()
assert not ff.tempdir.check()
def METHOD_NAME():
result = py.process.ForkedFunc(boxf1).waitfinish()
assert result.out == "some out\n"
assert result.err == "some err\n"
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 1
def test_exitstatus():
def func():
os._exit(4)
result = py.process.ForkedFunc(func).waitfinish()
assert result.exitstatus == 4
assert result.signal == 0
assert not result.out
assert not result.err
def test_exception_in_func():
def fun():
raise ValueError(42)
ff = py.process.ForkedFunc(fun)
result = ff.waitfinish()
assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
assert result.err.find("ValueError: 42") != -1
assert result.signal == 0
assert not result.retval
def test_forkedfunc_on_fds():
result = py.process.ForkedFunc(boxf2).waitfinish()
assert result.out == "someout"
assert result.err == "someerr"
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 2
def test_forkedfunc_on_fds_output():
result = py.process.ForkedFunc(boxf3).waitfinish()
assert result.signal == 11
assert result.out == "s"
def test_forkedfunc_on_stdout():
def boxf3():
import sys
sys.stdout.write("hello\n")
os.kill(os.getpid(), 11)
result = py.process.ForkedFunc(boxf3).waitfinish()
assert result.signal == 11
assert result.out == "hello\n"
def test_forkedfunc_signal():
result = py.process.ForkedFunc(boxseg).waitfinish()
assert result.retval is None
assert result.signal == 11
def test_forkedfunc_huge_data():
result = py.process.ForkedFunc(boxhuge).waitfinish()
assert result.out
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 3
def test_box_seq():
# we run many boxes with huge data, just one after another
for i in range(50):
result = py.process.ForkedFunc(boxhuge).waitfinish()
assert result.out
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 3
def test_box_in_a_box():
def boxfun():
result = py.process.ForkedFunc(boxf2).waitfinish()
print (result.out)
sys.stderr.write(result.err + "\n")
return result.retval
result = py.process.ForkedFunc(boxfun).waitfinish()
assert result.out == "someout\n"
assert result.err == "someerr\n"
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 2
def test_kill_func_forked():
class A:
pass
info = A()
import time
def box_fun():
time.sleep(10) # we don't want to last forever here
ff = py.process.ForkedFunc(box_fun)
os.kill(ff.pid, 15)
result = ff.waitfinish()
assert result.signal == 15
def test_hooks(monkeypatch):
def _boxed():
return 1
def _on_start():
sys.stdout.write("some out\n")
sys.stdout.flush()
def _on_exit():
sys.stderr.write("some err\n")
sys.stderr.flush()
result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
child_on_exit=_on_exit).waitfinish()
assert result.out == "some out\n"
assert result.err == "some err\n"
assert result.exitstatus == 0
assert result.signal == 0
assert result.retval == 1
# ======================================================================
# examples
# ======================================================================
#
def boxf1():
sys.stdout.write("some out\n")
sys.stderr.write("some err\n")
return 1
def boxf2():
os.write(1, "someout".encode('ascii'))
os.write(2, "someerr".encode('ascii'))
return 2
def boxf3():
os.write(1, "s".encode('ascii'))
os.kill(os.getpid(), 11)
def boxseg():
os.kill(os.getpid(), 11)
def boxhuge():
s = " ".encode('ascii')
os.write(1, s * 10000)
os.write(2, s * 10000)
os.write(1, s * 10000)
os.write(1, s * 10000)
os.write(2, s * 10000)
os.write(2, s * 10000)
os.write(1, s * 10000)
return 3 | null |
1,060 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoceanbasepro.endpoint import endpoint_data
class DescribeTopSQLListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeTopSQLList','oceanbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # String
return self.get_body_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_body_params('StartTime', StartTime)
def get_PageNumber(self): # Integer
return self.get_body_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_body_params('PageNumber', PageNumber)
def get_SearchRule(self): # String
return self.get_body_params().get('SearchRule')
def set_SearchRule(self, SearchRule): # String
self.add_body_params('SearchRule', SearchRule)
def get_TenantId(self): # String
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_body_params('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_body_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_body_params('PageSize', PageSize)
def get_SearchParameter(self): # String
return self.get_body_params().get('SearchParameter')
def set_SearchParameter(self, SearchParameter): # String
self.add_body_params('SearchParameter', SearchParameter)
def get_SortOrder(self): # String
return self.get_body_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_body_params('SortOrder', SortOrder)
def get_SearchValue(self): # String
return self.get_body_params().get('SearchValue')
def set_SearchValue(self, SearchValue): # String
self.add_body_params('SearchValue', SearchValue)
def get_SQLId(self): # String
return self.get_body_params().get('SQLId')
def set_SQLId(self, SQLId): # String
self.add_body_params('SQLId', SQLId)
def get_FilterCondition(self): # String
return self.get_body_params().get('FilterCondition')
def set_FilterCondition(self, FilterCondition): # String
self.add_body_params('FilterCondition', FilterCondition)
def get_EndTime(self): # String
return self.get_body_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_body_params('EndTime', EndTime)
def get_NodeIp(self): # String
return self.get_body_params().get('NodeIp')
def set_NodeIp(self, NodeIp): # String
self.add_body_params('NodeIp', NodeIp)
def get_DbName(self): # String
return self.get_body_params().get('DbName')
def set_DbName(self, DbName): # String
self.add_body_params('DbName', DbName)
def get_SearchKeyWord(self): # String
return self.get_body_params().get('SearchKeyWord')
def set_SearchKeyWord(self, SearchKeyWord): # String
self.add_body_params('SearchKeyWord', SearchKeyWord)
def get_SortColumn(self): # String
return self.get_body_params().get('SortColumn')
def set_SortColumn(self, SortColumn): # String
self.add_body_params('SortColumn', SortColumn) | null |
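# A minimal usage sketch (assumes the standard aliyunsdkcore client setup; the
# credentials, region and parameter values below are placeholders, not real ones):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeTopSQLListRequest()
#   request.set_TenantId('<tenant-id>')
#   request.set_StartTime('2022-01-01T00:00:00Z')
#   request.set_EndTime('2022-01-02T00:00:00Z')
#   response = client.do_action_with_exception(request)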
1,061 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class CreateGWSInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'CreateGWSInstance')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ImageId(self): # String
return self.get_query_params().get('ImageId')
def set_ImageId(self, ImageId): # String
self.add_query_param('ImageId', ImageId)
def get_AllocatePublicAddress(self): # Boolean
return self.get_query_params().get('AllocatePublicAddress')
def set_AllocatePublicAddress(self, AllocatePublicAddress): # Boolean
self.add_query_param('AllocatePublicAddress', AllocatePublicAddress)
def get_AppList(self): # String
return self.get_query_params().get('AppList')
def set_AppList(self, AppList): # String
self.add_query_param('AppList', AppList)
def get_InternetMaxBandwidthOut(self): # Integer
return self.get_query_params().get('InternetMaxBandwidthOut')
def set_InternetMaxBandwidthOut(self, InternetMaxBandwidthOut): # Integer
self.add_query_param('InternetMaxBandwidthOut', InternetMaxBandwidthOut)
def get_SystemDiskCategory(self): # String
return self.get_query_params().get('SystemDiskCategory')
def set_SystemDiskCategory(self, SystemDiskCategory): # String
self.add_query_param('SystemDiskCategory', SystemDiskCategory)
def get_SystemDiskSize(self): # Integer
return self.get_query_params().get('SystemDiskSize')
def set_SystemDiskSize(self, SystemDiskSize): # Integer
self.add_query_param('SystemDiskSize', SystemDiskSize)
def METHOD_NAME(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_WorkMode(self): # String
return self.get_query_params().get('WorkMode')
def set_WorkMode(self, WorkMode): # String
self.add_query_param('WorkMode', WorkMode)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_PeriodUnit(self): # String
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_query_param('PeriodUnit', PeriodUnit)
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_InternetChargeType(self): # String
return self.get_query_params().get('InternetChargeType')
def set_InternetChargeType(self, InternetChargeType): # String
self.add_query_param('InternetChargeType', InternetChargeType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_InternetMaxBandwidthIn(self): # Integer
return self.get_query_params().get('InternetMaxBandwidthIn')
def set_InternetMaxBandwidthIn(self, InternetMaxBandwidthIn): # Integer
self.add_query_param('InternetMaxBandwidthIn', InternetMaxBandwidthIn) | null |
1,062 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
import json
class DescribePriceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribePrice')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBInstanceStorage(self): # Integer
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self, DBInstanceStorage): # Integer
self.add_query_param('DBInstanceStorage', DBInstanceStorage)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def set_Engine(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_DBInstanceStorageType(self): # String
return self.get_query_params().get('DBInstanceStorageType')
def set_DBInstanceStorageType(self, DBInstanceStorageType): # String
self.add_query_param('DBInstanceStorageType', DBInstanceStorageType)
def get_Quantity(self): # Integer
return self.get_query_params().get('Quantity')
def set_Quantity(self, Quantity): # Integer
self.add_query_param('Quantity', Quantity)
def get_ServerlessConfig(self): # Struct
return self.get_query_params().get('ServerlessConfig')
def set_ServerlessConfig(self, ServerlessConfig): # Struct
self.add_query_param("ServerlessConfig", json.dumps(ServerlessConfig))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def METHOD_NAME(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_CommodityCode(self): # String
return self.get_query_params().get('CommodityCode')
def set_CommodityCode(self, CommodityCode): # String
self.add_query_param('CommodityCode', CommodityCode)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_UsedTime(self): # Integer
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # Integer
self.add_query_param('UsedTime', UsedTime)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_InstanceUsedType(self): # Integer
return self.get_query_params().get('InstanceUsedType')
def set_InstanceUsedType(self, InstanceUsedType): # Integer
self.add_query_param('InstanceUsedType', InstanceUsedType)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_TimeType(self): # String
return self.get_query_params().get('TimeType')
def set_TimeType(self, TimeType): # String
self.add_query_param('TimeType', TimeType)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
def get_DBNode(self): # String
return self.get_query_params().get('DBNode')
def set_DBNode(self, DBNode): # String
self.add_query_param('DBNode', DBNode)
def get_OrderType(self): # String
return self.get_query_params().get('OrderType')
def set_OrderType(self, OrderType): # String
self.add_query_param('OrderType', OrderType) | null |
1,063 | import utils
import os
import unittest
try:
import urllib.request as urllib2
except ImportError:
import urllib2
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import ihm.reference
class Tests(unittest.TestCase):
def test_reference(self):
"""Test Reference base class"""
_ = ihm.reference.Reference() # noop
def test_sequence(self):
"""Test Sequence class"""
s = ihm.reference.Sequence(
db_name='testdb', db_code='testcode', accession='testacc',
sequence='CCCG', details='foo')
s.alignments.append(ihm.reference.Alignment(
db_begin=10, db_end=30, entity_begin=20, entity_end=40))
self.assertEqual(s.db_name, 'testdb')
self.assertEqual(s.db_code, 'testcode')
self.assertEqual(s.accession, 'testacc')
self.assertEqual(s.sequence, 'CCCG')
self.assertEqual(s.details, 'foo')
a, = s._get_alignments()
self.assertEqual(a.db_begin, 10)
self.assertEqual(a.db_end, 30)
self.assertEqual(a.entity_begin, 20)
self.assertEqual(a.entity_end, 40)
self.assertEqual(a.seq_dif, [])
def test_sequence_default_alignment(self):
"""Test Sequence class with default Alignment"""
s = ihm.reference.Sequence(
db_name='testdb', db_code='testcode',
accession='testacc', sequence='CCCG', details='foo')
self.assertEqual(s.db_name, 'testdb')
self.assertEqual(s.db_code, 'testcode')
self.assertEqual(s.accession, 'testacc')
self.assertEqual(s.sequence, 'CCCG')
self.assertEqual(s.details, 'foo')
a1, = s._get_alignments()
a1a, = s._get_alignments()
        # should get the same default alignment each time (cached on the second access)
self.assertEqual(id(a1), id(a1a))
self.assertEqual(a1.db_begin, 1)
self.assertIsNone(a1.db_end)
self.assertEqual(a1.entity_begin, 1)
self.assertIsNone(a1.entity_end)
self.assertEqual(a1.seq_dif, [])
def test_uniprot_sequence(self):
"""Test UniProtSequence class"""
lpep = ihm.LPeptideAlphabet()
sd = ihm.reference.SeqDif(seq_id=1, db_monomer=lpep['C'],
monomer=lpep['W'], details='Test mutation')
s = ihm.reference.UniProtSequence(
db_code='testcode', accession='testacc', sequence='CCCG')
s.alignments.append(ihm.reference.Alignment(seq_dif=[sd]))
self.assertEqual(s.db_name, 'UNP')
self.assertEqual(s.db_code, 'testcode')
self.assertEqual(s.accession, 'testacc')
self.assertEqual(s.sequence, 'CCCG')
self.assertIsNone(s.details)
a, = s.alignments
self.assertEqual(len(a.seq_dif), 1)
self.assertEqual(a.seq_dif[0].seq_id, 1)
self.assertEqual(a.seq_dif[0].db_monomer.id, 'CYS')
self.assertEqual(a.seq_dif[0].monomer.id, 'TRP')
self.assertEqual(a.seq_dif[0].details, 'Test mutation')
def _get_from_uniprot_accession(self, fasta_fname):
def METHOD_NAME(url):
self.assertTrue(url.endswith('/testacc.fasta'))
fname = utils.get_input_file_name(TOPDIR, fasta_fname)
return open(fname, 'rb')
# Need to mock out urllib2 so we don't hit the network (expensive)
# every time we test
try:
orig_urlopen = urllib2.urlopen
urllib2.urlopen = METHOD_NAME
return ihm.reference.UniProtSequence.from_accession('testacc')
finally:
urllib2.urlopen = orig_urlopen
def test_uniprot_sequence_from_accession(self):
"""Test UniProtSequence.from_accession()"""
r = self._get_from_uniprot_accession('P52891.fasta')
self.assertIsInstance(r, ihm.reference.UniProtSequence)
self.assertEqual(r.db_code, 'NUP84_YEAST')
self.assertEqual(r.accession, 'testacc')
self.assertEqual(len(r.sequence), 726)
self.assertEqual(r.sequence[:20], 'MELSPTYQTERFTKFSDTLK')
self.assertEqual(
r.details,
'Nucleoporin NUP84 OS=Saccharomyces cerevisiae (strain ATCC '
'204508 / S288c) OX=559292 GN=NUP84 PE=1 SV=1')
def test_uniprot_sequence_from_accession_bad_header(self):
"""Test UniProtSequence.from_accession() with bad header"""
self.assertRaises(ValueError, self._get_from_uniprot_accession,
'uniprot_bad_header.fasta')
def test_uniprot_sequence_from_accession_no_details(self):
"""Test UniProtSequence.from_accession() with no details"""
r = self._get_from_uniprot_accession('uniprot_no_details.fasta')
self.assertIsInstance(r, ihm.reference.UniProtSequence)
self.assertEqual(r.db_code, 'NUP84_YEAST')
self.assertEqual(r.accession, 'testacc')
self.assertEqual(len(r.sequence), 726)
self.assertEqual(r.sequence[:20], 'MELSPTYQTERFTKFSDTLK')
self.assertIsNone(r.details)
if __name__ == '__main__':
unittest.main() | null |
1,064 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
Generate C code from the router schema.
"""
import re
from skupper_router_internal.management.schema import EnumType
from skupper_router_internal.management.qdrouter import QdSchema
copyright = """/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
"""
class Generator:
def __init__(self):
self.schema = QdSchema()
self.prefix = ['qd_schema']
self.generate_enums()
def header(self, name, text):
with open(name + '.h', 'w') as f:
f.write("#ifndef __%s_h__\n#define __%s_h__\n" % (name, name) + copyright + text + "\n#endif\n")
def source(self, name, text):
with open(name + '.c', 'w') as f:
f.write(copyright + text)
def METHOD_NAME(self, name): return re.sub(r'\W', '_', name)
def underscore(self, names): return '_'.join([self.METHOD_NAME(name) for name in names])
def prefix_name(self, names): return self.underscore(self.prefix + names)
def type_name(self, names): return self.prefix_name(names + ['t'])
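    # Illustrative example of the name pipeline above: with prefix ['qd_schema'],
    # type_name(['router', 'mode']) yields 'qd_schema_router_mode_t'.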
class EnumGenerator:
def __init__(self, generator, entity, attribute):
self.generator, self.entity, self.attribute = generator, entity, attribute
self.tags = attribute.atype.tags
self.type_name = generator.type_name([entity.short_name, attribute.name])
self.array = self.generator.prefix_name([entity.short_name, attribute.name, 'names'])
self.count = self.name('ENUM_COUNT')
def name(self, tag):
return self.generator.prefix_name([self.entity.short_name, self.attribute.name, tag]).upper()
def decl(self):
tags = self.tags + ['ENUM_COUNT']
return "typedef enum {\n" + \
",\n".join([" " + self.name(tag) for tag in tags]) + \
"\n} %s;\n\n" % self.type_name + \
"extern const char *%s[%s];\n\n" % (self.array, self.count)
def defn(self):
return "const char *%s[%s] = {\n" % (self.array, self.count) + \
",\n".join(' "%s"' % (self.name(tag)) for tag in self.tags) + \
"\n};\n\n"
def generate_enums(self):
enums = [self.EnumGenerator(self, entity, attribute)
for entity in self.schema.entity_types.values()
for attribute in entity.attributes.values()
if isinstance(attribute.atype, EnumType)]
self.header('schema_enum', '\n'.join(e.decl() for e in enums))
self.source('schema_enum', '#include "schema_enum.h"\n\n' + '\n'.join(e.defn() for e in enums))
if __name__ == '__main__':
Generator() | null |
1,065 | import furl
import threading
from django.utils import timezone
from website import mails, settings
from osf.models import PreprintProvider
from website.settings import DOMAIN, CAMPAIGN_REFRESH_THRESHOLD
from website.util.metrics import OsfSourceTags, OsfClaimedTags, CampaignSourceTags, CampaignClaimedTags, provider_source_tag
from framework.utils import throttle_period_expired
mutex = threading.Lock()
CAMPAIGNS = None
CAMPAIGNS_LAST_REFRESHED = timezone.now()
def get_campaigns():
global CAMPAIGNS
global CAMPAIGNS_LAST_REFRESHED
if not CAMPAIGNS or (not mutex.locked() and throttle_period_expired(CAMPAIGNS_LAST_REFRESHED, CAMPAIGN_REFRESH_THRESHOLD)):
with mutex:
newest_campaigns = {
'erpc': {
'system_tag': CampaignSourceTags.ErpChallenge.value,
'redirect_url': furl.furl(DOMAIN).add(path='erpc/').url,
'confirmation_email_template': mails.CONFIRM_EMAIL_ERPC,
'login_type': 'native',
},
}
# Institution Login
newest_campaigns.update({
'institution': {
'system_tag': 'institution_campaign',
'redirect_url': '',
'login_type': 'institution',
},
})
# Proxy campaigns: Preprints, both OSF and branded ones
preprint_providers = PreprintProvider.objects.all()
for provider in preprint_providers:
if provider._id == 'osf':
template = 'osf'
name = 'OSF'
url_path = 'preprints/'
external_url = None
else:
template = 'branded'
name = provider.name
url_path = 'preprints/{}'.format(provider._id)
external_url = provider.domain
campaign = '{}-preprints'.format(provider._id)
system_tag = provider_source_tag(provider._id, 'preprint')
newest_campaigns.update({
campaign: {
'system_tag': system_tag,
'redirect_url': furl.furl(DOMAIN).add(path=url_path).url,
'external_url': external_url,
'confirmation_email_template': mails.CONFIRM_EMAIL_PREPRINTS(template, name),
'login_type': 'proxy',
'provider': name,
'logo': provider._id if name != 'OSF' else settings.OSF_PREPRINTS_LOGO,
}
})
# Proxy campaigns: Registries, OSF only
            # TODO: refactor for further branded registries once there is a model for registry providers
newest_campaigns.update({
'osf-registries': {
'system_tag': provider_source_tag('osf', 'registry'),
'redirect_url': furl.furl(DOMAIN).add(path='registries/').url,
'confirmation_email_template': mails.CONFIRM_EMAIL_REGISTRIES_OSF,
'login_type': 'proxy',
'provider': 'osf',
'logo': settings.OSF_REGISTRIES_LOGO
}
})
newest_campaigns.update({
'osf-registered-reports': {
'system_tag': CampaignSourceTags.OsfRegisteredReports.value,
'redirect_url': furl.furl(DOMAIN).add(path='rr/').url,
'confirmation_email_template': mails.CONFIRM_EMAIL_REGISTRIES_OSF,
'login_type': 'proxy',
'provider': 'osf',
'logo': settings.OSF_REGISTRIES_LOGO
}
})
CAMPAIGNS = newest_campaigns
CAMPAIGNS_LAST_REFRESHED = timezone.now()
return CAMPAIGNS
def METHOD_NAME(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('system_tag')
return None
def email_template_for_campaign(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('confirmation_email_template')
return None
def campaign_for_user(user):
campaigns = get_campaigns()
for campaign, config in campaigns.items():
if config.get('system_tag') in user.system_tags:
return campaign
return None
def is_institution_login(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('login_type') == 'institution'
return None
def is_native_login(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('login_type') == 'native'
return None
def is_proxy_login(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('login_type') == 'proxy'
return None
def get_campaign_logo(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('logo', None)
return None
def get_service_provider(campaign):
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('provider')
return None
def campaign_url_for(campaign):
"""
Return the campaign's URL on OSF domain.
:param campaign: the campaign
:return: the url
"""
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('redirect_url')
return None
def external_campaign_url_for(campaign):
"""
Return the campaign's URL on Non-OSF domain, which is available for phase 2 branded preprints only.
:param campaign: the campaign
:return: the external url if the campaign is hosted on Non-OSF domain, None otherwise
"""
campaigns = get_campaigns()
if campaign in campaigns:
return campaigns.get(campaign).get('external_url')
return None
def get_external_domains():
"""
Return a list of trusted external domains for all eligible campaigns.
"""
campaigns = get_campaigns()
external_domains = []
for campaign, config in campaigns.items():
external_url = config.get('external_url', None)
if external_url:
external_domains.append(external_url)
return external_domains
NODE_SOURCE_TAG_CLAIMED_TAG_RELATION = {
CampaignSourceTags.ErpChallenge.value: CampaignClaimedTags.ErpChallenge.value,
CampaignSourceTags.OsfRegisteredReports.value: CampaignClaimedTags.OsfRegisteredReports.value,
CampaignSourceTags.Osf4m.value: CampaignClaimedTags.Osf4m.value,
OsfSourceTags.Osf.value: OsfClaimedTags.Osf.value,
} | null |
1,066 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Helper functions to format output for CLI."""
from typing import Dict, Iterable, Iterator, Optional, Sequence, Tuple
from click import formatting
from click._compat import term_len
def METHOD_NAME(rows: Iterable[Tuple[str, ...]]) -> Tuple[int, ...]:
"""Measure the width of each column in a table.
Args:
rows: The rows of the table.
Returns:
A tuple of the width of each column.
"""
widths: Dict[int, int] = {}
for row in rows:
for idx, col in enumerate(row):
widths[idx] = max(widths.get(idx, 0), term_len(col))
return tuple(y for x, y in sorted(widths.items()))
def iter_rows(
rows: Iterable[Tuple[str, ...]],
col_count: int,
) -> Iterator[Tuple[str, ...]]:
"""Iterate over rows of a table.
Args:
rows: The rows of the table.
col_count: The number of columns in the table.
Yields:
An iterator over the rows of the table.
"""
for row in rows:
yield row + ("",) * (col_count - len(row))
class ZenFormatter(formatting.HelpFormatter):
"""Override the default HelpFormatter to add a custom format for the help command output."""
def __init__(
self,
indent_increment: int = 2,
width: Optional[int] = None,
max_width: Optional[int] = None,
) -> None:
"""Initialize the formatter.
Args:
indent_increment: The number of spaces to indent each level of
nesting.
width: The maximum width of the help output.
max_width: The maximum width of the help output.
"""
super(ZenFormatter, self).__init__(indent_increment, width, max_width)
self.current_indent = 0
def write_dl(
self,
rows: Sequence[Tuple[str, ...]],
col_max: int = 30,
col_spacing: int = 2,
) -> None:
"""Writes a definition list into the buffer.
This is how options and commands are usually formatted.
Arguments:
rows: a list of items as tuples for the terms and values.
col_max: the maximum width of the first column.
col_spacing: the number of spaces between the first and
second column (and third).
The default behavior is to format the rows in a definition list
with rows of 2 columns following the format ``(term, value)``.
But for new CLI commands, we want to format the rows in a definition
list with rows of 3 columns following the format
``(term, value, description)``.
Raises:
TypeError: if the number of columns is not 2 or 3.
"""
rows = list(rows)
widths = METHOD_NAME(rows)
if len(widths) == 2:
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write(f"{'':>{self.current_indent}}{first}")
if not second:
self.write("\n")
continue
if term_len(first) <= first_col - col_spacing:
self.write(" " * (first_col - term_len(first)))
else:
self.write("\n")
self.write(" " * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
wrapped_text = formatting.wrap_text(
second, text_width, preserve_paragraphs=True
)
lines = wrapped_text.splitlines()
if lines:
self.write(f"{lines[0]}\n")
for line in lines[1:]:
self.write(
f"{'':>{first_col + self.current_indent}}{line}\n"
)
else:
self.write("\n")
elif len(widths) == 3:
first_col = min(widths[0], col_max) + col_spacing
second_col = min(widths[1], col_max) + col_spacing * 2
current_tag = None
for first, second, third in iter_rows(rows, len(widths)):
if current_tag != first:
current_tag = first
self.write("\n")
                    # Adding [#431d93] [/#431d93] makes the tag colorful when
                    # it is printed via rich's print
self.write(
f"[#431d93]{'':>{self.current_indent}}{first}:[/#431d93]\n"
)
if not third:
self.write("\n")
continue
if term_len(first) <= first_col - col_spacing:
self.write(" " * self.current_indent * 2)
else:
self.write("\n")
self.write(" " * (first_col + self.current_indent))
self.write(f"{'':>{self.current_indent}}{second}")
text_width = max(self.width - second_col - 4, 10)
wrapped_text = formatting.wrap_text(
third, text_width, preserve_paragraphs=True
)
lines = wrapped_text.splitlines()
if lines:
self.write(
" "
* (second_col - term_len(second) + self.current_indent)
)
self.write(f"{lines[0]}\n")
for line in lines[1:]:
self.write(
f"{'':>{second_col + self.current_indent * 4 }}{line}\n"
)
else:
self.write("\n")
else:
raise TypeError(
"Expected either three or two columns for definition list"
) | null |
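# A minimal usage sketch (assumption: ZenFormatter is driven like click's
# HelpFormatter, whose buffer is read back with getvalue(); the rows are
# illustrative, not taken from ZenML itself):
#
#   formatter = ZenFormatter()
#   formatter.write_dl(
#       [
#           ("stack", "register", "Register a new stack."),
#           ("stack", "list", "List all registered stacks."),
#       ]
#   )
#   print(formatter.getvalue())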
1,067 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkadb.endpoint import endpoint_data
class CreateDBClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'adb', '2019-03-15', 'CreateDBCluster','ads')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBClusterDescription(self): # String
return self.get_query_params().get('DBClusterDescription')
def set_DBClusterDescription(self, DBClusterDescription): # String
self.add_query_param('DBClusterDescription', DBClusterDescription)
def get_StorageType(self): # String
return self.get_query_params().get('StorageType')
def set_StorageType(self, StorageType): # String
self.add_query_param('StorageType', StorageType)
def get_Mode(self): # String
return self.get_query_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_query_param('Mode', Mode)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_BackupSetID(self): # String
return self.get_query_params().get('BackupSetID')
def set_BackupSetID(self, BackupSetID): # String
self.add_query_param('BackupSetID', BackupSetID)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBNodeGroupCount(self): # String
return self.get_query_params().get('DBNodeGroupCount')
def set_DBNodeGroupCount(self, DBNodeGroupCount): # String
self.add_query_param('DBNodeGroupCount', DBNodeGroupCount)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ComputeResource(self): # String
return self.get_query_params().get('ComputeResource')
def set_ComputeResource(self, ComputeResource): # String
self.add_query_param('ComputeResource', ComputeResource)
def get_ElasticIOResource(self): # String
return self.get_query_params().get('ElasticIOResource')
def set_ElasticIOResource(self, ElasticIOResource): # String
self.add_query_param('ElasticIOResource', ElasticIOResource)
def get_SourceDBInstanceName(self): # String
return self.get_query_params().get('SourceDBInstanceName')
def set_SourceDBInstanceName(self, SourceDBInstanceName): # String
self.add_query_param('SourceDBInstanceName', SourceDBInstanceName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_StorageResource(self): # String
return self.get_query_params().get('StorageResource')
def set_StorageResource(self, StorageResource): # String
self.add_query_param('StorageResource', StorageResource)
def get_DBClusterCategory(self): # String
return self.get_query_params().get('DBClusterCategory')
def set_DBClusterCategory(self, DBClusterCategory): # String
self.add_query_param('DBClusterCategory', DBClusterCategory)
def get_DBClusterNetworkType(self): # String
return self.get_query_params().get('DBClusterNetworkType')
def set_DBClusterNetworkType(self, DBClusterNetworkType): # String
self.add_query_param('DBClusterNetworkType', DBClusterNetworkType)
def get_RestoreTime(self): # String
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self, RestoreTime): # String
self.add_query_param('RestoreTime', RestoreTime)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DBClusterVersion(self): # String
return self.get_query_params().get('DBClusterVersion')
def set_DBClusterVersion(self, DBClusterVersion): # String
self.add_query_param('DBClusterVersion', DBClusterVersion)
def get_DBClusterClass(self): # String
return self.get_query_params().get('DBClusterClass')
def set_DBClusterClass(self, DBClusterClass): # String
self.add_query_param('DBClusterClass', DBClusterClass)
def get_UsedTime(self): # String
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # String
self.add_query_param('UsedTime', UsedTime)
def METHOD_NAME(self): # String
return self.get_query_params().get('RestoreType')
def set_RestoreType(self, RestoreType): # String
self.add_query_param('RestoreType', RestoreType)
def get_DBNodeStorage(self): # String
return self.get_query_params().get('DBNodeStorage')
def set_DBNodeStorage(self, DBNodeStorage): # String
self.add_query_param('DBNodeStorage', DBNodeStorage)
def get_ExecutorCount(self): # String
return self.get_query_params().get('ExecutorCount')
def set_ExecutorCount(self, ExecutorCount): # String
self.add_query_param('ExecutorCount', ExecutorCount)
def get_VPCId(self): # String
return self.get_query_params().get('VPCId')
def set_VPCId(self, VPCId): # String
self.add_query_param('VPCId', VPCId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType) | null |
1,068 | # Copyright 2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import unittest
from sls.app.synchronizer import archiving_synchronizer_impl
from sls.app.synchronizer.archiving_synchronizer_impl import StorageLifecycleArchivingSynchronizer
from sls.cloud.model.cloud_object_model import CloudObject
from sls.app.model.config_model import SynchronizerConfig
from sls.pipelineapi.model.archive_rule_model import StorageLifecycleRule, StorageLifecycleRuleTransition, StorageLifecycleRuleExecution
from sls.util.logger import AppLogger
from tests.mock.cp_data_source_mock import MockCloudPipelineDataSource
class TestSynchronizerBuildsActionsForFiles(unittest.TestCase):
synchronizer = StorageLifecycleArchivingSynchronizer(None, None, None, None)
def test_build_actions_for_files_find_files_correctly_with_date_in_transition(self):
folder = "/datastorage"
now = datetime.datetime.now()
rule = StorageLifecycleRule(
1, 1, "/*",
archiving_synchronizer_impl.METHOD_LATEST_FILE,
transitions=[StorageLifecycleRuleTransition("GLACIER", transition_date=now.date())]
)
subject_files = [
CloudObject(os.path.join(folder, "file1.txt"), now, None),
CloudObject(os.path.join(folder, "file2.txt"), now - datetime.timedelta(days=1), None),
]
actions = self.synchronizer._build_action_items_for_files(folder, subject_files, rule)
self.assertEqual(
len(actions.destination_transitions_queues["GLACIER"]),
2
)
def test_build_actions_for_files_find_files_correctly_with_days_in_transition(self):
folder = "/datastorage"
now = datetime.datetime.now()
rule = StorageLifecycleRule(
1, 1, "/*",
archiving_synchronizer_impl.METHOD_LATEST_FILE,
transitions=[StorageLifecycleRuleTransition("GLACIER", transition_after_days=1)]
)
subject_files = [
CloudObject(os.path.join(folder, "file1.txt"), now, None),
CloudObject(os.path.join(folder, "file2.txt"), now - datetime.timedelta(days=1), None),
]
actions = self.synchronizer._build_action_items_for_files(folder, subject_files, rule)
self.assertEqual(
len(actions.destination_transitions_queues["GLACIER"]),
1
)
class TestSynchronizerCheckRuleExecutionProgress(unittest.TestCase):
folder = "/datastorage"
now = datetime.datetime.now(datetime.timezone.utc)
synchronizer = \
StorageLifecycleArchivingSynchronizer(
SynchronizerConfig(command="archive"), MockCloudPipelineDataSource(), None, AppLogger("archive"))
def test_check_rule_execution_progress_still_running(self):
subject_files = {
"GLACIER":
[
CloudObject(os.path.join(self.folder, "file1.txt"), self.now - datetime.timedelta(days=3), "STANDARD"),
CloudObject(os.path.join(self.folder, "file2.txt"), self.now - datetime.timedelta(days=3), "STANDARD")
]
}
transition = StorageLifecycleRuleTransition("GLACIER", transition_after_days=0)
execution = StorageLifecycleRuleExecution(
1, 1, archiving_synchronizer_impl.EXECUTION_RUNNING_STATUS, self.folder,
"GLACIER", self.now
)
self.assertIsNone(self.synchronizer._check_rule_execution_progress(1, transition, subject_files, execution))
def test_check_rule_execution_progress_running_overdue(self):
subject_files = {
"GLACIER":
[
CloudObject(os.path.join(self.folder, "file1.txt"), self.now - datetime.timedelta(days=4), "STANDARD"),
CloudObject(os.path.join(self.folder, "file2.txt"), self.now - datetime.timedelta(days=4), "STANDARD")
]
}
transition = StorageLifecycleRuleTransition("GLACIER", transition_after_days=0)
execution = StorageLifecycleRuleExecution(
1, 1, archiving_synchronizer_impl.EXECUTION_RUNNING_STATUS, self.folder,
"GLACIER", self.now - datetime.timedelta(days=3)
)
updated_execution = self.synchronizer._check_rule_execution_progress(1, transition, subject_files, execution)
self.assertEqual(archiving_synchronizer_impl.EXECUTION_FAILED_STATUS, updated_execution.status)
def test_check_rule_execution_progress_running_should_succeed(self):
subject_files = {"GLACIER": []}
transition = StorageLifecycleRuleTransition("GLACIER", transition_after_days=0)
execution = StorageLifecycleRuleExecution(
1, 1, archiving_synchronizer_impl.EXECUTION_RUNNING_STATUS, self.folder,
"GLACIER", self.now - datetime.timedelta(days=2)
)
updated_execution = self.synchronizer._check_rule_execution_progress(1, transition, subject_files, execution)
self.assertEqual(archiving_synchronizer_impl.EXECUTION_SUCCESS_STATUS, updated_execution.status)
    def test_check_rule_execution_progress_running_should_succeed_when_files_newer_than_execution(self):
subject_files = {
"GLACIER":
[
CloudObject(os.path.join(self.folder, "file1.txt"), self.now, "STANDARD"),
CloudObject(os.path.join(self.folder, "file2.txt"), self.now, "STANDARD")
]
}
transition = StorageLifecycleRuleTransition("GLACIER", transition_after_days=0)
execution = StorageLifecycleRuleExecution(
1, 1, archiving_synchronizer_impl.EXECUTION_RUNNING_STATUS, self.folder,
"GLACIER", self.now - datetime.timedelta(days=2)
)
updated_execution = self.synchronizer._check_rule_execution_progress(1, transition, subject_files, execution)
self.assertEqual(archiving_synchronizer_impl.EXECUTION_SUCCESS_STATUS, updated_execution.status)
if __name__ == '__main__':
unittest.main() | null |
1,069 | #!/usr/bin/python
import os
import sys
import re
import string
import infXpostFx
def split_line(line):
# return a tuple (loop invariant) , (![...] and the rest)
s= line.split(",",1)
s1 = s[1].strip("\n")
s1 = s1.replace(").","")
#print s
return (s[0],s1)
def swapTypePositions(parts): # swap the positions (e.g. "X2 : integer" => "integer X2")
    part1 = parts[0]
    spl = part1.split(" ", 2)  # spl[2] is the part which interests us
    splitted = spl[2].split(",")  # split into individual groups, e.g. X21:$int
    outputF = ''
    for j in range(len(splitted)):
        lhs = splitted[j].split(":")  # split each group
        splitted[j] = lhs[1] + " " + lhs[0]
        if j < len(splitted) - 1:
            outputF = outputF + splitted[j] + ","
        else:
            outputF = outputF + splitted[j]
    outputF = spl[1] + outputF + ";"  # add the semicolon after the quantification
    return outputF
#introduce the correct negation sign ( aka replace ~ with !)
def introduceNegation(str):
str= str.replace("~","!")
return str
# check that the number of open parentheses equals the closed ones
def balance_parentheses(s):
    res = 0
    for x in s:
        if x == "(":
            res = res + 1
        if x == ")":
            res = res - 1
    return res
# replace | with its logical-or correspondent || and & with &&;
# after this step, get rid of the sG functions (just remove them from the invariant)
def introduceORAND(str):
    afterOR = str.replace("|", "||")
    afterAND = afterOR.replace("&", "&&")
    afterS = afterAND.split("||")
final=""
#remove the sG*
for x in afterS:
#final = final +x+ "||"
if x.find("sG")==-1:
final=final+x+"||"
final = final.strip("||")
    if balance_parentheses(final) != 0:
final = final+")"
final = final.replace("#","-")
return final
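# Illustrative example of the rewrite above (names from this file):
#   introduceORAND("a>0|sG(b)|c#5")  ->  "a>0||c-5"
# the sG(...) disjunct is dropped and the '#' placeholder maps back to '-'.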
def replaceConnectives(string):
afterNeg = introduceNegation(string)
final = introduceORAND(afterNeg)
return final
# quantify the variables that need quantification, and translate the
# invariant into Frama-C syntax via the helper package:
#   infXpostFx.convertS(string)
def quantify(line):
# replace the ![X..] with \forall and for each variable define the type eg:
# \forall integer X1
firstStep = line.split("]:",1);
#in case the invariant has no quantified variables, return the invariant
if len(firstStep) == 1:
tempSplit = firstStep[0].split("|")
final = infXpostFx.convertS(tempSplit)
FIN = ''
for x in final:
FIN = FIN + x + '|'
FIN = FIN.strip('|')
final = []
final.append(FIN)
return final
else: #the other case: ![..]:invariant
forall = firstStep[0]
forall = forall.replace("![","\\forall ")
integers = forall
integers = integers.replace("$int","integer")
temp = firstStep[1].strip("\n")
temp = temp[:-1]
temp = temp.replace("(","",1)
spl = temp.split('|')
temp = []
temp = infXpostFx.convertS(spl)
finInv = ''
for x in temp:
finInv = finInv + "|" + x
finInv = finInv.strip("|")
return (integers,finInv)
def ensure_dir(name):
d = os.path.dirname(name)
if not os.path.exists(d):
os.makedirs(d)
#create the actual invariant list which has to be printed in the C file
def work(lines):
done= False
i=0
linv = ["/*@ \n"]
while not done:
finalInv=""
try:
l1 = split_line(lines[i])
l2 = quantify(l1[1]) # position two is the actual invariant
if len(l2)==1:
conn = replaceConnectives(l2[0].strip("\n"))
finalInv = l1[0] + " " + conn + ";\n"
else:
                l3 = swapTypePositions(l2)
conn = replaceConnectives(l2[1].strip("\n"))
finalInv = l1[0]+ "\f" + l3 + conn + ";\n" #l2[1].strip("\n") +";\n"
finalInv = finalInv.replace("<","<=")
finalInv = finalInv.replace(">",">=")
linv.append(finalInv)
i = i + 1
        except IndexError as e:
            print("%s main while loop" % e)
            print("tried %s records" % i)
done = True
linv.append("*/\n")
return linv
#check if the number of command line arguments is correct
#arguments must be: file.c vanalyzeOutputFile outputFile | null |
1,070 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknlb.endpoint import endpoint_data
class UpdateListenerAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Nlb', '2022-04-30', 'UpdateListenerAttribute','nlb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CaCertificateIdss(self): # RepeatList
return self.get_body_params().get('CaCertificateIds')
	def set_CaCertificateIdss(self, CaCertificateIds): # RepeatList
for depth1 in range(len(CaCertificateIds)):
self.add_body_params('CaCertificateIds.' + str(depth1 + 1), CaCertificateIds[depth1])
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_SecSensorEnabled(self): # Boolean
return self.get_body_params().get('SecSensorEnabled')
def set_SecSensorEnabled(self, SecSensorEnabled): # Boolean
self.add_body_params('SecSensorEnabled', SecSensorEnabled)
def get_AlpnPolicy(self): # String
return self.get_body_params().get('AlpnPolicy')
def set_AlpnPolicy(self, AlpnPolicy): # String
self.add_body_params('AlpnPolicy', AlpnPolicy)
def get_Mss(self): # Integer
return self.get_body_params().get('Mss')
def set_Mss(self, Mss): # Integer
self.add_body_params('Mss', Mss)
def get_ServerGroupId(self): # String
return self.get_body_params().get('ServerGroupId')
def set_ServerGroupId(self, ServerGroupId): # String
self.add_body_params('ServerGroupId', ServerGroupId)
def get_ListenerId(self): # String
return self.get_body_params().get('ListenerId')
def set_ListenerId(self, ListenerId): # String
self.add_body_params('ListenerId', ListenerId)
def get_CertificateIdss(self): # RepeatList
return self.get_body_params().get('CertificateIds')
def set_CertificateIdss(self, CertificateIds): # RepeatList
for depth1 in range(len(CertificateIds)):
self.add_body_params('CertificateIds.' + str(depth1 + 1), CertificateIds[depth1])
def get_AlpnEnabled(self): # Boolean
return self.get_body_params().get('AlpnEnabled')
def set_AlpnEnabled(self, AlpnEnabled): # Boolean
self.add_body_params('AlpnEnabled', AlpnEnabled)
def get_DryRun(self): # Boolean
return self.get_body_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_body_params('DryRun', DryRun)
def get_ProxyProtocolEnabled(self): # Boolean
return self.get_body_params().get('ProxyProtocolEnabled')
def set_ProxyProtocolEnabled(self, ProxyProtocolEnabled): # Boolean
self.add_body_params('ProxyProtocolEnabled', ProxyProtocolEnabled)
def get_Cps(self): # Integer
return self.get_body_params().get('Cps')
def set_Cps(self, Cps): # Integer
self.add_body_params('Cps', Cps)
def get_SecurityPolicyId(self): # String
return self.get_body_params().get('SecurityPolicyId')
def set_SecurityPolicyId(self, SecurityPolicyId): # String
self.add_body_params('SecurityPolicyId', SecurityPolicyId)
def get_IdleTimeout(self): # Integer
return self.get_body_params().get('IdleTimeout')
def set_IdleTimeout(self, IdleTimeout): # Integer
self.add_body_params('IdleTimeout', IdleTimeout)
def get_ListenerDescription(self): # String
return self.get_body_params().get('ListenerDescription')
def set_ListenerDescription(self, ListenerDescription): # String
self.add_body_params('ListenerDescription', ListenerDescription)
def get_CaEnabled(self): # Boolean
return self.get_body_params().get('CaEnabled')
def set_CaEnabled(self, CaEnabled): # Boolean
self.add_body_params('CaEnabled', CaEnabled) | null |
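# A minimal usage sketch (credentials, region and IDs are placeholders; it
# assumes the standard aliyunsdkcore client shipped with this SDK):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = UpdateListenerAttributeRequest()
#   request.set_ListenerId('lsn-xxxxxxxx')
#   request.set_IdleTimeout(60)
#   response = client.do_action_with_exception(request)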
1,071 | """Helper class to connect Nimbus service"""
import logging
from decorators import singleton
from deploy_board.settings import IS_PINTEREST, NIMBUS_SERVICE_URL, NIMBUS_SERVICE_VERSION, NIMBUS_USE_EGRESS, NIMBUS_EGRESS_URL, TELETRAAN_PROJECT_URL_FORMAT
from exceptions import NotAuthorizedException, TeletraanException, FailedAuthenticationException
from urlparse import urlparse
import requests
requests.packages.urllib3.disable_warnings()
log = logging.getLogger(__name__)
@singleton
class NimbusClient(object):
def handle_response(self, response):
if response.status_code == 404:
log.error("Resource not found. Nimbus API response - %s" % response.content)
return None
if response.status_code == 409:
log.error("Resource already exists. Nimbus API response - %s" % response.content)
raise TeletraanException('Resource conflict - Nimbus already has an Identifier for your proposed new stage. ')
if 400 <= response.status_code < 600:
log.error("Nimbus API Error %s, %s" % (response.content, response.status_code))
raise TeletraanException(
"Teletraan failed to successfully call Nimbus. Contact your friendly Teletraan owners for assistance. Hint: %s, %s" % (response.status_code, response.content)
)
if response.status_code == 200 or response.status_code == 201:
return response.json()
return None
def get_one_identifier(self, name, token=None):
service_url = NIMBUS_EGRESS_URL if NIMBUS_USE_EGRESS else NIMBUS_SERVICE_URL
headers = {}
headers['Client-Authorization'] = 'client Teletraan'
if token:
headers['Authorization'] = 'token %s' % token
if NIMBUS_USE_EGRESS:
parsed_uri = urlparse(NIMBUS_SERVICE_URL)
headers['Host'] = parsed_uri.netloc
response = requests.get('{}/api/{}/identifiers/{}'.format(service_url, NIMBUS_SERVICE_VERSION, name),
headers=headers)
return self.handle_response(response)
def create_one_identifier(self, data, token=None):
"""
Create a Nimbus Identifier according to the input request data.
If the request data does not have all the information needed for creating a Nimbus identifier, this method will raise a Teletraan Exception.
"""
requiredParams = ['projectName', 'env_name', 'stage_name']
for param in requiredParams:
if data.get(param) is None or len(data.get(param)) == 0:
log.error("Missing %s in the request data, cannot create a Nimbus identifier" % param)
exceptionMessage = "Teletraan cannot create a Nimbus identifier because %s is missing." % param
if IS_PINTEREST:
exceptionMessage += " Contact #teletraan for assistance."
raise TeletraanException(exceptionMessage)
headers = {}
headers['Client-Authorization'] = 'client Teletraan'
if token:
headers['Authorization'] = 'token %s' % token
payload = {}
payload['kind'] = 'Identifier'
payload['apiVersion'] = 'v1'
payload['platformName'] = 'teletraan'
payload['projectName'] = data.get('projectName')
cellName = None
for property in data['propertyList']['properties']:
if property['propertyName'] == 'cellName':
cellName = property['propertyValue']
if cellName is None:
log.error("Missing cellName in the request data, cannot create a Nimbus identifier")
exceptionMessage = "Teletraan cannot create a Nimbus identifier because cellName is missing in this env's existing identifier."
if IS_PINTEREST:
exceptionMessage += " Contact #teletraan for assistance."
raise TeletraanException(exceptionMessage)
payload['spec'] = {
'kind': 'EnvironmentSpec',
'apiVersion': 'v1',
'cellName': cellName,
'envName': data.get('env_name'),
'stageName': data.get('stage_name')
}
service_url = NIMBUS_EGRESS_URL if NIMBUS_USE_EGRESS else NIMBUS_SERVICE_URL
if NIMBUS_USE_EGRESS:
parsed_uri = urlparse(NIMBUS_SERVICE_URL)
headers['Host'] = parsed_uri.netloc
response = requests.post('{}/api/{}/identifiers'.format(service_url, NIMBUS_SERVICE_VERSION),
json=payload,
headers=headers)
return self.handle_response(response)
def delete_one_identifier(self, name, token=None):
headers = {}
headers['Client-Authorization'] = 'client Teletraan'
if token:
headers['Authorization'] = 'token %s' % token
service_url = NIMBUS_EGRESS_URL if NIMBUS_USE_EGRESS else NIMBUS_SERVICE_URL
if NIMBUS_USE_EGRESS:
parsed_uri = urlparse(NIMBUS_SERVICE_URL)
headers['Host'] = parsed_uri.netloc
response = requests.delete('{}/api/{}/identifiers/{}'.format(service_url, NIMBUS_SERVICE_VERSION, name),
headers=headers)
return self.handle_response(response)
    def get_project_url(self, project_name):
if not TELETRAAN_PROJECT_URL_FORMAT:
return ""
return TELETRAAN_PROJECT_URL_FORMAT.format(projectName=project_name) | null |
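# Typical flow (a sketch; the token and names are placeholders):
#
#   client = NimbusClient()
#   identifier = client.get_one_identifier('my-identifier', token='<oauth-token>')
#   if identifier is None:
#       client.create_one_identifier(
#           {'projectName': 'my-project', 'env_name': 'env', 'stage_name': 'stage',
#            'propertyList': {'properties': [
#                {'propertyName': 'cellName', 'propertyValue': 'cell-1'}]}},
#           token='<oauth-token>')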
1,072 | import os
from unittest import TestCase
from pcs import settings
from pcs.common import file_type_codes
from pcs.common.reports import codes as report_codes
from pcs.lib.booth import env
from pcs.lib.file.raw_file import GhostFile
from pcs_test.tools import fixture
from pcs_test.tools.assertions import assert_raise_library_error
class BoothEnv(TestCase):
def test_ghost_conf_real_key(self):
# pylint: disable=no-self-use
assert_raise_library_error(
lambda: env.BoothEnv(
"my_booth", {"config_data": "some config data".encode("utf-8")}
),
fixture.error(
report_codes.LIVE_ENVIRONMENT_NOT_CONSISTENT,
mocked_files=[file_type_codes.BOOTH_CONFIG],
required_files=[file_type_codes.BOOTH_KEY],
),
)
def test_real_conf_ghost_key(self):
# pylint: disable=no-self-use
assert_raise_library_error(
lambda: env.BoothEnv(
"my_booth", {"key_data": "some key data".encode("utf-8")}
),
fixture.error(
report_codes.LIVE_ENVIRONMENT_NOT_CONSISTENT,
mocked_files=[file_type_codes.BOOTH_KEY],
required_files=[file_type_codes.BOOTH_CONFIG],
),
)
def test_real(self):
my_env = env.BoothEnv("my_booth", {})
self.assertEqual("my_booth", my_env.instance_name)
self.assertFalse(isinstance(my_env.config.raw_file, GhostFile))
self.assertFalse(isinstance(my_env.key.raw_file, GhostFile))
self.assertEqual(
os.path.join(settings.booth_config_dir, "my_booth.conf"),
my_env.config_path,
)
self.assertEqual(
os.path.join(settings.booth_config_dir, "my_booth.key"),
my_env.key_path,
)
self.assertEqual([], my_env.ghost_file_codes)
self.assertEqual({}, my_env.export())
site_list = ["site1", "site2"]
arbitrator_list = ["arbitrator1"]
facade = my_env.create_facade(site_list, arbitrator_list)
self.assertEqual(site_list, facade.get_sites())
self.assertEqual(arbitrator_list, facade.get_arbitrators())
def test_ghost(self):
config_data = "some config_data".encode("utf-8")
key_data = "some key_data".encode("utf-8")
key_path = "some key path"
my_env = env.BoothEnv(
"my_booth",
{
"config_data": config_data,
"key_data": key_data,
"key_path": key_path,
},
)
self.assertEqual("my_booth", my_env.instance_name)
self.assertTrue(isinstance(my_env.config.raw_file, GhostFile))
self.assertTrue(isinstance(my_env.key.raw_file, GhostFile))
with self.assertRaises(AssertionError) as cm:
dummy_path = my_env.config_path
self.assertEqual(
"Reading config path is supported only in live environment",
str(cm.exception),
)
self.assertEqual(key_path, my_env.key_path)
self.assertEqual(
[file_type_codes.BOOTH_CONFIG, file_type_codes.BOOTH_KEY],
my_env.ghost_file_codes,
)
self.assertEqual(
{
"config_file": {"content": config_data},
"key_file": {"content": key_data},
},
my_env.export(),
)
site_list = ["site1", "site2"]
arbitrator_list = ["arbitrator1"]
facade = my_env.create_facade(site_list, arbitrator_list)
self.assertEqual(site_list, facade.get_sites())
self.assertEqual(arbitrator_list, facade.get_arbitrators())
def test_invalid_instance(self):
# pylint: disable=no-self-use
assert_raise_library_error(
lambda: env.BoothEnv("/tmp/booth/booth", {}),
fixture.error(
report_codes.BOOTH_INVALID_NAME,
name="/tmp/booth/booth",
forbidden_characters="/",
),
)
    def test_invalid_instance_ghost(self):
# pylint: disable=no-self-use
assert_raise_library_error(
lambda: env.BoothEnv(
"../../booth/booth",
{
"config_data": "some config data",
"key_data": "some key data",
"key_path": "some key path",
},
),
fixture.error(
report_codes.BOOTH_INVALID_NAME,
name="../../booth/booth",
forbidden_characters="/",
),
)
def test_default_instance(self):
self.assertEqual(env.BoothEnv(None, {}).instance_name, "booth") | null |
1,073 | # **************************************************************************
# *
# * Authors: J.M. de la Rosa Trevin ([email protected])
# *
# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address '[email protected]'
# *
# **************************************************************************
from pyworkflow.viewer import Viewer, DESKTOP_TKINTER, WEB_DJANGO
from pyworkflow.protocol.params import LabelParam
from pwem.viewers import showj, EmProtocolViewer, ObjectView
from pwem.objects import SetOfMicrographs, SetOfMovies
from xmipp3.protocols.protocol_movie_opticalflow import (XmippProtOFAlignment,
OBJCMD_MOVIE_ALIGNCARTESIAN)
from xmipp3.protocols.protocol_flexalign import XmippProtFlexAlign
from xmipp3.protocols.protocol_movie_max_shift import XmippProtMovieMaxShift
from .viewer_ctf_consensus import getStringIfActive
class XmippMovieAlignViewer(Viewer):
_targets = [XmippProtOFAlignment, XmippProtFlexAlign]
_environments = [DESKTOP_TKINTER, WEB_DJANGO]
_label = 'viewer optical/correlation alignment'
def _visualize(self, obj, **kwargs):
views = []
if obj.hasAttribute('outputMicrographs'):
views.append(ObjectView(self._project, obj.strId(),
obj.outputMicrographs.getFileName(),
viewParams=getViewParams()))
elif obj.hasAttribute('outputMovies'):
views.append(ObjectView(self._project, obj.strId(),
obj.outputMovies.getFileName(),
viewParams=getViewParams()))
else:
views.append(self.infoMessage("Output (micrographs or movies) has "
"not been produced yet."))
return views
class XmippMovieMaxShiftViewer(EmProtocolViewer):
""" This viewer is intendet to visualize the selection made by
the Xmipp - Movie max shift protocol.
"""
_label = 'viewer Movie Max Shift'
_environments = [DESKTOP_TKINTER, WEB_DJANGO]
_targets = [XmippProtMovieMaxShift]
_memory = False
resolutionThresholdOLD = -1
    # temporary metadata file with CTFs whose resolution is greater than X
tmpMetadataFile = 'viewersTmp.sqlite'
    def _defineParams(self, form):
form.addSection(label='Visualization')
form.addParam('visualizeMics', LabelParam,
label="Visualize passed micrographs",
help="Visualize those micrographs considered valid.")
form.addParam('visualizeMicsDiscarded', LabelParam,
label="Visualize discarded micrographs",
help="Visualize discarded micrographs.")
def _getVisualizeDict(self):
return {'visualizeMics': self._visualizeMics,
'visualizeMicsDiscarded': self._visualizeMicsDiscarded}
def _visualizeAny(self, outNameCondition):
views = []
        # micrographs take priority over movies when choosing what to show
objs = [x for x in self.protocol.iterOutputAttributes(SetOfMicrographs)]
objs += [x for x in self.protocol.iterOutputAttributes(SetOfMovies)]
for outName, outObj in objs:
if outNameCondition(outName):
views.append(self.objectView(outObj, viewParams=getViewParams()))
break
if not views:
outputType = 'discarded' if outNameCondition('Discarded') else 'accepted'
self.infoMessage('%s does not have %s outputs%s'
% (self.protocol.getObjLabel(), outputType,
getStringIfActive(self.protocol)),
title='Info message').show()
return views
def _visualizeMics(self, e=None):
return self._visualizeAny(lambda x: not x.endswith('Discarded'))
def _visualizeMicsDiscarded(self, e=None):
return self._visualizeAny(lambda x: x.endswith('Discarded'))
def getViewParams():
plotLabels = ('psdCorr._filename plotPolar._filename '
'plotCart._filename plotGlobal._filename')
labels = plotLabels + ' _filename '
viewParams = {showj.MODE: showj.MODE_MD,
showj.ORDER: 'id ' + labels,
showj.VISIBLE: 'id ' + labels,
showj.RENDER: plotLabels,
showj.ZOOM: 20,
showj.OBJCMDS: "'%s'" % OBJCMD_MOVIE_ALIGNCARTESIAN
}
return viewParams | null |
1,074 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from src.config import Config
def mocked_url(url_prefix_to_mock):
api = Config.instance().api.strip('/')
return os.path.join(api, url_prefix_to_mock)
def mock_pipe_load_by_id(pipeline_id, pipeline_name, repository, server_date):
get_by_id_response = {
"payload": {
"id": pipeline_id,
"name": pipeline_name,
"description": "",
"repository": repository,
"createdDate": server_date,
"currentVersion": "awesome_version"
},
"status": "OK"
}
return json.dumps(get_by_id_response)
def mock_run(pipeline_id, docker_image="", identifier=None):
run_response = {
"payload": {
"id": identifier,
"pipelineId": pipeline_id,
"dockerImage": docker_image,
"status": "SCHEDULED",
"pipelineRunParameters": [
{
"name": "param_name",
"value": "param",
}
]
},
"status": "OK"
}
return json.dumps(run_response)
def mock_run_parameters(instance_disk, instance_type, param_name="param_name", param_value="param"):
run_parameters_response = {
"payload": {
"main_file": "file",
"instance_disk": instance_disk,
"instance_size": instance_type,
"main_class": "test_class",
"parameters": {
param_name: {
"required": False,
"type": "input",
"value": param_value
}
}
},
"status": "OK"
}
return json.dumps(run_parameters_response)
def mock_pipe(pipeline_id, pipeline_name, raw_date, v_1):
list_response = {
"payload": {
"id": pipeline_id,
"name": pipeline_name,
"currentVersion": mock_version(raw_date, v_1)
},
"status": "OK"
}
return json.dumps(list_response)
def mock_load_pipes(id_1, id_2, name_1, name_2, raw_date, v_1, repo):
pipe_1 = {
"id": id_1,
"name": name_1,
"currentVersion": mock_version(raw_date, v_1),
"createdDate": raw_date,
"repository": repo
}
pipe_2 = {
"id": id_2,
"name": name_2,
"currentVersion": mock_version(raw_date, v_1),
"createdDate": raw_date,
"repository": repo
}
response = {
"payload": [pipe_1, pipe_2],
"status": "OK"
}
return json.dumps(response)
def mock_versions(raw_date, v_1, v_2):
version_1 = mock_version(raw_date, v_1)
version_2 = mock_version(raw_date, v_2)
version_response = {
"payload": [version_1, version_2],
"status": "OK"
}
return json.dumps(version_response)
def mock_version(raw_date, v_1):
version_1 = {
"name": v_1,
"draft": "draft",
"createdDate": raw_date,
"commitId": "commit"
}
return version_1
def mock_pipeline_datastorage(pipeline_id, raw_date):
datastorage_1 = {
"fileMask": "bucket/1",
"pipelineId": pipeline_id,
"createdDate": raw_date
}
datastorage_2 = {
"fileMask": "bucket/2",
"pipelineId": pipeline_id,
"createdDate": raw_date
}
datastorage_response = {
"payload": [datastorage_1, datastorage_2],
"status": "OK"
}
return json.dumps(datastorage_response)
def mock_datastorage(id, name, path, type, storage_policy=None):
    if storage_policy is None:  # avoid sharing a mutable default dict
        storage_policy = {}
    datastorage = {
"id": id,
"name": name,
"path": path,
"type": type,
"storagePolicy": storage_policy
}
datastorage_response = {
"payload": datastorage,
"status": "OK"
}
return json.dumps(datastorage_response)
def mock_storage_policy(backup_duration, lts_duration, sts_duration, versioning_enabled):
storage_policy = {
"backupDuration": backup_duration,
"longTermStorageDuration": lts_duration,
"shortTermStorageDuration": sts_duration,
"versioningEnabled": versioning_enabled
}
return storage_policy
def mock_price(instance_disk, instance_type):
price_response = {
"payload": {
"instanceType": instance_type,
"instanceDisk": instance_disk,
"pricePerHour": 0.12,
"minimumTimePrice": 0.12,
"maximumTimePrice": 0.12,
"averageTimePrice": 0.12
},
"status": "OK"
}
return json.dumps(price_response)
def mock_run_filter(total_count="42", elements=[]):
response = {
"payload": {
"totalCount": total_count,
"elements": elements
},
"status": "OK"
}
return json.dumps(response)
def mock_element(identifier=None, pipeline_id=None, pipeline_name=None, version=None, status=None, start_date=None,
end_date=None, parameters=[], instance={}):
response = {
"id": identifier,
"pipelineId": pipeline_id,
"pipelineName": pipeline_name,
"status": status,
"version": version,
"startDate": start_date,
"endDate": end_date,
"pipelineRunParameters": parameters,
"instance": instance
}
return response
def mock_instance(node_ip):
response = {
"nodeIP": node_ip
}
return response
def mock_parameter(name=None, value=None):
response = {
"name": name,
"value": value
}
return response
def mock_cluster_load_all(nodes=[]):
response = {
"payload": nodes,
"status": "OK"
}
return json.dumps(response)
def mock_node(node, addresses):
response = {
"uid": node.uid,
"name": node.name,
"creationTimestamp": node.created,
"pipelineRun": node.pipeline_run,
"systemInfo": node.system_info,
"labels": node.labels,
"allocatable": node.allocatable,
"capacity": node.capacity,
"addresses": mock_node_addresses(addresses),
"pods": node.pods
}
return response
def mock_node_addresses(addresses):
response = []
for address in addresses:
address_response = {
"address": address.address,
"type": address.address_type
}
response.append(address_response)
return response
def mock_node_pods(pod):
response = {
"uid": pod.uid,
"name": pod.name,
"namespace": pod.namespace,
"nodeName": pod.node_name,
"phase": pod.phase
}
return response
def mock_node_load(node, addresses):
response = {
"payload": mock_node(node, addresses),
"status": "OK"
}
return json.dumps(response)
def mock_pipeline_run(model):
"""
:type model: PipelineRunModel
"""
response = {
"payload": {
"id": model.identifier,
"pipelineId": model.pipeline_id,
"pipelineName": model.pipeline,
"version": model.version,
"startDate": model.scheduled_date,
"endDate": model.end_date,
"owner": model.owner,
"status": model.status
},
"status": "OK"
}
return json.dumps(response) | null |
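# These helpers typically back an HTTP mocking layer in tests, e.g. (a sketch;
# the requests_mock usage is an assumption, not part of this module):
#
#   import requests_mock
#   with requests_mock.Mocker() as m:
#       m.get(mocked_url('pipeline/1/load'),
#             text=mock_pipe_load_by_id(1, 'pipe', 'repo', '2019-01-01'))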
1,075 | """
Application to explore the difference between sasview 3.x orientation
dispersity and possible replacement algorithms.
"""
import mpl_toolkits.mplot3d # Adds projection='3d' option to subplot
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, CheckButtons
from matplotlib import cm
import numpy as np
from numpy import pi, cos, sin, sqrt, exp, degrees, radians
def draw_sphere(ax, radius=10., steps=100):
u = np.linspace(0, 2 * np.pi, steps)
v = np.linspace(0, np.pi, steps)
x = radius * np.outer(np.cos(u), np.sin(v))
y = radius * np.outer(np.sin(u), np.sin(v))
z = radius * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=4, cstride=4, color='w')
def draw_mesh_current(ax, theta, dtheta, phi, dphi, radius=10., dist='gauss'):
theta = radians(theta)
phi = radians(phi)
dtheta = radians(dtheta)
dphi = radians(dphi)
# 10 point 3-sigma gaussian weights
t = np.linspace(-3., 3., 11)
if dist == 'gauss':
weights = exp(-0.5*t**2)
elif dist == 'rect':
weights = np.ones_like(t)
else:
raise ValueError("expected dist to be 'gauss' or 'rect'")
theta = theta + dtheta*t
phi = phi + dphi*t
x = radius * np.outer(cos(phi), cos(theta))
y = radius * np.outer(sin(phi), cos(theta))
z = radius * np.outer(np.ones_like(phi), sin(theta))
w = np.outer(weights, weights*abs(cos(theta)))
x,y,z,w = [v.flatten() for v in (x,y,z,w)]
ax.scatter(x, y, z, c=w, marker='o', vmin=0., vmax=1.0)
def draw_mesh_new(ax, theta, dtheta, phi, dphi, flow, radius=10., dist='gauss'):
theta_center = radians(90-theta)
phi_center = radians(phi)
flow_center = radians(flow)
dtheta = radians(dtheta)
dphi = radians(dphi)
# 10 point 3-sigma gaussian weights
t = np.linspace(-3., 3., 11)
if dist == 'gauss':
weights = exp(-0.5*t**2)
elif dist == 'rect':
weights = np.ones_like(t)
else:
raise ValueError("expected dist to be 'gauss' or 'rect'")
theta = dtheta*t
phi = dphi*t
x = radius * np.outer(cos(phi), cos(theta))
y = radius * np.outer(sin(phi), cos(theta))
z = radius * np.outer(np.ones_like(phi), sin(theta))
#w = np.outer(weights, weights*abs(cos(dtheta*t)))
w = np.outer(weights, weights*abs(cos(theta)))
x, y, z, w = [v.flatten() for v in (x,y,z,w)]
x, y, z = rotate(x, y, z, phi_center, theta_center, flow_center)
ax.scatter(x, y, z, c=w, marker='o', vmin=0., vmax=1.)
def rotate(x, y, z, phi, theta, psi):
    R = rotation_matrix(psi, theta, phi)
p = np.vstack([x,y,z])
q = np.dot(R,p)
return q
def rotation_matrix(xa, ya, za):
Rz = [[cos(za), -sin(za), 0.],
[sin(za), cos(za), 0.],
[0., 0., 1.]]
Ry = [[cos(ya), 0., -sin(ya)],
[0., 1., 0.],
[sin(ya), 0., cos(ya)]]
Rx = [[1., 0., 0.],
[0., cos(xa), sin(xa)],
[0., -sin(xa), cos(xa)]]
R = np.dot(np.dot(Rz, Ry), Rx)
return R
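# Note on ordering: R = Rz(za) . Ry(ya) . Rx(xa), so when rotate() calls
# rotation_matrix(psi, theta, phi) the x-axis rotation (psi) is applied
# first, then theta about y, then phi about z.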
def main():
plt.hold(True)
plt.set_cmap('gist_earth')
plt.clf()
#gs = gridspec.GridSpec(2,1,height_ratios=[4,1])
#ax = plt.subplot(gs[0], projection='3d')
ax = plt.axes([0.0, 0.2, 1.0, 0.8], projection='3d')
phi, dphi = -45., 3.
theta, dtheta = 70., 10.
flow = 0.
#dist = 'rect'
dist = 'gauss'
axcolor = 'lightgoldenrodyellow'
axphi = plt.axes([0.1, 0.1, 0.45, 0.04], axisbg=axcolor)
axtheta = plt.axes([0.1, 0.15, 0.45, 0.04], axisbg=axcolor)
sphi = Slider(axphi, 'Phi', -180, 180, valinit=phi)
stheta = Slider(axtheta, 'Theta', -180, 180, valinit=theta)
axdphi = plt.axes([0.75, 0.1, 0.15, 0.04], axisbg=axcolor)
axdtheta = plt.axes([0.75, 0.15, 0.15, 0.04], axisbg=axcolor)
sdphi = Slider(axdphi, 'dPhi', 0, 30, valinit=dphi)
sdtheta = Slider(axdtheta, 'dTheta', 0, 30, valinit=dtheta)
axflow = plt.axes([0.1, 0.05, 0.45, 0.04], axisbg=axcolor)
sflow = Slider(axflow, 'Flow', -180, 180, valinit=flow)
axusenew= plt.axes([0.75, 0.05, 0.15, 0.04], axisbg=axcolor)
susenew = CheckButtons(axusenew, ['New'], [True])
def update(val, axis=None):
phi, theta = sphi.val, stheta.val
dphi, dtheta = sdphi.val, sdtheta.val
flow = sflow.val
use_new = susenew.lines[0][0].get_visible()
ax.cla()
draw_sphere(ax)
if use_new:
draw_mesh_new(ax, theta=theta, dtheta=dtheta, phi=phi, dphi=dphi,
flow=flow, radius=11., dist=dist)
else:
draw_mesh_current(ax, theta=theta, dtheta=dtheta, phi=phi, dphi=dphi,
radius=11., dist=dist)
if not axis.startswith('d'):
ax.view_init(elev=90-theta if use_new else theta, azim=phi)
plt.gcf().canvas.draw()
stheta.on_changed(lambda v: update(v,'theta'))
sphi.on_changed(lambda v: update(v, 'phi'))
sdtheta.on_changed(lambda v: update(v, 'dtheta'))
sdphi.on_changed(lambda v: update(v, 'dphi'))
sflow.on_changed(lambda v: update(v, 'dflow'))
susenew.on_clicked(lambda v: update(v, 'use_new'))
update(None, 'phi')
plt.show()
if __name__ == "__main__":
    main()
1,076 | from abc import abstractmethod
from dataclasses import dataclass
from typing import List
import numpy as np
from scipy.stats import t # type: ignore
from gbstats.shared.models import (
FrequentistTestResult,
Statistic,
Uplift,
)
from gbstats.shared.tests import BaseABTest
@dataclass
class FrequentistConfig:
alpha: float = 0.05
test_value: float = 0
@dataclass
class SequentialConfig(FrequentistConfig):
sequential_tuning_parameter: float = 5000
class TTest(BaseABTest):
def __init__(
self,
stat_a: Statistic,
stat_b: Statistic,
config: FrequentistConfig = FrequentistConfig(),
):
"""Base class for one- and two-sided T-Tests with unequal variance.
All values are with respect to relative effects, not absolute effects.
A result prepared for integration with the stats runner can be
generated by calling `.compute_result()`
Args:
stat_a (Statistic): the "control" or "baseline" statistic
stat_b (Statistic): the "treatment" or "variation" statistic
"""
super().__init__(stat_a, stat_b)
self.alpha = config.alpha
self.test_value = config.test_value
@property
def variance(self) -> float:
return self.stat_b.variance / (
pow(self.stat_a.unadjusted_mean, 2) * self.stat_b.n
) + self.stat_a.variance * pow(self.stat_b.unadjusted_mean, 2) / (
pow(self.stat_a.unadjusted_mean, 4) * self.stat_a.n
)
@property
def point_estimate(self) -> float:
return (self.stat_b.mean - self.stat_a.mean) / self.stat_a.unadjusted_mean
@property
def critical_value(self) -> float:
return (self.point_estimate - self.test_value) / np.sqrt(self.variance)
@property
def dof(self) -> float:
# welch-satterthwaite approx
return pow(
self.stat_b.variance / self.stat_b.n + self.stat_a.variance / self.stat_a.n,
2,
) / (
pow(self.stat_b.variance, 2) / (pow(self.stat_b.n, 2) * (self.stat_b.n - 1))
+ pow(self.stat_a.variance, 2)
/ (pow(self.stat_a.n, 2) * (self.stat_a.n - 1))
)
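    # The property above is the Welch-Satterthwaite approximation:
    #
    #   dof ~ (s_a^2/n_a + s_b^2/n_b)^2
    #         / [ (s_a^2/n_a)^2/(n_a-1) + (s_b^2/n_b)^2/(n_b-1) ]
    #
    # where s^2 and n are the per-variation variances and sample sizes.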
@property
@abstractmethod
def p_value(self) -> float:
pass
@property
@abstractmethod
def confidence_interval(self) -> List[float]:
pass
    def _default_output(self) -> FrequentistTestResult:
"""Return uninformative output when AB test analysis can't be performed
adequately
"""
return FrequentistTestResult(
expected=0,
ci=[0, 0],
p_value=1,
uplift=Uplift(
dist="normal",
mean=0,
stddev=0,
),
)
def compute_result(self) -> FrequentistTestResult:
"""Compute the test statistics and return them
for the main gbstats runner
Returns:
FrequentistTestResult -
note the values are with respect to percent uplift,
not absolute differences
"""
        if self.stat_a.mean == 0:
            return self._default_output()
        if self.stat_a.unadjusted_mean == 0:
            return self._default_output()
        if self._has_zero_variance():
            return self._default_output()
return FrequentistTestResult(
expected=self.point_estimate,
ci=self.confidence_interval,
p_value=self.p_value,
uplift=Uplift(
dist="normal",
mean=self.point_estimate,
stddev=np.sqrt(self.variance),
),
)
class TwoSidedTTest(TTest):
@property
def p_value(self) -> float:
return 2 * (1 - t.cdf(abs(self.critical_value), self.dof))
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha / 2, self.dof) * np.sqrt(self.variance)
return [self.point_estimate - width, self.point_estimate + width]
class OneSidedTreatmentGreaterTTest(TTest):
@property
def p_value(self) -> float:
return 1 - t.cdf(self.critical_value, self.dof)
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha, self.dof) * np.sqrt(self.variance)
return [self.point_estimate - width, np.inf]
class OneSidedTreatmentLesserTTest(TTest):
@property
def p_value(self) -> float:
return t.cdf(self.critical_value, self.dof)
@property
def confidence_interval(self) -> List[float]:
width: float = t.ppf(1 - self.alpha, self.dof) * np.sqrt(self.variance)
return [-np.inf, self.point_estimate - width]
class SequentialTwoSidedTTest(TTest):
def __init__(
self,
stat_a: Statistic,
stat_b: Statistic,
config: SequentialConfig = SequentialConfig(),
):
super().__init__(
stat_a,
stat_b,
FrequentistConfig(alpha=config.alpha, test_value=config.test_value),
)
self.sequential_tuning_parameter = config.sequential_tuning_parameter
@property
def confidence_interval(self) -> List[float]:
# eq 9 in Waudby-Smith et al. 2023 https://arxiv.org/pdf/2103.06476v7.pdf
N = self.stat_a.n + self.stat_b.n
rho = self.rho
s2 = self.variance * N
width: float = np.sqrt(s2) * np.sqrt(
(
(2 * (N * np.power(rho, 2) + 1))
* np.log(np.sqrt(N * np.power(rho, 2) + 1) / self.alpha)
/ (np.power(N * rho, 2))
)
)
return [self.point_estimate - width, self.point_estimate + width]
@property
def rho(self) -> float:
# eq 161 in https://arxiv.org/pdf/2103.06476v7.pdf
return np.sqrt(
(-2 * np.log(self.alpha) + np.log(-2 * np.log(self.alpha) + 1))
/ self.sequential_tuning_parameter
)
@property
def p_value(self) -> float:
# eq 155 in https://arxiv.org/pdf/2103.06476v7.pdf
N = self.stat_a.n + self.stat_b.n
# slight reparameterization for this quantity below
st2 = np.power(self.point_estimate - self.test_value, 2) * N / (self.variance)
tr2p1 = N * np.power(self.rho, 2) + 1
evalue = np.exp(np.power(self.rho, 2) * st2 / (2 * tr2p1)) / np.sqrt(tr2p1)
return min(1 / evalue, 1) | null |
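# A minimal usage sketch. The Statistic construction below is an assumption
# (gbstats ships concrete implementations such as SampleMeanStatistic, but
# field names may differ by version):
#
#   stat_a = SampleMeanStatistic(sum=1000.0, sum_squares=11000.0, n=100)
#   stat_b = SampleMeanStatistic(sum=1100.0, sum_squares=13000.0, n=100)
#   result = TwoSidedTTest(stat_a, stat_b).compute_result()
#   print(result.expected, result.ci, result.p_value)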
1,077 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
"""
Abstract class for filesystems providers.
"""
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from datetime import datetime
class FileSystemProvider(metaclass=ABCMeta):
""" Provides tools to access a given filesystem. The filesystem may be distant, and subclasses of FileSystemProvider should take care of
doing appropriate caching.
"""
@classmethod
@abstractmethod
def get_needed_args(cls):
""" Returns a list of arguments needed to create a FileSystemProvider. In the form
{
"arg1": (int, False, "description1"),
"arg2: (str, True, "description2")
}
The first part of the tuple is the type, the second indicates if the arg is mandatory
Only int and str are supported as types.
"""
@classmethod
@abstractmethod
def init_from_args(cls, **args):
""" Given the args from get_needed_args, creates the FileSystemProvider """
def __init__(self, prefix: str):
""" Init the filesystem provider with a given prefix.
        :param prefix: The FileSystemProvider prefix.
"""
self.prefix = prefix
if not self.prefix.endswith("/"):
self.prefix += "/"
def _checkpath(self, path):
""" Checks that a given path is valid.
:raises FileNotFoundError: If path is invalid.
"""
if path.startswith("/") or ".." in path or path.strip() != path:
raise FileNotFoundError()
@abstractmethod
def from_subfolder(self, subfolder: str) -> FileSystemProvider:
"""
:param subfolder: The prefix of the new FileSystemProvider.
:returns: A new FileSystemProvider, with `subfolder` as prefix.
"""
@abstractmethod
def exists(self, path: str=None) -> bool:
"""
:param path: A path to verify.
:returns: True if the file at the given path exists. If the path is not given, then checks the existence of the prefix.
"""
@abstractmethod
def ensure_exists(self) -> None:
""" Ensure that the current prefix exists. If it is not the case, creates the directory. """
@abstractmethod
def put(self, filepath, content):
""" Write `content` in `filepath`"""
@abstractmethod
def get_fd(self, filepath: str, timestamp:datetime=None):
""" Returns a file descriptor.
If timestamp is not None, it gives an indication to the cache that the file must have been retrieved from the (possibly distant)
filesystem since the timestamp.
:raises FileNotFoundError: if the file does not exists or cannot be retrieved.
:raises IsADirectoryError: if `filepath` points to a directory.
:returns: A file descriptor pointing to `filepath`.
"""
@abstractmethod
def get(self, filepath, timestamp:datetime=None):
""" Get the content of a file.
If timestamp is not None, it gives an indication to the cache that the file must have been retrieved from the (possibly distant)
filesystem since the timestamp.
:raises FileNotFoundError: If the file does not exists or cannot be retrieved.
"""
@abstractmethod
    def list(self, folders: bool = True, files: bool = True, recursive: bool = False) -> list:
        """ List all the files/folders in this prefix. Folder names always end with '/'.
:param folders: Switch to list folders.
:param files: Switch to list files.
:param recursive: Switch to list recursively the prefix content.
:returns: The list of files/folders in the prefix.
"""
@abstractmethod
def delete(self, filepath: str=None):
""" Delete a path recursively. If filepath is None, then the prefix will be deleted.
:param filepath: The prefix entry to delete.
:raises FileNotFoundError: If `filepath` points to a non-existing entry in the prefix.
"""
@abstractmethod
def get_last_modification_time(self, filepath):
""" Get a timestamp representing the time of the last modification of the file at filepath """
@abstractmethod
def move(self, src, dest):
""" Move path src to path dest, recursively. """
@abstractmethod
def copy_to(self, src_disk, dest=None):
""" Copy the content of *on-disk folder* src_disk into dir dest. If dest is None, copy to the prefix."""
@abstractmethod
def copy_from(self, src, dest_disk):
""" Copy the content of src into the *on-disk folder* dest_disk. If src is None, copy from the prefix. """
@abstractmethod
def distribute(self, filepath, allow_folders=True):
""" Give information on how to distribute a file. Provides Zip files of folders. Can return:
("file", mimetype, fileobj) where fileobj is an object-like file (with read()) and mimetype its mime-type.
("url", None, url) where url is a url to a distant server which possess the file.
("invalid", None, None) if the file cannot be distributed
""" | null |
1,078 | # Copyright (c) 2013 Potential Ventures Ltd
# Copyright (c) 2013 SolarFlare Communications Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Potential Ventures Ltd,
# SolarFlare Communications Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Exceptions and functions for simulation result handling."""
import sys
# TODO: Could use cStringIO?
import traceback
import warnings
from io import StringIO
def raise_error(obj, msg):
"""Create a :exc:`TestError` exception and raise it after printing a traceback.
.. deprecated:: 1.3
Raise a standard Python exception instead of calling this function.
A stacktrace will be printed by cocotb automatically if the exception is unhandled.
Args:
obj: Object with a log method.
msg (str): The log message.
"""
warnings.warn(
"``raise_error`` is deprecated - raise a standard Exception instead",
DeprecationWarning,
stacklevel=2,
)
    _raise_error(obj, msg)
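# The modern replacement for raise_error/create_error is a plain raise:
#
#   raise RuntimeError(msg)  # cocotb prints the traceback if unhandled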
def _raise_error(obj, msg):
exc_info = sys.exc_info()
buff = StringIO()
traceback.print_exception(*exc_info, file=buff)
obj.log.error(f"{msg}\n{buff.getvalue()}")
exception = TestError(msg)
exception.stderr.write(buff.getvalue())
raise exception
def create_error(obj, msg):
"""Like :func:`raise_error`, but return the exception rather than raise it,
simply to avoid too many levels of nested `try/except` blocks.
.. deprecated:: 1.3
Raise a standard Python exception instead of calling this function.
Args:
obj: Object with a log method.
msg (str): The log message.
"""
warnings.warn(
"``create_error`` is deprecated - raise a standard Exception instead",
DeprecationWarning,
stacklevel=2,
)
try:
# use the private version to avoid multiple warnings
        _raise_error(obj, msg)
except TestError as error:
return error
return TestError("Creating error traceback failed")
class ReturnValue(Exception):
"""
Helper exception needed for Python versions prior to 3.3.
.. deprecated:: 1.4
Use a :keyword:`return` statement instead; this works in all supported versions of Python.
"""
def __init__(self, retval):
warnings.warn(
"``ReturnValue`` is deprecated, use a normal return statement instead.",
DeprecationWarning,
stacklevel=2,
)
self.retval = retval
class TestComplete(Exception):
"""
Exception showing that the test was completed. Sub-exceptions detail the exit status.
.. deprecated:: 1.6.0
The ``stdout`` and ``stderr`` attributes.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__stdout = StringIO()
self.__stderr = StringIO()
@staticmethod
def __deprecated(which: str) -> None:
warnings.warn(
f"Attribute {which} is deprecated and will be removed in the next major release",
DeprecationWarning,
stacklevel=3,
)
@property
def stdout(self) -> StringIO:
self.__deprecated("stdout")
return self.__stdout
@stdout.setter
def stdout(self, new_value: StringIO) -> None:
self.__deprecated("stdout")
self.__stdout = new_value
@property
def stderr(self) -> StringIO:
self.__deprecated("stderr")
return self.__stderr
@stderr.setter
def stderr(self, new_value: StringIO) -> None:
self.__deprecated("stderr")
self.__stderr = new_value
class ExternalException(Exception):
"""Exception thrown by :class:`cocotb.external` functions."""
def __init__(self, exception):
self.exception = exception
class TestError(TestComplete):
"""
Exception showing that the test was completed with severity Error.
.. deprecated:: 1.5
Raise a standard Python exception instead.
A stacktrace will be printed by cocotb automatically if the exception is unhandled.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"TestError is deprecated - raise a standard Exception instead",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
class TestFailure(TestComplete, AssertionError):
"""
Exception showing that the test was completed with severity Failure.
.. deprecated:: 1.6.0
Use a standard ``assert`` statement instead of raising this exception.
Use ``expect_fail`` rather than ``expect_error`` with this exception in the
:class:`cocotb.test` decorator.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"TestFailure is deprecated, use an ``assert`` statement instead",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
class TestSuccess(TestComplete):
"""Exception showing that the test was completed successfully."""
pass
class SimFailure(TestComplete):
"""Exception showing that the simulator exited unsuccessfully."""
pass
class SimTimeoutError(TimeoutError):
"""Exception for when a timeout, in terms of simulation time, occurs."""
pass | null |
1,079 | import os
from celery.canvas import chain
from django.conf import settings
from juriscraper.pacer import PacerSession
from cl.corpus_importer.tasks import (
filter_docket_by_tags,
get_docket_by_pacer_case_id,
get_pacer_case_id_and_title,
make_fjc_idb_lookup_params,
)
from cl.lib.celery_utils import CeleryThrottle
from cl.lib.command_utils import VerboseCommand, logger
from cl.recap.constants import (
CIVIL_RIGHTS_ACCOMMODATIONS,
CIVIL_RIGHTS_ADA_EMPLOYMENT,
CIVIL_RIGHTS_ADA_OTHER,
CIVIL_RIGHTS_JOBS,
CIVIL_RIGHTS_OTHER,
CIVIL_RIGHTS_VOTING,
CIVIL_RIGHTS_WELFARE,
CV_2017,
CV_2020,
PATENT,
PRISONER_CIVIL_RIGHTS,
PRISONER_PETITIONS_HABEAS_CORPUS,
PRISONER_PETITIONS_MANDAMUS_AND_OTHER,
PRISONER_PETITIONS_VACATE_SENTENCE,
PRISONER_PRISON_CONDITION,
SOCIAL_SECURITY,
)
from cl.recap.models import FjcIntegratedDatabase
from cl.search.tasks import add_or_update_recap_docket
PACER_USERNAME = os.environ.get("PACER_USERNAME", settings.PACER_USERNAME)
PACER_PASSWORD = os.environ.get("PACER_PASSWORD", settings.PACER_PASSWORD)
TAG = "xITWtdtYjRbPeHQMftyS"
TAG_SAMPLE = "QAKfjXAcxfjINeFsbtAI"
NOS_EXCLUSIONS = [
CIVIL_RIGHTS_OTHER,
CIVIL_RIGHTS_VOTING,
CIVIL_RIGHTS_JOBS,
CIVIL_RIGHTS_ACCOMMODATIONS,
CIVIL_RIGHTS_WELFARE,
CIVIL_RIGHTS_ADA_EMPLOYMENT,
CIVIL_RIGHTS_ADA_OTHER,
PRISONER_PETITIONS_VACATE_SENTENCE,
PRISONER_PETITIONS_HABEAS_CORPUS,
PRISONER_PETITIONS_MANDAMUS_AND_OTHER,
PRISONER_CIVIL_RIGHTS,
PRISONER_PRISON_CONDITION,
PATENT,
SOCIAL_SECURITY,
]
def get_fjc_rows():
items = FjcIntegratedDatabase.objects.exclude(
nature_of_suit__in=NOS_EXCLUSIONS,
).filter(
date_filed__gte="2014-01-01", dataset_source__in=[CV_2017, CV_2020]
)
return items
def get_everything_sample(options, sample_size):
items = get_fjc_rows()
tags = [TAG, TAG_SAMPLE]
get_dockets(options, items, tags, sample_size)
def price_sample(options, de_upper_bound):
items = get_fjc_rows()
tags = [TAG, TAG_SAMPLE]
get_dockets(
options, items, tags, sample_size=50, doc_num_end=de_upper_bound
)
def get_content_by_year(options, year):
items = get_fjc_rows()
start = f"{year}-01-01"
end = f"{year}-12-31"
items = items.filter(date_filed__gte=start, date_filed__lte=end)
tags = [TAG]
get_dockets(options, items, tags)
def get_everything_full(options):
items = get_fjc_rows()
tags = [TAG]
get_dockets(options, items, tags)
def get_dockets(options, items, tags, sample_size=0, doc_num_end=""):
"""Download dockets from PACER.
:param options: Options provided by argparse
:param items: Items from our FJC IDB database
:param tags: A list of tag names to associate with the purchased content.
:param sample_size: The number of items to get. If 0, get them all. Else,
get only this many and do it randomly.
:param doc_num_end: Only get docket numbers up to this value to constrain
costs. If set to an empty string, no constraints are applied. Note that
applying this value means no unnumbered entries will be retrieved by PACER.
"""
if sample_size > 0:
items = items.order_by("?")[:sample_size]
q = options["queue"]
throttle = CeleryThrottle(queue_name=q)
session = PacerSession(username=PACER_USERNAME, password=PACER_PASSWORD)
session.login()
for i, row in enumerate(items):
if i < options["offset"]:
continue
if i >= options["limit"] > 0:
break
if i % 5000 == 0:
# Re-authenticate just in case the auto-login mechanism isn't
# working.
session = PacerSession(
username=PACER_USERNAME, password=PACER_PASSWORD
)
session.login()
# All tests pass. Get the docket.
logger.info("Doing row %s: %s", i, row)
throttle.maybe_wait()
params = make_fjc_idb_lookup_params(row)
chain(
get_pacer_case_id_and_title.s(
pass_through=None,
docket_number=row.docket_number,
court_id=row.district_id,
cookies=session.cookies,
**params,
).set(queue=q),
filter_docket_by_tags.s(tags, row.district_id).set(queue=q),
get_docket_by_pacer_case_id.s(
court_id=row.district_id,
cookies=session.cookies,
tag_names=tags,
**{
"show_parties_and_counsel": True,
"show_terminated_parties": True,
"show_list_of_member_cases": False,
"doc_num_end": doc_num_end,
},
).set(queue=q),
add_or_update_recap_docket.s().set(queue=q),
).apply_async()
class Command(VerboseCommand):
help = "Purchase dockets from PACER"
    def add_arguments(self, parser):
parser.add_argument(
"--queue",
default="batch1",
help="The celery queue where the tasks should be processed.",
)
parser.add_argument(
"--offset",
type=int,
default=0,
help="The number of items to skip before beginning. Default is to "
"skip none.",
)
parser.add_argument(
"--limit",
type=int,
default=0,
help="After doing this number, stop. This number is not additive "
"with the offset parameter. Default is to do all of them.",
)
parser.add_argument(
"--task",
type=str,
required=True,
help="What task are we doing at this point?",
)
def handle(self, *args, **options):
logger.info(f"Using PACER username: {PACER_USERNAME}")
if options["task"] == "everything":
get_everything_full(options)
elif options["task"] == "everything_sample_50":
get_everything_sample(options, 50)
elif options["task"] == "everything_sample_10000":
# See email dated 2019-01-06
get_everything_sample(options, 10000)
elif options["task"] == "price_sample_30":
price_sample(options, "30")
elif options["task"] == "price_sample_40":
price_sample(options, "40")
elif options["task"] == "price_sample_50":
price_sample(options, "50")
elif options["task"] == "2018_only":
# Goes through to 2019-09-30
get_content_by_year(options, 2018)
elif options["task"] == "2017_only":
# Done and billed.
get_content_by_year(options, 2017)
elif options["task"] == "2016_only":
# Done and billed.
get_content_by_year(options, 2016)
else:
print(f"Unknown task: {options['task']}") | null |
1,080 | # -*- coding: utf-8 -*-
from __future__ import print_function
import time, re
from acq4.util import Qt
from .CanvasItem import CanvasItem
import acq4.Manager
import pyqtgraph as pg
import numpy as np
from .MarkersCanvasItem import MarkersCanvasItem
from .itemtypes import registerItemType
class MultiPatchLogCanvasItem(CanvasItem):
"""For displaying events recorded in a MultiPatch log file.
"""
_typeName = "Multipatch Log"
def __init__(self, handle, **kwds):
kwds.pop('viewRect', None)
self.data = handle.read()
self.groupitem = pg.ItemGroup()
self.pipettes = {}
for dev in self.data.devices():
arrow = pg.ArrowItem()
self.pipettes[dev] = arrow
arrow.setParentItem(self.groupitem)
opts = {'movable': False, 'rotatable': False, 'handle': handle}
opts.update(kwds)
if opts.get('name') is None:
opts['name'] = handle.shortName()
CanvasItem.__init__(self, self.groupitem, **opts)
self._timeSliderResolution = 10. # 10 ticks per second on the time slider
self._mpCtrlWidget = MultiPatchLogCtrlWidget()
self.layout.addWidget(self._mpCtrlWidget, self.layout.rowCount(), 0, 1, 2)
self._mpCtrlWidget.timeSlider.setMaximum(self._timeSliderResolution * (self.data.lastTime() - self.data.firstTime()))
self._mpCtrlWidget.timeSlider.valueChanged.connect(self.timeSliderChanged)
self._mpCtrlWidget.createMarkersBtn.clicked.connect(self.createMarkersClicked)
self.timeSliderChanged(0)
def timeSliderChanged(self, v):
t = self.currentTime()
pos = self.data.state(t)
for dev,arrow in self.pipettes.items():
p = pos.get(dev, {'position':None})['position']
if p is None:
arrow.hide()
else:
arrow.show()
arrow.setPos(*p[:2])
if t < 1e7:
# looks like a relative time
h = int(t / 3600.)
m = int((t % 3600) / 60.)
s = t % 60
tstr = "%d:%02d:%0.1f" % (h, m, s)
else:
# looks like a timestamp
tt = time.localtime(t)
tstr = time.strftime("%Y-%m-%d %H:%M:%S", tt)
self._mpCtrlWidget.timeLabel.setText(tstr)
def currentTime(self):
v = self._mpCtrlWidget.timeSlider.value()
return (v / self._timeSliderResolution) + self.data.firstTime()
def setCurrentTime(self, t):
self._mpCtrlWidget.timeSlider.setValue(self._timeSliderResolution * (t - self.data.firstTime()))
def createMarkersClicked(self):
fmt = str(self._mpCtrlWidget.createMarkersFormat.text())
# get name and position of each new marker
state = self.data.state(self.currentTime())
pips = []
for k,v in state.items():
if v.get('position') is None:
continue
# Extract marker number from pipette name
m = re.match(r'\D+(\d+)', k)
if m is not None:
n = int(m.group(1))
name = fmt % n
else:
name = k
pips.append((name, v['position']))
pips.sort()
# create new canvas item and add markers
markers = MarkersCanvasItem(name=self.name + '_markers')
for name, pos in pips:
markers.addMarker(name=name, position=pos)
self.canvas.addItem(markers)
@classmethod
def checkFile(cls, fh):
name = fh.shortName()
if name.startswith('MultiPatch_') and name.endswith('.log'):
return 10
else:
return 0
def METHOD_NAME(self, **kwds):
state = CanvasItem.METHOD_NAME(self, **kwds)
state['currentTime'] = self.currentTime()
return state
def restoreState(self, state):
self.setCurrentTime(state.pop('currentTime'))
CanvasItem.restoreState(self, state)
registerItemType(MultiPatchLogCanvasItem)
class MultiPatchLogCtrlWidget(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
self.layout = Qt.QGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.timeSlider = Qt.QSlider()
self.layout.addWidget(self.timeSlider, 0, 0)
self.timeSlider.setOrientation(Qt.Qt.Horizontal)
self.timeSlider.setMinimum(0)
self.timeLabel = Qt.QLabel()
self.layout.addWidget(self.timeLabel, 0, 1)
self.createMarkersBtn = Qt.QPushButton('Create markers')
self.layout.addWidget(self.createMarkersBtn, 1, 0)
self.createMarkersFormat = Qt.QLineEdit("Cell_%02d")
self.layout.addWidget(self.createMarkersFormat, 1, 1) | null |
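# Usage sketch (not part of the original module; assumes an acq4 file handle
# `fh` pointing at a MultiPatch_*.log file):
#   item = MultiPatchLogCanvasItem(fh)
#   item.setCurrentTime(item.data.firstTime() + 5.0)  # jump 5 s into the log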
1,081 | from pathlib import Path
import httpx
import pytest
import yaml
from ..adapters.mapping import MapAdapter
from ..client import Context, from_context, from_profile, record_history
from ..config import ConfigError
from ..profiles import load_profiles, paths
from ..queries import Key
from ..server.app import build_app
from .utils import fail_with_status_code
tree = MapAdapter({})
def test_configurable_timeout():
with Context.from_app(build_app(tree), timeout=httpx.Timeout(17)) as context:
assert context.http_client.timeout.connect == 17
assert context.http_client.timeout.read == 17
def test_client_version_check():
with Context.from_app(build_app(tree)) as context:
client = from_context(context)
# Too-old user agent should generate a 400.
context.http_client.headers["user-agent"] = "python-tiled/0.1.0a77"
with fail_with_status_code(400):
list(client)
# Gibberish user agent should generate a 400.
context.http_client.headers["user-agent"] = "python-tiled/gibberish"
with fail_with_status_code(400):
list(client)
def test_direct(tmpdir):
profile_content = {
"test": {
"structure_clients": "dask",
"direct": {
"trees": [
{"path": "/", "tree": "tiled.examples.generated_minimal:tree"}
]
},
}
}
with open(tmpdir / "example.yml", "w") as file:
file.write(yaml.dump(profile_content))
profile_dir = Path(tmpdir)
try:
paths.append(profile_dir)
load_profiles.cache_clear()
from_profile("test")
finally:
paths.remove(profile_dir)
def test_direct_config_error(tmpdir):
profile_content = {
"test": {
"direct": {
# Intentional config mistake!
# Value of trees must be a list.
"trees": {"path": "/", "tree": "tiled.examples.generated_minimal:tree"}
}
}
}
with open(tmpdir / "example.yml", "w") as file:
file.write(yaml.dump(profile_content))
profile_dir = Path(tmpdir)
try:
paths.append(profile_dir)
load_profiles.cache_clear()
with pytest.raises(ConfigError):
from_profile("test")
finally:
paths.remove(profile_dir)
def METHOD_NAME():
tree = MapAdapter({}, metadata={"number": 1})
for number, letter in enumerate(list("abcde"), start=2):
tree = MapAdapter({letter: tree}, metadata={"number": number})
with Context.from_app(build_app(tree)) as context:
client = from_context(context)
assert (
client["e"]["d"]["c"]["b"]["a"].metadata["number"]
== client["e", "d", "c", "b", "a"].metadata["number"]
== 1
)
assert (
client["e"]["d"]["c"]["b"].metadata["number"]
== client["e", "d", "c", "b"].metadata["number"]
== 2
)
assert (
client["e"]["d"]["c"].metadata["number"]
== client["e", "d", "c"].metadata["number"]
== 3
)
assert (
client["e"]["d"].metadata["number"] == client["e", "d"].metadata["number"] == 4
)
assert client["e"]["d", "c", "b"]["a"].metadata["number"] == 1
assert client["e"]["d", "c", "b", "a"].metadata["number"] == 1
assert client["e", "d", "c", "b"]["a"].metadata["number"] == 1
assert (
client.search(Key("number") == 5)["e", "d", "c", "b", "a"].metadata["number"]
== 1
)
assert (
client["e"].search(Key("number") == 4)["d", "c", "b", "a"].metadata["number"]
== 1
)
# Check that a reasonable KeyError is raised.
# Notice that we do not binary search to find _exactly_ where the problem is.
with pytest.raises(KeyError) as exc_info:
client["e", "d", "c", "b"]["X"]
assert exc_info.value.args[0] == "X"
with pytest.raises(KeyError) as exc_info:
client["e", "d", "c", "b", "X"]
assert exc_info.value.args[0] == ("e", "d", "c", "b", "X")
with pytest.raises(KeyError) as exc_info:
client["e", "d", "X", "b", "a"]
assert exc_info.value.args[0] == ("e", "d", "X", "b", "a")
# Check that jumping raises if a key along the path is not in the search
    # results.
with pytest.raises(KeyError) as exc_info:
client.search(Key("number") == 4)["e"]
assert exc_info.value.args[0] == "e"
with pytest.raises(KeyError) as exc_info:
client.search(Key("number") == 4)["e", "d", "c", "b", "a"]
assert exc_info.value.args[0] == "e"
with pytest.raises(KeyError) as exc_info:
client["e"].search(Key("number") == 3)["d"]
assert exc_info.value.args[0] == "d"
with pytest.raises(KeyError) as exc_info:
client["e"].search(Key("number") == 3)["d", "c", "b", "a"]
assert exc_info.value.args[0] == "d"
with record_history() as h:
client["e", "d", "c", "b", "a"]
assert len(h.requests) == 1
with record_history() as h:
client["e"]["d"]["c"]["b"]["a"]
assert len(h.requests) == 5 | null |
1,082 | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABCMeta, abstractmethod
from typing import Optional, List
from ...isas import ISA
from m5.objects import BaseMMU, Port, SubSystem, PcCountTrackerManager
from m5.params import PcCountPair
class AbstractCore(SubSystem):
__metaclass__ = ABCMeta
def __init__(self):
super().__init__()
@abstractmethod
def get_isa(self) -> ISA:
raise NotImplementedError
@abstractmethod
def requires_send_evicts(self) -> bool:
"""True if the CPU model or ISA requires sending evictions from caches
        to the CPU. Scenarios that warrant forwarding evictions to the CPU:
1. The O3 model must keep the LSQ coherent with the caches
2. The x86 mwait instruction is built on top of coherence
3. The local exclusive monitor in ARM systems
"""
return False
@abstractmethod
def is_kvm_core(self) -> bool:
"""
        KVM cores need to be set up differently than other cores. It is
        frequently useful to know whether a core is a KVM core or not, and
        this function reports that.
"""
raise NotImplementedError
@abstractmethod
def connect_icache(self, port: Port) -> None:
"""
This function should connect the response port from the instruction
cache to the right request port on the core.
:param port: The response port from the icache to connect to.
"""
raise NotImplementedError
@abstractmethod
def connect_dcache(self, port: Port) -> None:
"""
This function should connect the response port from the data cache to
the right request port on the core.
        :param port: The response port from the dcache to connect to.
"""
raise NotImplementedError
@abstractmethod
def connect_walker_ports(self, port1: Port, port2: Port) -> None:
"""
Connect the response port from itb and dtb to their respective request
ports in the core.
:param port1: The response port from itb walker to connect to.
:param port2: The response port from dtb walker to connect to.
"""
raise NotImplementedError
@abstractmethod
def set_workload(self, process: "Process") -> None:
raise NotImplementedError
@abstractmethod
def set_switched_out(self, value: bool) -> None:
raise NotImplementedError
@abstractmethod
def connect_interrupt(
self,
interrupt_requestor: Optional[Port] = None,
        interrupt_response: Optional[Port] = None,
) -> None:
"""Connect the core interrupts to the interrupt controller
This function is usually called from the cache hierarchy since the
optional ports can be implemented as cache ports.
"""
raise NotImplementedError
@abstractmethod
def get_mmu(self) -> BaseMMU:
"""Return the MMU for this core.
This is used in the board to setup system-specific MMU settings.
"""
raise NotImplementedError
@abstractmethod
def METHOD_NAME(
self, inst_starts: List[int], board_initialized: bool
) -> None:
"""Schedule simpoint exit events for the core.
This is used to raise SIMPOINT_BEGIN exit events in the gem5 standard
library. This is called through the set_workload functions and should
        not be called directly. Duplicate instruction counts in the
        inst_starts list will not be scheduled.
:param inst_starts: a list of SimPoints starting instructions
:param board_initialized: True if the board has already been
        initialized, otherwise False. This parameter is necessary as simpoints
        are set up differently depending on it.
"""
raise NotImplementedError("This core type does not support simpoints")
@abstractmethod
def _set_inst_stop_any_thread(
self, inst: int, board_initialized: bool
) -> None:
"""Schedule an exit event when any thread in this core reaches the
given number of instructions. This is called through the simulator
module and should not be called directly.
This is used to raise MAX_INSTS exit event in the gem5 standard library
:param inst: a number of instructions
:param board_initialized: True if the board has already been
initialized, otherwise False. This parameter is necessary as the
        instruction stop is set up differently depending on it.
"""
raise NotImplementedError("This core type does not support MAX_INSTS")
@abstractmethod
def add_pc_tracker_probe(
self, target_pair: List[PcCountPair], manager: PcCountTrackerManager
) -> None:
raise NotImplementedError | null |
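# Illustrative sketch (not from gem5 itself): a concrete core subclasses
# AbstractCore and fills in the abstract hooks, e.g.:
#   class MinimalCore(AbstractCore):
#       def get_isa(self) -> ISA:
#           return ISA.X86
#       def is_kvm_core(self) -> bool:
#           return False
#       ...  # remaining abstract methods elided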
1,083 | # @file Edk2BinaryBuild.py
# This module contains code that supports building of binary files
# This is the main entry for the build and test process of binary builds
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
import logging
from edk2toolext.environment import plugin_manager
from edk2toolext.environment.plugintypes.uefi_helper_plugin import HelperFunctions
from edk2toolext.edk2_invocable import Edk2Invocable
from edk2toolext.environment import self_describing_environment
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext import edk2_logging
# import pkg_resources
import DriverBuilder # this is a little weird
class BinaryBuildSettingsManager():
''' Platform settings will be accessed through this implementation. '''
def GetActiveScopes(self):
''' get scope '''
raise NotImplementedError()
def GetWorkspaceRoot(self):
''' get WorkspacePath '''
raise NotImplementedError()
def GetPackagesPath(self):
pass
def GetConfigurations(self):
'''
Gets the next configuration of this run
This is a generator pattern - use yield
'''
raise NotImplementedError()
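    # Illustrative sketch (not part of this module): a concrete settings
    # manager typically yields one dict of build variables per configuration,
    # e.g.:
    #   def GetConfigurations(self):
    #       for target in ("DEBUG", "RELEASE"):
    #           yield {"TARGET": target, "TARGET_ARCH": "X64"}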
def METHOD_NAME(self):
        ''' Called before the first build '''
return 0
def PostFinalBuildHook(self, ret):
''' Called after the final build with the summed return code '''
return 0
def PostBuildHook(self, ret):
''' Called after each build with the return code '''
return 0
def PreBuildHook(self):
''' Called before each build '''
return 0
def GetName(self):
        ''' Get the name of the repo, platform, or product being built by CI '''
raise NotImplementedError()
def AddCommandLineOptions(self, parserObj):
''' Implement in subclass to add command line options to the argparser '''
pass
def RetrieveCommandLineOptions(self, args):
''' Implement in subclass to retrieve command line options from the argparser '''
pass
class Edk2BinaryBuild(Edk2Invocable):
def GetLoggingLevel(self, loggerType):
''' Get the logging level for a given type
base == lowest logging level supported
con == Screen logging
txt == plain text file logging
md == markdown file logging
'''
        if (loggerType == "con") and not self.Verbose:
return logging.WARNING
return logging.DEBUG
def AddCommandLineOptions(self, parser):
pass
def RetrieveCommandLineOptions(self, args):
''' Retrieve command line options from the argparser '''
pass
def GetSettingsClass(self):
return BinaryBuildSettingsManager
def GetLoggingFileName(self, loggerType):
return "BINARY_BUILDLOG"
def Go(self):
ret = 0
env = shell_environment.GetBuildVars()
env.SetValue("PRODUCT_NAME",
self.PlatformSettings.GetName(), "Platform Hardcoded")
env.SetValue("BLD_*_BUILDID_STRING", "201905", "Current Version")
env.SetValue("BUILDREPORTING", "TRUE", "Platform Hardcoded")
env.SetValue("BUILDREPORT_TYPES",
'PCD DEPEX LIBRARY BUILD_FLAGS', "Platform Hardcoded")
# make sure python_command is set
python_command = sys.executable
        if " " in python_command:
python_command = '"' + python_command + '"'
shell_environment.GetEnvironment().set_shell_var("PYTHON_COMMAND", python_command)
# Run pre build hook
ret += self.PlatformSettings.METHOD_NAME()
ws = self.GetWorkspaceRoot()
pp = self.PlatformSettings.GetModulePkgsPath()
# run each configuration
ret = 0
try:
for config in self.PlatformSettings.GetConfigurations():
pre_ret = self.PlatformSettings.PreBuildHook() # run pre build hook
if pre_ret != 0:
ret = pre_ret
raise RuntimeError("We failed in prebuild hook")
                edk2_logging.log_progress("--Running next configuration--")
logging.info(config)
shell_environment.CheckpointBuildVars() # checkpoint our config
env = shell_environment.GetBuildVars()
                # go through the config and apply it to the environment
for key in config:
env.SetValue(key, config[key], "provided by configuration")
                # make sure to set this afterwards, in case the config also set it
env.SetValue("TOOL_CHAIN_TAG", "VS2017", "provided by builder")
platformBuilder = UefiBuilder() # create our builder
build_ret = platformBuilder.Go(ws, pp, self.helper, self.plugin_manager)
# we always want to run the post build hook
post_ret = self.PlatformSettings.PostBuildHook(ret)
if build_ret != 0:
ret = build_ret
raise RuntimeError("We failed in build")
if post_ret != 0:
ret = post_ret
raise RuntimeError("We failed in postbuild hook")
shell_environment.RevertBuildVars()
except RuntimeError:
pass
finally:
# make sure to do our final build hook
self.PlatformSettings.PostFinalBuildHook(ret)
return ret
def main():
Edk2BinaryBuild().Invoke()
if __name__ == "__main__":
DriverBuilder.main() # otherwise we're in __main__ context | null |
1,084 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyForwardEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyForwardEntry','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ForwardTableId(self): # String
return self.get_query_params().get('ForwardTableId')
def set_ForwardTableId(self, ForwardTableId): # String
self.add_query_param('ForwardTableId', ForwardTableId)
def get_InternalIp(self): # String
return self.get_query_params().get('InternalIp')
def set_InternalIp(self, InternalIp): # String
self.add_query_param('InternalIp', InternalIp)
def get_ForwardEntryId(self): # String
return self.get_query_params().get('ForwardEntryId')
def set_ForwardEntryId(self, ForwardEntryId): # String
self.add_query_param('ForwardEntryId', ForwardEntryId)
def get_ExternalIp(self): # String
return self.get_query_params().get('ExternalIp')
def set_ExternalIp(self, ExternalIp): # String
self.add_query_param('ExternalIp', ExternalIp)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_ForwardEntryName(self): # String
return self.get_query_params().get('ForwardEntryName')
def set_ForwardEntryName(self, ForwardEntryName): # String
self.add_query_param('ForwardEntryName', ForwardEntryName)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InternalPort(self): # String
return self.get_query_params().get('InternalPort')
def set_InternalPort(self, InternalPort): # String
self.add_query_param('InternalPort', InternalPort)
def get_PortBreak(self): # Boolean
return self.get_query_params().get('PortBreak')
def METHOD_NAME(self, PortBreak): # Boolean
self.add_query_param('PortBreak', PortBreak)
def get_ExternalPort(self): # String
return self.get_query_params().get('ExternalPort')
def set_ExternalPort(self, ExternalPort): # String
self.add_query_param('ExternalPort', ExternalPort) | null |
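# Usage sketch (not part of the SDK file; assumes aliyunsdkcore's AcsClient
# with placeholder credentials and IDs):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = ModifyForwardEntryRequest()
#   request.set_ForwardTableId('<forward-table-id>')
#   request.set_ForwardEntryId('<forward-entry-id>')
#   response = client.do_action_with_exception(request)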
1,085 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class UpdateTrafficMirrorFilterRuleAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'UpdateTrafficMirrorFilterRuleAttribute','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SourcePortRange(self): # String
return self.get_query_params().get('SourcePortRange')
def set_SourcePortRange(self, SourcePortRange): # String
self.add_query_param('SourcePortRange', SourcePortRange)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DestinationPortRange(self): # String
return self.get_query_params().get('DestinationPortRange')
def set_DestinationPortRange(self, DestinationPortRange): # String
self.add_query_param('DestinationPortRange', DestinationPortRange)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_RuleAction(self): # String
return self.get_query_params().get('RuleAction')
def set_RuleAction(self, RuleAction): # String
self.add_query_param('RuleAction', RuleAction)
def get_Protocol(self): # String
return self.get_query_params().get('Protocol')
def set_Protocol(self, Protocol): # String
self.add_query_param('Protocol', Protocol)
def get_SourceCidrBlock(self): # String
return self.get_query_params().get('SourceCidrBlock')
def set_SourceCidrBlock(self, SourceCidrBlock): # String
self.add_query_param('SourceCidrBlock', SourceCidrBlock)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def METHOD_NAME(self): # String
return self.get_query_params().get('DestinationCidrBlock')
def set_DestinationCidrBlock(self, DestinationCidrBlock): # String
self.add_query_param('DestinationCidrBlock', DestinationCidrBlock)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TrafficMirrorFilterRuleId(self): # String
return self.get_query_params().get('TrafficMirrorFilterRuleId')
def set_TrafficMirrorFilterRuleId(self, TrafficMirrorFilterRuleId): # String
self.add_query_param('TrafficMirrorFilterRuleId', TrafficMirrorFilterRuleId) | null |
1,086 | ## @file
# process FV image section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
from . import Section
from io import BytesIO
from .Ffs import SectionSuffix
import subprocess
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import FvImageSectionClassObject
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common import EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
## generate FV image section
#
#
class FvImageSection(FvImageSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
FvImageSectionClassObject.__init__(self)
## GenSection() method
#
# Generate FV image section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name, section alignment)
#
def METHOD_NAME(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False):
OutputFileList = []
if Dict is None:
Dict = {}
if self.FvFileType is not None:
FileList, IsSect = Section.Section.GetFileList(FfsInf, self.FvFileType, self.FvFileExtension)
            if IsSect:
return FileList, self.Alignment
Num = SecNum
MaxFvAlignment = 0
for FvFileName in FileList:
FvAlignmentValue = 0
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
# PI FvHeader is 0x48 byte
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
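                    # The attributes byte at offset 0x2E stores log2(alignment)
                    # in its low 5 bits; e.g. a value of 0x0A yields
                    # 1 << 10 = 0x400, i.e. 1K alignment.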
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
FvFileObj.close()
if FvAlignmentValue > MaxFvAlignment:
MaxFvAlignment = FvAlignmentValue
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
# MaxFvAlignment is larger than or equal to 1K
if MaxFvAlignment >= 0x400:
if MaxFvAlignment >= 0x100000:
#The max alignment supported by FFS is 16M.
if MaxFvAlignment >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(MaxFvAlignment // 0x100000) + "M"
else:
self.Alignment = str (MaxFvAlignment // 0x400) + "K"
else:
# MaxFvAlignment is less than 1K
self.Alignment = str (MaxFvAlignment)
return OutputFileList, self.Alignment
#
# Generate Fv
#
if self.FvName is not None:
Buffer = BytesIO()
Fv = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(self.FvName)
if Fv is not None:
self.Fv = Fv
if not self.FvAddr and self.Fv.BaseAddress:
self.FvAddr = self.Fv.BaseAddress
FvFileName = Fv.AddToBuffer(Buffer, self.FvAddr, MacroDict = Dict, Flag=IsMakefile)
if Fv.FvAlignment is not None:
if self.Alignment is None:
self.Alignment = Fv.FvAlignment
else:
if GenFdsGlobalVariable.GetAlignment (Fv.FvAlignment) > GenFdsGlobalVariable.GetAlignment (self.Alignment):
self.Alignment = Fv.FvAlignment
else:
if self.FvFileName is not None:
FvFileName = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvFileName)
if os.path.isfile(FvFileName):
FvFileObj = open (FvFileName, 'rb')
FvFileObj.seek(0)
# PI FvHeader is 0x48 byte
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
if isinstance(FvHeaderBuffer[0x2E], str):
FvAlignmentValue = 1 << (ord(FvHeaderBuffer[0x2E]) & 0x1F)
else:
FvAlignmentValue = 1 << (FvHeaderBuffer[0x2E] & 0x1F)
# FvAlignmentValue is larger than or equal to 1K
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x100000:
#The max alignment supported by FFS is 16M.
if FvAlignmentValue >= 0x1000000:
self.Alignment = "16M"
else:
self.Alignment = str(FvAlignmentValue // 0x100000) + "M"
else:
self.Alignment = str (FvAlignmentValue // 0x400) + "K"
else:
# FvAlignmentValue is less than 1K
self.Alignment = str (FvAlignmentValue)
FvFileObj.close()
else:
if len (mws.getPkgPath()) == 0:
                            EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in WORKSPACE: %s" % (self.FvFileName, GenFdsGlobalVariable.WorkSpaceDir))
else:
EdkLogger.error("GenFds", FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.FvFileName, '\n\t'.join(mws.getPkgPath())))
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FvImageSection Failed! %s NOT found in FDF" % self.FvName)
#
# Prepare the parameter of GenSection
#
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + SecNum + SectionSuffix.get("FV_IMAGE"))
GenFdsGlobalVariable.GenerateSection(OutputFile, [FvFileName], 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
return OutputFileList, self.Alignment | null |
1,087 | '''
Copyright (C) 2017-2023 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import asyncio
from collections import defaultdict
from cryptofeed.symbols import Symbol, str_to_symbol
import logging
import time
from decimal import Decimal
from typing import Dict, Tuple
from yapic import json
from cryptofeed.connection import AsyncConnection, RestEndpoint, Routes, WebsocketEndpoint
from cryptofeed.defines import HUOBI_SWAP, FUNDING, PERPETUAL
from cryptofeed.exchanges.huobi_dm import HuobiDM
from cryptofeed.types import Funding
LOG = logging.getLogger('feedhandler')
class HuobiSwap(HuobiDM):
id = HUOBI_SWAP
websocket_endpoints = [
WebsocketEndpoint('wss://api.hbdm.com/swap-ws', instrument_filter=('QUOTE', ('USD',))),
WebsocketEndpoint('wss://api.hbdm.com/linear-swap-ws', instrument_filter=('QUOTE', ('USDT',)))
]
rest_endpoints = [
RestEndpoint('https://api.hbdm.com', routes=Routes('/swap-api/v1/swap_contract_info', funding='/swap-api/v1/swap_funding_rate?contract_code={}'), instrument_filter=('QUOTE', ('USD',))),
RestEndpoint('https://api.hbdm.com', routes=Routes('/linear-swap-api/v1/swap_contract_info', funding='/linear-swap-api/v1/swap_funding_rate?contract_code={}'), instrument_filter=('QUOTE', ('USDT',)))
]
websocket_channels = {
**HuobiDM.websocket_channels,
FUNDING: 'funding'
}
@classmethod
def METHOD_NAME(cls, data: dict) -> Tuple[Dict, Dict]:
ret = {}
info = defaultdict(dict)
for d in data:
for e in d['data']:
base, quote = e['contract_code'].split("-")
# Perpetual futures contract == perpetual swap
s = Symbol(base, quote, type=PERPETUAL)
ret[s.normalized] = e['contract_code']
info['tick_size'][s.normalized] = e['price_tick']
info['instrument_type'][s.normalized] = s.type
return ret, info
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.funding_updates = {}
async def _funding(self, pairs):
"""
{
"status": "ok",
"data": {
"estimated_rate": "0.000100000000000000",
"funding_rate": "-0.000362360011416593",
"contract_code": "BTC-USD",
"symbol": "BTC",
"fee_asset": "BTC",
"funding_time": "1603872000000",
"next_funding_time": "1603900800000"
},
"ts": 1603866304635
}
"""
while True:
for pair in pairs:
# use symbol to look up correct endpoint
sym = str_to_symbol(self.exchange_symbol_to_std_symbol(pair))
                endpoint = None
                for ep in self.rest_endpoints:
                    if sym.quote in ep.instrument_filter[1]:
                        # use the endpoint that matched this symbol's quote
                        # currency (USD and USDT swaps use different routes)
                        endpoint = ep.route('funding').format(pair)
                        break
data = await self.http_conn.read(endpoint)
data = json.loads(data, parse_float=Decimal)
received = time.time()
update = (data['data']['funding_rate'], self.timestamp_normalize(int(data['data']['next_funding_time'])))
if pair in self.funding_updates and self.funding_updates[pair] == update:
await asyncio.sleep(1)
continue
self.funding_updates[pair] = update
f = Funding(
self.id,
self.exchange_symbol_to_std_symbol(pair),
None,
Decimal(data['data']['funding_rate']),
self.timestamp_normalize(int(data['data']['next_funding_time'])),
self.timestamp_normalize(int(data['data']['funding_time'])),
predicted_rate=Decimal(data['data']['estimated_rate']),
raw=data
)
await self.callback(FUNDING, f, received)
await asyncio.sleep(0.1)
async def subscribe(self, conn: AsyncConnection):
if FUNDING in self.subscription:
loop = asyncio.get_event_loop()
loop.create_task(self._funding(self.subscription[FUNDING]))
await super().subscribe(conn) | null |
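# Usage sketch (not part of the original module; assumes cryptofeed's
# FeedHandler):
#   from cryptofeed import FeedHandler
#   async def on_funding(funding, receipt_timestamp):
#       print(funding)
#   fh = FeedHandler()
#   fh.add_feed(HuobiSwap(symbols=['BTC-USD-PERP'], channels=[FUNDING],
#                         callbacks={FUNDING: on_funding}))
#   fh.run()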
1,088 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeInvocationResultsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeInvocationResults','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CommandId(self): # String
return self.get_query_params().get('CommandId')
def set_CommandId(self, CommandId): # String
self.add_query_param('CommandId', CommandId)
def get_PageNumber(self): # Long
return self.get_query_params().get('PageNumber')
def METHOD_NAME(self, PageNumber): # Long
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_ContentEncoding(self): # String
return self.get_query_params().get('ContentEncoding')
def set_ContentEncoding(self, ContentEncoding): # String
self.add_query_param('ContentEncoding', ContentEncoding)
def get_PageSize(self): # Long
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Long
self.add_query_param('PageSize', PageSize)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_InvokeId(self): # String
return self.get_query_params().get('InvokeId')
def set_InvokeId(self, InvokeId): # String
self.add_query_param('InvokeId', InvokeId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_InvokeRecordStatus(self): # String
return self.get_query_params().get('InvokeRecordStatus')
def set_InvokeRecordStatus(self, InvokeRecordStatus): # String
self.add_query_param('InvokeRecordStatus', InvokeRecordStatus)
def get_IncludeHistory(self): # Boolean
return self.get_query_params().get('IncludeHistory')
def set_IncludeHistory(self, IncludeHistory): # Boolean
self.add_query_param('IncludeHistory', IncludeHistory) | null |
1,089 | # Copyright 2011 Google Inc. All Rights Reserved.
# Modifications Copyright 2022-2023 Aerleon Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common library for network ports and protocol handling."""
class Error(Exception):
"""Base error class."""
class BadPortValue(Error):
"""Invalid port format."""
class BadPortRange(Error):
"""Out of bounds port range."""
class InvalidRange(Error):
"""Range is not valid (eg, single port)."""
class NotSinglePort(Error):
"""Port range defined instead of a single port."""
class PPP:
"""PPP: [P]ort [P]rotocol [P]airs.
Make port/protocol pairs an object for easy comparisons
"""
def __init__(self, service) -> None:
"""Init for PPP object.
Args:
service: A port/protocol pair as str (eg: '80/tcp', '22-23/tcp') or
a nested service name (eg: 'SSH')
"""
# remove comments (if any)
self.service = service.split('#')[0].strip()
if '/' in self.service:
self.port = self.service.split('/')[0]
self.protocol = self.service.split('/')[1]
self.nested = False
else:
# for nested services
self.nested = True
self.port = None
self.protocol = None
@property
def is_range(self):
if self.port:
return '-' in self.port
else:
return False
@property
def is_single_port(self):
if self.port:
return '-' not in self.port
else:
return False
@property
def start(self):
# return the first port in the range as int
if '-' in self.port:
self._start = int(self.port.split('-')[0])
else:
raise InvalidRange('%s is not a valid port range' % self.port)
return self._start
@property
def METHOD_NAME(self):
# return the last port in the range as int
if '-' in self.port:
self._end = int(self.port.split('-')[1])
else:
raise InvalidRange('%s is not a valid port range' % self.port)
return self._end
def __contains__(self, other):
# determine if a single-port object is within another objects' range
try:
return (
int(self.start) <= int(other.port) <= int(self.METHOD_NAME)
) and self.protocol == other.protocol
except:
raise InvalidRange('%s must be a range' % self.port)
def __lt__(self, other):
if self.is_single_port:
try:
return int(self.port) < int(other.port)
except:
return False
else:
raise NotSinglePort('Comparisons cannot be performed on port ranges')
def __gt__(self, other):
if self.is_single_port:
try:
return int(self.port) > int(other.port)
except:
return False
else:
raise NotSinglePort('Comparisons cannot be performed on port ranges')
def __le__(self, other):
if self.is_single_port:
try:
return int(self.port) <= int(other.port)
except:
return False
else:
raise NotSinglePort('Comparisons cannot be performed on port ranges')
def __ge__(self, other):
if self.is_single_port:
try:
return int(self.port) >= int(other.port)
except:
return False
else:
raise NotSinglePort('Comparisons cannot be performed on port ranges')
def __eq__(self, other):
if self.is_single_port:
try:
return int(self.port) == int(other.port) and self.protocol == other.protocol
except:
return False
else:
raise NotSinglePort('Comparisons cannot be performed on port ranges')
def Port(port):
"""Sanitize a port value.
Args:
port: a port value
Returns:
port: a port value
Raises:
BadPortValue: port is not valid integer or string
BadPortRange: port is outside valid range
"""
pval = -1
try:
pval = int(port)
except ValueError:
raise BadPortValue('port %s is not valid.' % port)
if pval < 0 or pval > 65535:
raise BadPortRange('port %s is out of range 0-65535.' % port)
return pval | null |
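# Illustrative usage (not part of the original module):
#   ssh = PPP('22/tcp')
#   well_known = PPP('0-1023/tcp')
#   ssh in well_known        # True: range containment, matching protocol
#   ssh < PPP('80/tcp')      # True: single-port comparison
#   Port('8080')             # -> 8080
#   Port('70000')            # raises BadPortRange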
1,090 | import tempfile
import pytest
import torch
from lhotse import CutSet
from lhotse.dataset import GlobalMVN, RandomizedSmoothing, SpecAugment
from lhotse.dataset.collation import collate_features
from lhotse.dataset.signal_transforms import DereverbWPE
from lhotse.utils import is_module_available
@pytest.fixture
def global_mvn():
cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
return GlobalMVN.from_cuts(cuts)
def test_global_mvn_initialization_and_stats_saving(global_mvn):
with tempfile.NamedTemporaryFile() as tf:
global_mvn.to_file(tf.name)
global_mvn2 = GlobalMVN.from_file(tf.name)
for key_item_1, key_item_2 in zip(
global_mvn.state_dict().items(), global_mvn2.state_dict().items()
):
assert torch.equal(key_item_1[1], key_item_2[1])
@pytest.mark.parametrize("in_tensor", [torch.ones(10, 40), torch.ones(2, 10, 40)])
def test_global_mvn_shapes(global_mvn, in_tensor):
assert global_mvn(in_tensor).shape == in_tensor.shape
assert global_mvn.inverse(in_tensor).shape == in_tensor.shape
@pytest.mark.parametrize("in_tensor", [torch.ones(10, 40), torch.ones(2, 10, 40)])
def test_global_mvn_inverse(global_mvn, in_tensor):
out_tensor = global_mvn(in_tensor)
assert torch.allclose(in_tensor, global_mvn.inverse(out_tensor))
def test_global_mvn_from_cuts():
cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
stats1 = GlobalMVN.from_cuts(cuts)
stats2 = GlobalMVN.from_cuts(cuts, max_cuts=1)
assert isinstance(stats1, GlobalMVN)
assert isinstance(stats2, GlobalMVN)
def test_specaugment_2d_input_raises_error():
cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
feats = torch.from_numpy(cuts[0].load_features())
tfnm = SpecAugment(p=1.0, time_warp_factor=10)
with pytest.raises(AssertionError):
augmented = tfnm(feats)
assert (feats != augmented).any()
@pytest.mark.parametrize("num_feature_masks", [0, 1, 2])
@pytest.mark.parametrize("num_frame_masks", [1, 2, 3])
def test_specaugment_3d_input_works(num_feature_masks, num_frame_masks):
cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
feats, feat_lens = collate_features(cuts)
tfnm = SpecAugment(
p=1.0,
time_warp_factor=10,
features_mask_size=5,
frames_mask_size=20,
num_feature_masks=num_feature_masks,
num_frame_masks=num_frame_masks,
)
augmented = tfnm(feats)
assert (feats != augmented).any()
def test_specaugment_state_dict():
# all values default
config = dict(
time_warp_factor=80,
num_feature_masks=1,
features_mask_size=13,
num_frame_masks=1,
frames_mask_size=70,
max_frames_mask_fraction=0.2,
p=0.5,
)
specaug = SpecAugment(**config)
state_dict = specaug.state_dict()
for key, value in config.items():
assert state_dict[key] == value
def test_specaugment_load_state_dict():
# all values non-default
config = dict(
time_warp_factor=85,
num_feature_masks=2,
features_mask_size=12,
num_frame_masks=2,
frames_mask_size=71,
max_frames_mask_fraction=0.25,
p=0.6,
)
specaug = SpecAugment()
specaug.load_state_dict(config)
for key, value in config.items():
assert getattr(specaug, key) == value
@pytest.mark.parametrize("sample_sigma", [True, False])
def test_randomized_smoothing(sample_sigma):
audio = torch.zeros(64, 4000, dtype=torch.float32)
tfnm = RandomizedSmoothing(sigma=0.1, sample_sigma=sample_sigma, p=0.8)
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
# All samples are different than the input audio
assert (audio != audio_aug).any()
# Different batch samples receive different augmentation:
# we sum along the time axis and compare the summed values;
# if all examples got the same augmentation,
# there would have been just one unique value.
assert len(set(audio_aug.sum(dim=1).tolist())) > 1
def test_randomized_smoothing_p1():
audio = torch.zeros(64, 4000, dtype=torch.float32)
tfnm = RandomizedSmoothing(sigma=0.1, p=1.0)
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
# Some (most) samples are different than the input audio
assert (audio != audio_aug).any()
# Different batch samples receive different augmentation
assert (audio_aug[0] != audio_aug[1]).any()
def METHOD_NAME():
audio = torch.zeros(64, 4000, dtype=torch.float32)
tfnm = RandomizedSmoothing(sigma=0.1, p=0.0)
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
    # Audio is unaffected
    assert (audio == audio_aug).all()
    # Audio is unaffected across batches
assert (audio_aug[0] == audio_aug[1]).all()
def test_randomized_smoothing_schedule():
audio = torch.zeros(16, 16000, dtype=torch.float32)
tfnm = RandomizedSmoothing(sigma=[(0, 0.01), (100, 0.5)], p=0.8)
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
# All samples are different than the input audio
assert (audio != audio_aug).any()
# Different batch samples receive different augmentation:
# we sum along the time axis and compare the summed values;
# if all examples got the same augmentation,
# there would have been just one unique value.
assert len(set(audio_aug.sum(dim=1).tolist())) > 1
tfnm.step = 1000
audio_aug2 = tfnm(audio)
# The schedule kicked in and the abs magnitudes should be larger.
assert audio_aug2.abs().sum() > audio_aug.abs().sum()
@pytest.mark.skipif(
not is_module_available("nara_wpe"), reason="Requires nara_wpe to be installed."
)
def test_wpe_single_channel():
B, T = 16, 32000
audio = torch.randn(B, T, dtype=torch.float32)
tfnm = DereverbWPE()
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
# Some samples are different than the input audio
assert (audio != audio_aug).any()
@pytest.mark.skipif(
not is_module_available("nara_wpe"), reason="Requires nara_wpe to be installed."
)
def test_wpe_multi_channel():
B, D, T = 16, 2, 32000
audio = torch.randn(B, D, T, dtype=torch.float32)
tfnm = DereverbWPE()
audio_aug = tfnm(audio)
# Shapes are the same
assert audio.shape == audio_aug.shape
# Some samples are different than the input audio
assert (audio != audio_aug).any() | null |
1,091 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class CreateHanaRestoreRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'CreateHanaRestore')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SidAdmin(self): # String
return self.get_query_params().get('SidAdmin')
def set_SidAdmin(self, SidAdmin): # String
self.add_query_param('SidAdmin', SidAdmin)
def get_RecoveryPointInTime(self): # Long
return self.get_query_params().get('RecoveryPointInTime')
def set_RecoveryPointInTime(self, RecoveryPointInTime): # Long
self.add_query_param('RecoveryPointInTime', RecoveryPointInTime)
def get_LogPosition(self): # Long
return self.get_query_params().get('LogPosition')
def set_LogPosition(self, LogPosition): # Long
self.add_query_param('LogPosition', LogPosition)
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def get_ClearLog(self): # Boolean
return self.get_query_params().get('ClearLog')
def set_ClearLog(self, ClearLog): # Boolean
self.add_query_param('ClearLog', ClearLog)
def get_Mode(self): # String
return self.get_query_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_query_param('Mode', Mode)
def get_CheckAccess(self): # Boolean
return self.get_query_params().get('CheckAccess')
def set_CheckAccess(self, CheckAccess): # Boolean
self.add_query_param('CheckAccess', CheckAccess)
def get_BackupId(self): # Long
return self.get_query_params().get('BackupId')
def set_BackupId(self, BackupId): # Long
self.add_query_param('BackupId', BackupId)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_UseDelta(self): # Boolean
return self.get_query_params().get('UseDelta')
def set_UseDelta(self, UseDelta): # Boolean
self.add_query_param('UseDelta', UseDelta)
def get_UseCatalog(self): # Boolean
return self.get_query_params().get('UseCatalog')
def set_UseCatalog(self, UseCatalog): # Boolean
self.add_query_param('UseCatalog', UseCatalog)
def get_BackupPrefix(self): # String
return self.get_query_params().get('BackupPrefix')
def set_BackupPrefix(self, BackupPrefix): # String
self.add_query_param('BackupPrefix', BackupPrefix)
def get_DatabaseName(self): # String
return self.get_query_params().get('DatabaseName')
def METHOD_NAME(self, DatabaseName): # String
self.add_query_param('DatabaseName', DatabaseName)
def get_VolumeId(self): # Integer
return self.get_query_params().get('VolumeId')
def set_VolumeId(self, VolumeId): # Integer
self.add_query_param('VolumeId', VolumeId)
def get_SourceClusterId(self): # String
return self.get_query_params().get('SourceClusterId')
def set_SourceClusterId(self, SourceClusterId): # String
self.add_query_param('SourceClusterId', SourceClusterId)
def get_SystemCopy(self): # Boolean
return self.get_query_params().get('SystemCopy')
def set_SystemCopy(self, SystemCopy): # Boolean
self.add_query_param('SystemCopy', SystemCopy) | null |
1,092 | import math
from logging import getLogger
from simulation.cell import Cell
from simulation.game_logic import SpawnLocationFinder
from simulation.interactables.pickups import ALL_PICKUPS
from simulation.interactables.score_location import ScoreLocation
from simulation.level_settings import DEFAULT_LEVEL_SETTINGS
from simulation.location import Location
from typing import List
LOGGER = getLogger(__name__)
class WorldMap(object):
"""
The non-player world state.
"""
def __init__(self, grid, settings):
"""
:param grid: All types of cells to be inserted into the map.
:param settings: Constant values provided when generating a level/map.
"""
self.grid = grid
self.settings = settings
self._spawn_location_finder = SpawnLocationFinder(self)
@classmethod
def _min_max_from_dimensions(cls, height, width):
"""
The value provided by the user will be an integer both for the width and height
components. We calculate the maximum and minimum dimensions in all directions.
"""
max_x = int(math.floor(width / 2))
min_x = -(width - max_x - 1)
max_y = int(math.floor(height / 2))
METHOD_NAME = -(height - max_y - 1)
return min_x, max_x, METHOD_NAME, max_y
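    # _min_max_from_dimensions example (illustrative): width=5 and height=4
    # give an x range of [-2, 2] and a y range of [-1, 2].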
@classmethod
def generate_empty_map(cls, height, width, settings):
new_settings = DEFAULT_LEVEL_SETTINGS.copy()
new_settings.update(settings)
(min_x, max_x, METHOD_NAME, max_y) = WorldMap._min_max_from_dimensions(height, width)
grid = {}
for x in range(min_x, max_x + 1):
for y in range(METHOD_NAME, max_y + 1):
location = Location(x, y)
grid[location] = Cell(location)
return cls(grid, new_settings)
def all_cells(self) -> List[Cell]:
return self.grid.values()
def interactable_cells(self):
return (cell for cell in self.all_cells() if cell.interactable)
def score_cells(self):
return (
cell
for cell in self.all_cells()
if isinstance(cell.interactable, ScoreLocation)
)
def pickup_cells(self):
return (
cell
for cell in self.all_cells()
if isinstance(cell.interactable, ALL_PICKUPS)
)
def is_on_map(self, location):
try:
self.grid[location]
except KeyError:
return False
return True
def get_cell(self, location) -> Cell:
try:
return self.grid[location]
except KeyError:
# For backwards-compatibility, this throws ValueError
raise ValueError("Location %s is not on the map" % location)
def get_cell_by_coords(self, x, y):
return self.get_cell(Location(x, y))
def clear_cell_actions(self, location):
try:
cell = self.get_cell(location)
cell.actions = []
except ValueError:
return
def max_y(self):
return max(self.grid.keys(), key=lambda c: c.y).y
def METHOD_NAME(self):
return min(self.grid.keys(), key=lambda c: c.y).y
def max_x(self):
return max(self.grid.keys(), key=lambda c: c.x).x
def min_x(self):
return min(self.grid.keys(), key=lambda c: c.x).x
@property
def num_rows(self):
return self.max_y() - self.METHOD_NAME() + 1
@property
def num_cols(self):
return self.max_x() - self.min_x() + 1
@property
def num_cells(self):
return self.num_rows * self.num_cols
def can_move_to(self, target_location):
if not self.is_on_map(target_location):
return False
cell = self.get_cell(target_location)
return (
cell.habitable
and (not cell.is_occupied or cell.avatar.is_moving)
and len(cell.moves) <= 1
)
def attackable_avatar(self, target_location):
"""
Return a boolean if the avatar is attackable at the given location (or will be
after next move), else return None.
"""
try:
cell = self.get_cell(target_location)
except ValueError:
return None
if cell.avatar:
return cell.avatar
if len(cell.moves) == 1:
return cell.moves[0].avatar
return None
def get_no_fog_distance(self):
return self.settings["NO_FOG_OF_WAR_DISTANCE"]
def get_partial_fog_distance(self):
return self.settings["PARTIAL_FOG_OF_WAR_DISTANCE"]
def get_random_spawn_location(self):
return self._spawn_location_finder.get_random_spawn_location()
def __repr__(self):
return repr(self.grid)
def __iter__(self):
return (
(
self.get_cell(Location(x, y))
for y in range(self.METHOD_NAME(), self.max_y() + 1)
)
for x in range(self.min_x(), self.max_x() + 1)
)
# Serialisation Utilities
def get_serialized_south_west_corner(self):
"""
Used in serialising the map size when sent to the front end. Very lightweight as
it consists of two integers.
:return: A dictionary with two values, x and y coordinates for the bottom left
(south-west) corner of the map.
"""
return {"x": self.min_x(), "y": self.METHOD_NAME()}
def get_serialized_north_east_corner(self):
"""
Used in serialising the map size when sent to the front end. Very lightweight as
it consists of two integers.
:return: A dictionary with two values, x and y coordinates for the top right
        (north-east) corner of the map.
"""
return {"x": self.max_x(), "y": self.max_y()}
def serialize_score_location(self):
"""
Used to serialize the score locations on every update.
:return: A single list that contains all score locations. Within
the list there are x and y coordinates.
"""
def get_coords(cell):
return {"location": {"x": cell.location.x, "y": cell.location.y}}
return [
get_coords(cell)
for cell in self.all_cells()
if isinstance(cell.interactable, ScoreLocation)
]
def serialize_obstacles(self):
"""
Used to serialize the obstacle locations on every update.
:return: A list that contains all the obstacle information generated by inner method.
"""
def serialize_obstacle(cell):
return {
"location": {"x": cell.location.x, "y": cell.location.y},
"width": 1,
"height": 1,
"type": "wall",
"orientation": "north",
}
return [
cell.obstacle.serialize(cell)
for cell in self.all_cells()
if not cell.habitable
]
def WorldMapStaticSpawnDecorator(world_map, spawn_location):
world_map._spawn_location_finder.get_random_spawn_location = lambda: spawn_location
return world_map | null |
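# Illustrative usage (assumes the surrounding `simulation` package is
# importable):
#   world_map = WorldMap.generate_empty_map(height=3, width=3, settings={})
#   world_map.num_cells                    # -> 9
#   world_map.can_move_to(Location(0, 0))  # True for an empty habitable cell
#   WorldMapStaticSpawnDecorator(world_map, Location(1, 1))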
1,093 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet50 model."""
# TODO(trandustin): Merge with resnet50.py.
import string
import tensorflow as tf
# Use batch normalization defaults from Pytorch.
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def bottleneck_block(inputs, filters, stage, block, strides):
"""Residual block with 1x1 -> 3x3 -> 1x1 convs in main path.
Note that strides appear in the second conv (3x3) rather than the first (1x1).
This is also known as "ResNet v1.5" as it differs from He et al. (2015)
(http://torch.ch/blog/2016/02/04/resnets.html).
Args:
inputs: tf.Tensor.
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the second conv layer in the block.
Returns:
tf.Tensor.
"""
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
filters1,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(
inputs)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2a')(
x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters2,
kernel_size=3,
strides=strides,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2b')(
x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2b')(
x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
kernel_initializer='he_normal',
name=conv_name_base + '2c')(
x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '2c')(
x)
shortcut = inputs
if not x.shape.is_compatible_with(shortcut.shape):
shortcut = tf.keras.layers.Conv2D(
filters3,
kernel_size=1,
use_bias=False,
strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(
shortcut)
shortcut = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
name=bn_name_base + '1')(
shortcut)
x = tf.keras.layers.add([x, shortcut])
x = tf.keras.layers.Activation('relu')(x)
return x
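# Shape sketch (illustrative, assuming NHWC inputs): a block widens channels to
# filters3, and the projection shortcut fires whenever shapes differ, e.g.
#   dummy = tf.zeros([1, 56, 56, 64])
#   out = bottleneck_block(dummy, [64, 64, 256], stage=2, block='a', strides=1)
#   # out.shape == (1, 56, 56, 256); the 1x1 shortcut conv was applied.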
def group(inputs, filters, num_blocks, stage, strides):
blocks = string.ascii_lowercase
x = bottleneck_block(inputs, filters, stage, block=blocks[0], strides=strides)
for i in range(num_blocks - 1):
x = bottleneck_block(x, filters, stage, block=blocks[i + 1], strides=1)
return x
def METHOD_NAME(filters, width_multiplier):
return [round(x * width_multiplier) for x in filters]
def resnet50_resizable_width(input_shape,
num_classes,
width_multiplier=1.,
omit_last_layer=False):
"""Builds width-resized ResNet50 with its width scaled by a factor `width_multiplier`.
Using strided conv, pooling, four groups of residual blocks, and pooling, the
network maps spatial features of size 224x224 -> 112x112 -> 56x56 -> 28x28 ->
14x14 -> 7x7 (Table 1 of He et al. (2015)).
Args:
input_shape: Shape tuple of input excluding batch dimension.
num_classes: Number of output classes.
    width_multiplier: Factor used to resize the number of channels at every
      stage. If the resized width is not an integer, it is rounded to the
      closest integer.
    omit_last_layer: Optional. If True, omits the final pooling and dense
      layers and returns the last convolutional features instead.
Returns:
tf.keras.Model.
"""
inputs = tf.keras.layers.Input(shape=input_shape)
x = tf.keras.layers.ZeroPadding2D(padding=3, name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(
METHOD_NAME([64], width_multiplier)[0],
kernel_size=7,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer='he_normal',
name='conv1')(
x)
x = tf.keras.layers.BatchNormalization(
momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, name='bn_conv1')(
x)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, padding='same')(x)
x = group(
x,
METHOD_NAME([64, 64, 256], width_multiplier),
stage=2,
num_blocks=3,
strides=1)
x = group(
x,
METHOD_NAME([128, 128, 512], width_multiplier),
stage=3,
num_blocks=4,
strides=2)
x = group(
x,
METHOD_NAME([256, 256, 1024], width_multiplier),
stage=4,
num_blocks=6,
strides=2)
x = group(
x,
METHOD_NAME([512, 512, 2048], width_multiplier),
stage=5,
num_blocks=3,
strides=2)
if omit_last_layer:
return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = tf.keras.layers.Dense(
num_classes,
activation=None,
kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
name='fc1000')(
x)
return tf.keras.Model(inputs=inputs, outputs=x, name='resnet50') | null |
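if __name__ == '__main__':
  # Minimal smoke test (illustrative, not part of the original module):
  # build a half-width model and print its layer summary. Assumes a working
  # TF2 installation.
  model = resnet50_resizable_width(
      input_shape=(224, 224, 3), num_classes=10, width_multiplier=0.5)
  model.summary()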
1,094 | #/*##########################################################################
# Copyright (C) 2004-2023 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__ = "V.A. Sole & T. Coutinho - ESRF"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__doc__ = """
This plugin allows you to monitor TANGO attributes in a plot window.
It needs PyTango and Taurus installed to be operational.
To use it from within PyMca, just add this file to your PyMca/plugins folder.
You can also run it as a stand-alone script.
"""
import numpy
from PyMca5.PyMcaCore import Plugin1DBase
from PyMca5.PyMcaGui import PyMcaQt as qt
Qt = qt
from taurus import Attribute
from taurus import Release
from taurus.core import TaurusEventType
from taurus.qt.qtcore.taurusqlistener import QObjectTaurusListener
from taurus.qt.qtgui.panel import TaurusModelChooser
class TaurusPlugin1D(Plugin1DBase.Plugin1DBase, QObjectTaurusListener):
def __init__(self, plotWindow, **kw):
Plugin1DBase.Plugin1DBase.__init__(self, plotWindow, **kw)
QObjectTaurusListener.__init__(self)
# "standard" way to handle multiple calls
self.methodDict = {}
text = "Show the device selector.\n"
text += "Make sure your TANGO_HOST\n"
text += "environment variable is set"
function = self._showTaurusTree
info = text
icon = None
self.methodDict["Show"] =[function,
info,
icon]
self._oldModels = []
self._newModels = []
self._widget = None
def METHOD_NAME(self, evt_src, evt_type, evt_value):
if evt_type not in (TaurusEventType.Change,
TaurusEventType.Periodic):
return
y = evt_value.value
x = numpy.arange(y.shape[0])
self.addCurve(x, y, legend=evt_src.getNormalName())
def onSelectionChanged(self, models):
if self._oldModels in [None, []]:
self._attrDict = {}
for model in models:
try:
attr = Attribute(model)
except Exception:
# old PyTango versions do not handle unicode
attr = Attribute(str(model))
#force a read -> attr.read()
attr.addListener(self)
legend = qt.safe_str(attr.getNormalName())
self._attrDict[legend] = attr
self._oldModels = models
else:
keptModels = []
newModels = []
for model in models:
if model in self._oldModels:
keptModels.append(model)
else:
newModels.append(model)
for model in self._oldModels:
if model not in keptModels:
attr = Attribute(model)
attr.removeListener(self)
legend = qt.safe_str(attr.getNormalName())
if legend in self._attrDict:
del self._attrDict[legend]
print("Trying to remove ", legend)
self.removeCurve(legend, replot=False)
for model in newModels:
attr = Attribute(model)
# attr.read()
attr.addListener(self)
legend = qt.safe_str(attr.getNormalName())
self._attrDict[legend] = attr
self._oldModels = keptModels + newModels
#Methods to be implemented by the plugin
# I should put this mechanism in the base class ...
def getMethods(self, plottype=None):
"""
A list with the NAMES associated to the callable methods
that are applicable to the specified plot.
Plot type can be "SCAN", "MCA", None, ...
"""
# visualize everywhere, therefore ignore MCA or SCAN
# if plottype in ["MCA"]:
# return []
names = list(self.methodDict.keys())
names.sort()
return names
def getMethodToolTip(self, name):
"""
Returns the help associated to the particular method name or None.
"""
return self.methodDict[name][1]
def getMethodPixmap(self, name):
"""
Returns the pixmap associated to the particular method name or None.
"""
return self.methodDict[name][2]
def applyMethod(self, name):
"""
The plugin is asked to apply the method associated to name.
"""
self.methodDict[name][0]()
return
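    # Example (illustrative): the host plot window dispatches by method name,
    # e.g. plugin.applyMethod("Show") pops up the Taurus model chooser.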
def _showTaurusTree(self):
if self._widget is None:
self._widget = TaurusModelChooser()
#self._adapter = TaurusPyMcaAdapter()
if Release.version_info >= (4,):
self._widget.updateModels.connect(self.onSelectionChanged)
else:
Qt.QObject.connect(self._widget,
Qt.SIGNAL("updateModels"),
self.onSelectionChanged)
self._widget.show()
MENU_TEXT = "Taurus Device Browser"
def getPlugin1DInstance(plotWindow, **kw):
ob = TaurusPlugin1D(plotWindow)
return ob
if __name__ == "__main__":
app = qt.QApplication([])
import os
from PyMca5.PyMcaGui import ScanWindow
plot = ScanWindow.ScanWindow()
pluginDir = os.path.dirname(os.path.abspath(__file__))
    SILX = False
    if SILX:
        plot.pluginsToolButton.setPluginDirectoryList([pluginDir])
        plot.pluginsToolButton.getPlugins()
    else:
        plot.setPluginDirectoryList([pluginDir])
        plot.getPlugins()
plot.show()
app.exec() | null |
1,095 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class CreateLoadBalancerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'CreateLoadBalancer','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LoadBalancerEdition(self): # String
return self.get_query_params().get('LoadBalancerEdition')
def set_LoadBalancerEdition(self, LoadBalancerEdition): # String
self.add_query_param('LoadBalancerEdition', LoadBalancerEdition)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ModificationProtectionConfig(self): # Struct
return self.get_query_params().get('ModificationProtectionConfig')
def set_ModificationProtectionConfig(self, ModificationProtectionConfig): # Struct
if ModificationProtectionConfig.get('Reason') is not None:
self.add_query_param('ModificationProtectionConfig.Reason', ModificationProtectionConfig.get('Reason'))
if ModificationProtectionConfig.get('Status') is not None:
self.add_query_param('ModificationProtectionConfig.Status', ModificationProtectionConfig.get('Status'))
def get_LoadBalancerBillingConfig(self): # Struct
return self.get_query_params().get('LoadBalancerBillingConfig')
def set_LoadBalancerBillingConfig(self, LoadBalancerBillingConfig): # Struct
if LoadBalancerBillingConfig.get('BandwidthPackageId') is not None:
self.add_query_param('LoadBalancerBillingConfig.BandwidthPackageId', LoadBalancerBillingConfig.get('BandwidthPackageId'))
if LoadBalancerBillingConfig.get('InternetChargeType') is not None:
self.add_query_param('LoadBalancerBillingConfig.InternetChargeType', LoadBalancerBillingConfig.get('InternetChargeType'))
if LoadBalancerBillingConfig.get('InternetBandwidth') is not None:
self.add_query_param('LoadBalancerBillingConfig.InternetBandwidth', LoadBalancerBillingConfig.get('InternetBandwidth'))
if LoadBalancerBillingConfig.get('PayType') is not None:
self.add_query_param('LoadBalancerBillingConfig.PayType', LoadBalancerBillingConfig.get('PayType'))
def get_AddressIpVersion(self): # String
return self.get_query_params().get('AddressIpVersion')
def set_AddressIpVersion(self, AddressIpVersion): # String
self.add_query_param('AddressIpVersion', AddressIpVersion)
def get_DeletionProtectionEnabled(self): # Boolean
return self.get_query_params().get('DeletionProtectionEnabled')
def set_DeletionProtectionEnabled(self, DeletionProtectionEnabled): # Boolean
self.add_query_param('DeletionProtectionEnabled', DeletionProtectionEnabled)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_LoadBalancerName(self): # String
return self.get_query_params().get('LoadBalancerName')
def set_LoadBalancerName(self, LoadBalancerName): # String
self.add_query_param('LoadBalancerName', LoadBalancerName)
def get_AddressType(self): # String
return self.get_query_params().get('AddressType')
def set_AddressType(self, AddressType): # String
self.add_query_param('AddressType', AddressType)
def METHOD_NAME(self): # String
return self.get_query_params().get('AddressAllocatedMode')
def set_AddressAllocatedMode(self, AddressAllocatedMode): # String
self.add_query_param('AddressAllocatedMode', AddressAllocatedMode)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ZoneMappings(self): # Array
return self.get_query_params().get('ZoneMappings')
def set_ZoneMappings(self, ZoneMappings): # Array
for index1, value1 in enumerate(ZoneMappings):
if value1.get('VSwitchId') is not None:
self.add_query_param('ZoneMappings.' + str(index1 + 1) + '.VSwitchId', value1.get('VSwitchId'))
if value1.get('EipType') is not None:
self.add_query_param('ZoneMappings.' + str(index1 + 1) + '.EipType', value1.get('EipType'))
if value1.get('ZoneId') is not None:
self.add_query_param('ZoneMappings.' + str(index1 + 1) + '.ZoneId', value1.get('ZoneId'))
if value1.get('AllocationId') is not None:
self.add_query_param('ZoneMappings.' + str(index1 + 1) + '.AllocationId', value1.get('AllocationId'))
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId) | null |
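# Example (illustrative): nested parameters are flattened into indexed query
# keys such as 'ZoneMappings.1.VSwitchId' by the setters above; the IDs below
# are placeholders.
#   request = CreateLoadBalancerRequest()
#   request.set_LoadBalancerName('my-alb')
#   request.set_VpcId('vpc-xxxx')
#   request.set_ZoneMappings([{'ZoneId': 'cn-hangzhou-a', 'VSwitchId': 'vsw-xxxx'}])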
1,096 | #!/usr/bin/env python
import json
import aiohttp
import asyncio
import logging
import hummingbot.connector.exchange.mexc.mexc_constants as CONSTANTS
import hummingbot.connector.exchange.mexc.mexc_utils as mexc_utils
from typing import Dict, Optional, AsyncIterable, Any, List
from hummingbot.connector.exchange.mexc.mexc_auth import MexcAuth
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.logger import HummingbotLogger
class MexcWebSocketAdaptor:
DEAL_CHANNEL_ID = "push.deal"
DEPTH_CHANNEL_ID = "push.depth"
    SUBSCRIPTION_LIST = {DEAL_CHANNEL_ID, DEPTH_CHANNEL_ID}
_ID_FIELD_NAME = "id"
_logger: Optional[HummingbotLogger] = None
MESSAGE_TIMEOUT = 120.0
PING_TIMEOUT = 10.0
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(
self,
throttler: AsyncThrottler,
auth: Optional[MexcAuth] = None,
shared_client: Optional[aiohttp.ClientSession] = None,
):
self._auth: Optional[MexcAuth] = auth
        self._is_private = self._auth is not None
self._WS_URL = CONSTANTS.MEXC_WS_URL_PUBLIC
self._shared_client = shared_client
self._websocket: Optional[aiohttp.ClientWebSocketResponse] = None
self._throttler = throttler
def get_shared_client(self) -> aiohttp.ClientSession:
if not self._shared_client:
self._shared_client = aiohttp.ClientSession()
return self._shared_client
async def send_request(self, payload: Dict[str, Any]):
await self._websocket.send_json(payload)
async def send_request_str(self, payload: str):
await self._websocket.send_str(payload)
async def subscribe_to_order_book_streams(self, trading_pairs: List[str]):
try:
for trading_pair in trading_pairs:
trading_pair = mexc_utils.convert_to_exchange_trading_pair(trading_pair)
subscribe_deal_request: Dict[str, Any] = {
"op": "sub.deal",
"symbol": trading_pair,
}
async with self._throttler.execute_task(CONSTANTS.MEXC_WS_URL_PUBLIC):
await self.send_request_str(json.dumps(subscribe_deal_request))
subscribe_depth_request: Dict[str, Any] = {
"op": "sub.depth",
"symbol": trading_pair,
}
async with self._throttler.execute_task(CONSTANTS.MEXC_WS_URL_PUBLIC):
await self.send_request_str(json.dumps(subscribe_depth_request))
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to order book trading and delta streams...", exc_info=True
)
raise
async def subscribe_to_user_streams(self):
pass
async def METHOD_NAME(self):
pass
async def connect(self):
try:
self._websocket = await self.get_shared_client().ws_connect(
url=self._WS_URL)
except Exception as e:
self.logger().error(f"Websocket error: '{str(e)}'", exc_info=True)
raise
# disconnect from exchange
async def disconnect(self):
if self._websocket is None:
return
await self._websocket.close()
async def iter_messages(self) -> AsyncIterable[Any]:
try:
while True:
try:
msg = await asyncio.wait_for(self._websocket.receive(), timeout=self.MESSAGE_TIMEOUT)
if msg.type == aiohttp.WSMsgType.CLOSED:
raise ConnectionError
yield json.loads(msg.data)
except asyncio.TimeoutError:
pong_waiter = self._websocket.ping()
self.logger().warning("WebSocket receive_json timeout ...")
await asyncio.wait_for(pong_waiter, timeout=self.PING_TIMEOUT)
except ConnectionError:
return | null |
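# Example (illustrative): a minimal consumer loop. The names below match this
# module; the empty rate-limit list passed to the throttler is an assumption.
#
# async def demo():
#     adaptor = MexcWebSocketAdaptor(throttler=AsyncThrottler(rate_limits=[]))
#     await adaptor.connect()
#     await adaptor.subscribe_to_order_book_streams(["BTC-USDT"])
#     async for message in adaptor.iter_messages():
#         print(message)
#         break
#     await adaptor.disconnect()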
1,097 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
from numpy.polynomial.polynomial import polyval
class ReferencePath:
def __init__(self):
self.MINIMUM_PATH_LENGTH = 5
self.MAX_LAT_CHANGE = 0.1
self.init_y_last = None
def get_path_length(self, speed_mps):
path_length = self.MINIMUM_PATH_LENGTH
current_speed = speed_mps
if current_speed is not None:
if path_length < current_speed * 2:
path_length = math.ceil(current_speed * 2)
return path_length
def get_ref_path_init_y(self, init_y_perception):
if self.init_y_last is None:
return 0
if abs(init_y_perception - self.init_y_last) < self.MAX_LAT_CHANGE:
return init_y_perception
else:
if init_y_perception > self.init_y_last:
return self.init_y_last + self.MAX_LAT_CHANGE
else:
return self.init_y_last - self.MAX_LAT_CHANGE
def METHOD_NAME(self, perception, chassis):
path_length = self.get_path_length(chassis.get_speed_mps())
init_y_perception = (perception.right_lm_coef[0] +
perception.left_lm_coef[0]) / -2.0
init_y = self.get_ref_path_init_y(init_y_perception)
self.init_y_last = init_y
path_x, path_y = self._get_perception_ref_path(
perception, path_length, init_y)
return path_x, path_y, path_length
def _get_perception_ref_path(self, perception, path_length, init_y):
path_coef = [0, 0, 0, 0]
path_coef[0] = -1 * init_y
quality = perception.right_lm_quality + perception.left_lm_quality
if quality > 0:
for i in range(1, 4):
path_coef[i] = (perception.right_lm_coef[i] *
perception.right_lm_quality +
perception.left_lm_coef[i] *
perception.left_lm_quality) / quality
path_x = []
path_y = []
for x in range(int(path_length)):
y = -1 * polyval(x, path_coef)
path_x.append(x)
path_y.append(y)
return path_x, path_y
def get_ref_path_by_lmr(self, perception, routing, adv):
path_length = self.get_path_length(adv.speed_mps)
rpath_x, rpath_y = routing.get_local_segment_spline(adv.x,
adv.y,
adv.heading)
init_y_perception = (perception.right_lm_coef[0] +
perception.left_lm_coef[0]) / -2.0
quality = perception.right_lm_quality + perception.left_lm_quality
quality = quality / 2.0
if len(rpath_x) >= path_length and routing.human and rpath_y[0] <= 3:
init_y_routing = rpath_y[0]
init_y = self.get_ref_path_init_y(init_y_routing)
if quality > 0.1:
quality = 0.1
self.init_y_last = init_y
else:
init_y = self.get_ref_path_init_y(init_y_perception)
self.init_y_last = init_y
lmpath_x, lmpath_y = self._get_perception_ref_path(
perception, path_length, init_y)
if len(rpath_x) < path_length:
return lmpath_x, lmpath_y, path_length
routing_shift = rpath_y[0] - init_y
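        # Blend note (illustrative): each point below is a quality-weighted
        # average of the lane-marker path and the shifted routing path,
        #   y = (lm_y * q + (routing_y - shift)) / (1 + q),
        # so low lane-marker quality (q -> 0) falls back to pure routing.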
path_x = []
path_y = []
for i in range(int(path_length)):
# TODO(yifei): more accurate shift is needed.
y = (lmpath_y[i] * quality + rpath_y[i] - routing_shift) / (
1 + quality)
path_x.append(i)
path_y.append(y)
return path_x, path_y, path_length
def shift_point(self, p, p2, distance):
delta_y = p2.y - p.y
delta_x = p2.x - p.x
angle = 0
if distance >= 0:
angle = math.atan2(delta_y, delta_x) + math.pi / 2.0
else:
angle = math.atan2(delta_y, delta_x) - math.pi / 2.0
p1n = []
p1n.append(p.x + (math.cos(angle) * distance))
p1n.append(p.y + (math.sin(angle) * distance))
p2n = []
p2n.append(p2.x + (math.cos(angle) * distance))
p2n.append(p2.y + (math.sin(angle) * distance))
return p1n, p2n | null |
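# Worked example (illustrative): get_ref_path_init_y rate-limits lateral jumps
# to MAX_LAT_CHANGE (0.1 m) per cycle:
#   rp = ReferencePath()
#   rp.init_y_last = 0.0
#   rp.get_ref_path_init_y(0.5)   # -> 0.1 (clamped)
#   rp.get_ref_path_init_y(0.05)  # -> 0.05 (within tolerance)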
1,098 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeRunningLogRecordsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeRunningLogRecords','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_RoleType(self): # String
return self.get_query_params().get('RoleType')
def set_RoleType(self, RoleType): # String
self.add_query_param('RoleType', RoleType)
def get_NodeId(self): # String
return self.get_query_params().get('NodeId')
def set_NodeId(self, NodeId): # String
self.add_query_param('NodeId', NodeId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_QueryKeyword(self): # String
return self.get_query_params().get('QueryKeyword')
def set_QueryKeyword(self, QueryKeyword): # String
self.add_query_param('QueryKeyword', QueryKeyword)
def get_EndTime(self): # String
return self.get_query_params().get('EndTime')
def METHOD_NAME(self, EndTime): # String
self.add_query_param('EndTime', EndTime)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_DBName(self): # String
return self.get_query_params().get('DBName')
def set_DBName(self, DBName): # String
self.add_query_param('DBName', DBName)
def get_CharacterType(self): # String
return self.get_query_params().get('CharacterType')
def set_CharacterType(self, CharacterType): # String
self.add_query_param('CharacterType', CharacterType)
def get_OrderType(self): # String
return self.get_query_params().get('OrderType')
def set_OrderType(self, OrderType): # String
self.add_query_param('OrderType', OrderType) | null |
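# Example (illustrative): a time-ranged, paginated log query; the instance ID
# and timestamp format shown are assumptions, not values from this module.
#   request = DescribeRunningLogRecordsRequest()
#   request.set_InstanceId('r-xxxx')
#   request.set_StartTime('2023-01-01T00:00Z')
#   request.METHOD_NAME('2023-01-02T00:00Z')  # the EndTime setter above
#   request.set_PageSize(30)
#   request.set_PageNumber(1)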
1,099 | import re
import pytest
from alfasim_sdk._internal.types import MultipleReference
from alfasim_sdk._internal.types import Reference
@pytest.mark.parametrize("expression_type", ["enable_expr", "visible_expr"])
def test_enable_expr_and_visible_expr(expression_type):
from alfasim_sdk._internal.types import String
inputs = {"value": "value", "caption": "caption", expression_type: ""}
with pytest.raises(TypeError, match=f"'{expression_type}' must be callable"):
String(**inputs)
def function_definition(): # pragma: no cover
pass
valid_input_1 = {"value": "value", "caption": "caption", expression_type: None}
valid_input_2 = {
"value": "value",
"caption": "caption",
expression_type: function_definition,
}
String(**valid_input_1)
String(**valid_input_2)
def test_string():
from alfasim_sdk._internal.types import String
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
String(value="acme")
with pytest.raises(
TypeError, match=re.escape("'caption' must be 'str' (got 1 that is a 'int')")
):
String(value="acme", caption=1)
with pytest.raises(
TypeError, match=re.escape("'value' must be 'str' (got 1 that is a 'int')")
):
String(value=1, caption="caption")
def test_enum():
from alfasim_sdk._internal.types import Enum
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Enum(values=["s"], initial="")
with pytest.raises(TypeError, match="values must be a list, got a 'str'."):
Enum(values="", caption="caption")
with pytest.raises(
TypeError, match="values must be a list of strings, the item '1' is a 'int'"
):
Enum(values=[1], caption="caption")
with pytest.raises(
ValueError, match='Enum type cannot have an empty string on field "values"'
):
Enum(values=[""], caption="caption")
enum = Enum(values=["value"], caption="caption")
assert enum.initial is None
enum = Enum(values=["value"], initial="value", caption="caption")
assert enum.initial == "value"
with pytest.raises(
TypeError, match="The initial condition must be within the declared values"
):
Enum(values=["value1, value2"], initial="", caption="caption")
@pytest.mark.parametrize("class_", [Reference, MultipleReference])
def test_reference(class_):
from alfasim_sdk._internal.types import TracerType
from alfasim_sdk._internal.models import data_model, container_model
@data_model(caption="caption")
class Data:
pass
@container_model(caption="caption", model=Data, icon="")
class DataContainer:
pass
class InvalidClass:
pass
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
class_(ref_type="")
with pytest.raises(TypeError, match="ref_type must be a class"):
class_(ref_type="", caption="caption")
with pytest.raises(
TypeError,
match="ref_type must be an ALFAsim type or a class decorated with 'data_model'",
):
class_(ref_type=InvalidClass, caption="caption")
error_msg = "ref_type must be an ALFAsim type or a class decorated with 'data_model', got a class decorated with 'container_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=DataContainer, caption="caption")
error_msg = "The container_type field must be given when ref_type is a class decorated with 'data_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=Data, caption="caption")
with pytest.raises(ValueError, match='The field "container_type" cannot be empty'):
class_(ref_type=Data, container_type="", caption="caption")
assert (
class_(ref_type=Data, container_type="DataContainer", caption="caption")
is not None
)
assert class_(ref_type=TracerType, caption="caption") is not None
def test_quantity():
from alfasim_sdk._internal.types import Quantity
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Quantity(value="", unit="")
with pytest.raises(TypeError, match="'value' must be <class 'numbers.Real'>"):
Quantity(value="", unit="", caption="caption")
with pytest.raises(
TypeError, match=re.escape("'unit' must be 'str' (got 1 that is a 'int')")
):
Quantity(value=1, unit=1, caption="caption")
def test_table():
from alfasim_sdk._internal.types import Table
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Table(rows=[])
with pytest.raises(TypeError, match="rows must be a list with TableColumn."):
Table(rows=[], caption="caption")
with pytest.raises(TypeError, match="rows must be a list of TableColumn."):
Table(rows=[""], caption="caption")
def test_table_column():
from alfasim_sdk._internal.types import TableColumn, Quantity
with pytest.raises(
TypeError, match="value must be a Quantity, got a <class 'str'>."
):
TableColumn(id="id", value="")
column = TableColumn(
id="id", value=Quantity(value=1, unit="m", caption="CAPTION FOR COLUMN")
)
assert column.caption == column.value.caption
def test_boolean():
from alfasim_sdk._internal.types import Boolean
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Boolean(value="")
with pytest.raises(TypeError, match="'value' must be <class 'bool'"):
Boolean(value=1, caption="caption")
def test_file_content():
from alfasim_sdk._internal.types import FileContent
FileContent(caption="Test")
def METHOD_NAME():
from alfasim_sdk._internal.types import Boolean
field = Boolean(value=True, caption="caption")
assert field.tooltip == ""
field = Boolean(value=True, caption="caption", tooltip="Test123")
assert field.tooltip == "Test123"
expected_msg = re.escape(
"'tooltip' must be <class 'str'> (got 2 that is a <class 'int'>)."
)
with pytest.raises(TypeError, match=expected_msg):
Boolean(value=True, caption="caption", tooltip=2)
field = Boolean(value=True, caption="caption", tooltip="∩ ∪ ∫ ∬ ∭ ∮")
assert field.tooltip == "∩ ∪ ∫ ∬ ∭ ∮" | null |