| id (int64, 0–6k) | code (string, 4k–8k chars) | code_compressed (null) |
---|---|---|
600 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeImageGroupedVulListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeImageGroupedVulList')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_IsLatest(self): # Integer
return self.get_query_params().get('IsLatest')
def set_IsLatest(self, IsLatest): # Integer
self.add_query_param('IsLatest', IsLatest)
def get_ImageTag(self): # String
return self.get_query_params().get('ImageTag')
def set_ImageTag(self, ImageTag): # String
self.add_query_param('ImageTag', ImageTag)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_AliasName(self): # String
return self.get_query_params().get('AliasName')
def set_AliasName(self, AliasName): # String
self.add_query_param('AliasName', AliasName)
def get_PatchId(self): # Long
return self.get_query_params().get('PatchId')
def set_PatchId(self, PatchId): # Long
self.add_query_param('PatchId', PatchId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Necessity(self): # String
return self.get_query_params().get('Necessity')
def set_Necessity(self, Necessity): # String
self.add_query_param('Necessity', Necessity)
def get_Uuids(self): # String
return self.get_query_params().get('Uuids')
def set_Uuids(self, Uuids): # String
self.add_query_param('Uuids', Uuids)
def get_RepoId(self): # String
return self.get_query_params().get('RepoId')
def set_RepoId(self, RepoId): # String
self.add_query_param('RepoId', RepoId)
def get_CveId(self): # String
return self.get_query_params().get('CveId')
def set_CveId(self, CveId): # String
self.add_query_param('CveId', CveId)
def get_RepoNamespace(self): # String
return self.get_query_params().get('RepoNamespace')
def set_RepoNamespace(self, RepoNamespace): # String
self.add_query_param('RepoNamespace', RepoNamespace)
def get_ImageDigest(self): # String
return self.get_query_params().get('ImageDigest')
def set_ImageDigest(self, ImageDigest): # String
self.add_query_param('ImageDigest', ImageDigest)
def get_ScanRanges(self): # RepeatList
return self.get_query_params().get('ScanRange')
def set_ScanRanges(self, ScanRange): # RepeatList
for depth1 in range(len(ScanRange)):
self.add_query_param('ScanRange.' + str(depth1 + 1), ScanRange[depth1])
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RepoName(self): # String
return self.get_query_params().get('RepoName')
def set_RepoName(self, RepoName): # String
self.add_query_param('RepoName', RepoName)
def get_RepoInstanceId(self): # String
return self.get_query_params().get('RepoInstanceId')
def set_RepoInstanceId(self, RepoInstanceId): # String
self.add_query_param('RepoInstanceId', RepoInstanceId)
def get_ImageLayer(self): # String
return self.get_query_params().get('ImageLayer')
def set_ImageLayer(self, ImageLayer): # String
self.add_query_param('ImageLayer', ImageLayer)
    def get_RepoRegionId(self): # String
return self.get_query_params().get('RepoRegionId')
def set_RepoRegionId(self, RepoRegionId): # String
self.add_query_param('RepoRegionId', RepoRegionId) | null |
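For context, a minimal sketch of how a generated request class like the one above is typically driven through the SDK core's `AcsClient`; the credentials, region, and parameter values are placeholders, not part of the dataset row.

```python
# Hedged usage sketch: credentials, region, and query values are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeImageGroupedVulListRequest()
request.set_Type('cve')        # illustrative vulnerability type
request.set_PageSize(20)
request.set_CurrentPage(1)

# Signs and sends the RPC call; returns the raw response body or raises
# ClientException/ServerException on failure.
response = client.do_action_with_exception(request)
print(response)
```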
601 | from datetime import datetime, timedelta
from app import db, bcrypt, LOGGER
from app.utils.misc import make_code
from flask_login import UserMixin
from sqlalchemy.schema import UniqueConstraint
def expiration_date():
return datetime.now() + timedelta(days=1)
class AppUser(db.Model, UserMixin):
id = db.Column(db.Integer(), primary_key=True)
email = db.Column(db.String(255), nullable=False)
firstname = db.Column(db.String(100), nullable=False)
lastname = db.Column(db.String(100), nullable=False)
user_title = db.Column(db.String(20), nullable=False)
nationality_country_id = db.Column(db.Integer(), db.ForeignKey('country.id'), nullable=True)
residence_country_id = db.Column(db.Integer(), db.ForeignKey('country.id'), nullable=True)
user_gender = db.Column(db.String(20), nullable=True)
affiliation = db.Column(db.String(255), nullable=True)
department = db.Column(db.String(255), nullable=True)
user_disability = db.Column(db.String(255), nullable=True)
user_category_id = db.Column(db.Integer(), db.ForeignKey('user_category.id'), nullable=True)
user_dateOfBirth = db.Column(db.DateTime(), nullable=True)
user_primaryLanguage = db.Column(db.String(255), nullable=True)
password = db.Column(db.String(255), nullable=False)
active = db.Column(db.Boolean(), nullable=False)
is_admin = db.Column(db.Boolean(), nullable=False)
is_deleted = db.Column(db.Boolean(), nullable=False)
deleted_datetime_utc = db.Column(db.DateTime(), nullable=True)
verified_email = db.Column(db.Boolean(), nullable=True)
verify_token = db.Column(db.String(255), nullable=True, unique=True, default=make_code)
policy_agreed_datetime = db.Column(db.DateTime(), nullable=True)
organisation_id = db.Column(db.Integer(), db.ForeignKey('organisation.id'), nullable=False)
__table_args__ = (UniqueConstraint('email', 'organisation_id', name='org_email_unique'),)
nationality_country = db.relationship('Country', foreign_keys=[nationality_country_id])
residence_country = db.relationship('Country', foreign_keys=[residence_country_id])
user_category = db.relationship('UserCategory')
event_roles = db.relationship('EventRole')
def __init__(self,
email,
firstname,
lastname,
user_title,
password,
organisation_id,
is_admin=False):
self.email = email
self.firstname = firstname
self.lastname = lastname
self.user_title = user_title
self.set_password(password)
self.organisation_id = organisation_id
self.active = True
self.is_admin = is_admin
self.is_deleted = False
self.deleted_datetime_utc = None
self.verified_email = False
self.agree_to_policy()
@property
def full_name(self):
return f"{self.firstname} {self.lastname}"
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password).decode('utf-8')
def deactivate(self):
self.active = False
def verify(self):
self.verified_email = True
def agree_to_policy(self):
self.policy_agreed_datetime = datetime.now()
    def has_agreed_to_policy(self):
return self.policy_agreed_datetime is not None
def update_email(self, new_email):
self.verified_email = False
self.verify_token = make_code()
self.email = new_email
def delete(self):
self.is_deleted = True
self.deleted_datetime_utc = datetime.now()
def _has_admin_role(self, event_id, admin_role_name):
if self.is_admin:
return True
if self.event_roles is None:
return False
for event_role in self.event_roles:
if event_role.event_id == event_id and event_role.role == admin_role_name:
return True
return False
def is_event_admin(self, event_id):
return self._has_admin_role(event_id, 'admin')
def is_event_treasurer(self, event_id):
return self._has_admin_role(event_id, 'treasurer')
def is_registration_admin(self, event_id):
# An event admin is also a registration admin
return self._has_admin_role(event_id, 'registration-admin') or self._has_admin_role(event_id, 'admin')
def is_reviewer(self, event_id):
if self.event_roles is None:
return False
for event_role in self.event_roles:
if event_role.event_id == event_id and event_role.role == 'reviewer':
return True
return False
def is_registration_volunteer(self, event_id):
        # An event admin or registration admin is also a registration volunteer
return (
self._has_admin_role(event_id, 'registration-admin')
or self._has_admin_role(event_id, 'admin')
or self._has_admin_role(event_id,'registration-volunteer')
)
class PasswordReset(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'))
code = db.Column(db.String(255), unique=True, default=make_code)
date = db.Column(db.DateTime(), default=expiration_date)
user = db.relationship(AppUser)
    # A bare UniqueConstraint expression has no effect; it must be registered via __table_args__.
    __table_args__ = (db.UniqueConstraint('user_id', 'code', name='uni_user_code'),)
def __init__(self, user):
self.user = user
class Country(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(100), nullable=False)
def __init__(self, name):
self.name = name
class UserCategory(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(100), nullable=False)
description = db.Column(db.String(500))
group = db.Column(db.String(100))
def __init__(self, name, description=None, group=None):
self.name = name
self.description = description
self.group = group
class UserComment(db.Model):
id = db.Column(db.Integer(), primary_key=True)
event_id = db.Column(db.Integer(), db.ForeignKey('event.id'), nullable=False)
user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'), nullable=False)
comment_by_user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'), nullable=False)
timestamp = db.Column(db.DateTime(), nullable=False)
comment = db.Column(db.String(2000))
event = db.relationship('Event')
user = db.relationship('AppUser', foreign_keys=[user_id])
comment_by_user = db.relationship('AppUser', foreign_keys=[comment_by_user_id])
def __init__(self, event_id, user_id, comment_by_user_id, timestamp, comment):
self.event_id = event_id
self.user_id = user_id
self.comment_by_user_id = comment_by_user_id
self.timestamp = timestamp
self.comment = comment | null |
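As a quick illustration of the `AppUser` model above, a minimal sketch assuming an initialized Flask app context, a migrated database, and an existing organisation with id 1; all values are invented.

```python
# Illustrative only: assumes app context, migrated DB, organisation id 1.
user = AppUser(
    email='jane@example.com',
    firstname='Jane',
    lastname='Doe',
    user_title='Ms',
    password='s3cret',        # stored bcrypt-hashed by set_password()
    organisation_id=1,
)
db.session.add(user)
db.session.commit()

assert not user.verified_email              # new accounts start unverified
assert not user.is_event_admin(event_id=1)  # no event roles assigned yet
```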
602 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateFlowlogRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateFlowlog')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
    def get_ProjectName(self): # String
return self.get_query_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_query_param('ProjectName', ProjectName)
def get_LogStoreName(self): # String
return self.get_query_params().get('LogStoreName')
def set_LogStoreName(self, LogStoreName): # String
self.add_query_param('LogStoreName', LogStoreName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransitRouterAttachmentId(self): # String
return self.get_query_params().get('TransitRouterAttachmentId')
def set_TransitRouterAttachmentId(self, TransitRouterAttachmentId): # String
self.add_query_param('TransitRouterAttachmentId', TransitRouterAttachmentId)
def get_Interval(self): # Long
return self.get_query_params().get('Interval')
def set_Interval(self, Interval): # Long
self.add_query_param('Interval', Interval)
def get_FlowLogName(self): # String
return self.get_query_params().get('FlowLogName')
def set_FlowLogName(self, FlowLogName): # String
self.add_query_param('FlowLogName', FlowLogName) | null |
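The `Tag` parameter above is a `RepeatList`; a small sketch of how `set_Tags` flattens it into indexed query keys (tag contents invented):

```python
# Sketch of RepeatList flattening; tag contents are illustrative.
request = CreateFlowlogRequest()
request.set_Tags([
    {'Key': 'env', 'Value': 'prod'},
    {'Key': 'team', 'Value': 'network'},
])
# The query params now contain indexed keys:
#   Tag.1.Key=env, Tag.1.Value=prod, Tag.2.Key=team, Tag.2.Value=network
print(request.get_query_params())
```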
603 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateCapacityReservationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateCapacityReservation','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
    def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PrivatePoolOptionsMatchCriteria(self): # String
return self.get_query_params().get('PrivatePoolOptions.MatchCriteria')
def set_PrivatePoolOptionsMatchCriteria(self, PrivatePoolOptionsMatchCriteria): # String
self.add_query_param('PrivatePoolOptions.MatchCriteria', PrivatePoolOptionsMatchCriteria)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_EndTimeType(self): # String
return self.get_query_params().get('EndTimeType')
def set_EndTimeType(self, EndTimeType): # String
self.add_query_param('EndTimeType', EndTimeType)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_PrivatePoolOptionsName(self): # String
return self.get_query_params().get('PrivatePoolOptions.Name')
def set_PrivatePoolOptionsName(self, PrivatePoolOptionsName): # String
self.add_query_param('PrivatePoolOptions.Name', PrivatePoolOptionsName)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_EndTime(self): # String
return self.get_query_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_query_param('EndTime', EndTime)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ZoneIds(self): # RepeatList
return self.get_query_params().get('ZoneId')
def set_ZoneIds(self, ZoneId): # RepeatList
for depth1 in range(len(ZoneId)):
self.add_query_param('ZoneId.' + str(depth1 + 1), ZoneId[depth1])
def get_InstanceAmount(self): # Integer
return self.get_query_params().get('InstanceAmount')
def set_InstanceAmount(self, InstanceAmount): # Integer
self.add_query_param('InstanceAmount', InstanceAmount) | null |
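`ClientToken` here serves the usual Alibaba Cloud idempotency purpose; a common (but not mandated) pattern is one fresh UUID per logical request, sketched below with invented instance values.

```python
# Sketch: per-request idempotency token; instance type/zone are placeholders.
import uuid

request = CreateCapacityReservationRequest()
request.set_ClientToken(str(uuid.uuid4()))
request.set_InstanceType('ecs.g6.large')
request.set_InstanceAmount(2)
request.set_ZoneIds(['cn-hangzhou-h'])  # expands to ZoneId.1=cn-hangzhou-h
```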
604 | #!/usr/bin/env python
import argparse
import glob
import subprocess
from pathlib import Path
import yaml
def flatten(d, path):
"""
Flatten a dictionary into ('some/path/to/key', value)
>>> flatten({'a': {'b': 2}, 'q': 3}, [])
[('a.b', 2), ('q', 3)]
"""
if isinstance(d, dict):
for k, v in d.items():
yield from flatten(v, path + [k])
else:
yield (".".join(path), d)
def flat_dict(d):
return dict(flatten(d, []))
# Load without the includes since we can't follow those across git revisions.
class MockOrderedLoader(yaml.SafeLoader):
    def _ignore_include(self, node):
        return {}
MockOrderedLoader.add_constructor("!include", MockOrderedLoader._ignore_include)
def diff_files(old, new):
# Flatten them
old_kv = flat_dict(old)
new_kv = flat_dict(new)
# Compare them
old_k = set(old_kv.keys())
new_k = set(new_kv.keys())
added = []
for item in new_k - old_k:
parent = ".".join(item.split(".")[0:-1])
if parent in new_k and parent not in old_k:
added.append(item)
else:
added.append(parent)
added = set(added)
removed = []
for item in old_k - new_k:
parent = ".".join(item.split(".")[0:-1])
if parent in old_k and parent not in new_k:
removed.append(item)
else:
removed.append(parent)
removed = set(removed)
shared = old_k & new_k
changed = [(k, old_kv[k], new_kv[k]) for k in shared if old_kv[k] != new_kv[k]]
return added, removed, changed
def _report_dict(title, subheading, data, mapper):
print(title)
print("-" * len(title))
print()
print(subheading)
print()
for fn in data:
print(fn)
print("~" * len(fn))
print()
for k in data[fn]:
print(mapper(k))
print()
print()
def _indent(s, by=4):
whitespace = " " * by
s = s if isinstance(s, list) else str(s).splitlines()
return "\n".join(f"{whitespace}{line}" for line in s)
def report_diff(added, changed, removed, new_files):
# Print out report
if added or changed or removed:
print("Configuration Changes")
print("=====================")
print()
if added:
_report_dict("Added", "The following configuration options are new", added, lambda x: f"- {x}")
if changed:
_report_dict(
"Changed",
"The following configuration options have been changed",
changed,
lambda x: f"- {x[0]} has changed from\n\n ::\n\n{_indent(x[1])}\n\n to\n\n ::\n\n{_indent(x[2])}\n\n",
)
if removed:
_report_dict(
"Removed", "The following configuration options have been completely removed", removed, lambda x: f"- {x}"
)
if new_files:
print("New Configuration Files")
print("-----------------------")
print()
print("The following files are new, or recently converted to yaml")
print()
for k in new_files:
print(f"- ``{k}``")
def load_at_time(path, revision=None):
if revision is not None:
return subprocess.check_output(["git", "show", f"{revision}:{path}"], stderr=subprocess.STDOUT)
else:
with open(path) as handle:
return handle.read()
def main(old_revision, new_revision=None):
globs = (
"config/*.yml.sample",
"lib/galaxy/config/schemas/*schema.yml",
)
files_to_diff = [f for g in globs for f in glob.glob(g)]
added = {}
removed = {}
changed = {}
new_files = []
for file in files_to_diff:
filename = file
if "config_schema.yml" in file:
filename = "config/galaxy.yml.sample:galaxy"
real_path = Path(file).resolve().relative_to(Path.cwd())
try:
old_contents = yaml.load(load_at_time(real_path, old_revision), Loader=MockOrderedLoader)
new_contents = yaml.load(load_at_time(real_path, new_revision), Loader=MockOrderedLoader)
(a, r, c) = diff_files(old_contents, new_contents)
if a:
added[filename] = sorted(a)
if r:
removed[filename] = sorted(r)
if c:
changed[filename] = sorted(c)
except subprocess.CalledProcessError:
new_files.append(file)
report_diff(added, changed, removed, new_files)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Diff yaml configuration files between two points in time.")
parser.add_argument("old_revision", help="Old revision")
parser.add_argument(
"--new_revision",
help="New revision (defaults to whatever is currently in tree)",
)
args = parser.parse_args()
main(args.old_revision, args.new_revision) | null |
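To make `diff_files` concrete, a hand-worked example (inputs invented); note that an added leaf whose parent path is not itself a key in the new mapping is reported via its parent.

```python
# Worked example of diff_files() on two tiny mappings.
old = {'a': {'b': 1}}
new = {'a': {'b': 2, 'c': 3}}
added, removed, changed = diff_files(old, new)
assert added == {'a'}            # 'a.c' is new, reported by its parent 'a'
assert removed == set()
assert changed == [('a.b', 1, 2)]
```

Against a real tree the script is driven from the CLI, e.g. `python <script>.py <old_rev> --new_revision <new_rev>`, where the revision names are whatever git accepts.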
605 | import os
import pytest
from ..utils import assert_content
from pyfs import head, cp
from pyio import write, write_regions
def test_cp_file_from_local_folder_to_mount_folder(size, local_file, mount_file, source_path):
"""TC-PIPE-FUSE-50"""
head(source_path, size=size, write_to=local_file)
cp(local_file, mount_file)
assert_content(local_file, mount_file)
def test_append_to_file_end(local_file, mount_file, source_path):
"""TC-PIPE-FUSE-51"""
head(source_path, append_to=local_file)
head(source_path, append_to=mount_file)
assert_content(local_file, mount_file)
def test_override_file_tail(size, local_file, mount_file):
"""TC-PIPE-FUSE-52"""
if size < 10:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 10, amount=10)
write(mount_file, offset=actual_size - 10, amount=10)
assert_content(local_file, mount_file)
def test_override_file_head(size, local_file, mount_file):
"""TC-PIPE-FUSE-53"""
if size < 10:
pytest.skip()
write(local_file, offset=0, amount=10)
write(mount_file, offset=0, amount=10)
assert_content(local_file, mount_file)
def test_write_to_position_that_is_bigger_than_file_length(local_file, mount_file):
"""TC-PIPE-FUSE-54"""
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size + 10, amount=10)
write(mount_file, offset=actual_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_that_exceeds_file_length(size, local_file, mount_file):
"""TC-PIPE-FUSE-55"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 5, amount=10)
write(mount_file, offset=actual_size - 5, amount=10)
assert_content(local_file, mount_file)
def test_write_region_in_first_chunk(size, local_file, mount_file):
"""TC-PIPE-FUSE-56"""
if size < 20:
pytest.skip()
write(local_file, offset=10, amount=10)
write(mount_file, offset=10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-57"""
if size < chunk_size + 20:
pytest.skip()
write(local_file, offset=chunk_size + 10, amount=10)
write(mount_file, offset=chunk_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_matching_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-58"""
if size < chunk_size:
pytest.skip()
write(local_file, offset=0, amount=chunk_size)
write(mount_file, offset=0, amount=chunk_size)
assert_content(local_file, mount_file)
def test_write_region_between_two_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-59"""
if size < chunk_size + 5:
pytest.skip()
write(local_file, offset=chunk_size - 5, amount=10)
write(mount_file, offset=chunk_size - 5, amount=10)
assert_content(local_file, mount_file)
def test_write_two_regions_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-60"""
if size < chunk_size + 110:
pytest.skip()
write_regions(local_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_in_two_adjacent_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-61"""
if size < chunk_size + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_in_two_non_adjacent_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-62"""
if size < chunk_size * 2 + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_three_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-63"""
if size < chunk_size * 2 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_four_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-64"""
if size < chunk_size * 3 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_exceeding_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-65"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_starting_from_position_that_is_bigger_than_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-66"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_starting_from_position_that_is_bigger_than_file_length(chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-67"""
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
write_regions(mount_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_overlapping_regions(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-68"""
if size < 25:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_region_to_an_already_written_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-69"""
if size < chunk_size + 10:
pytest.skip()
write_regions(local_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
write_regions(mount_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
assert_content(local_file, mount_file) | null |
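The `write`/`write_regions` helpers come from the suite's private `pyio` module, so their exact contract is not visible here; a plausible reading, consistent with the tests above, is that they overwrite `amount` bytes at `offset` and grow the file when the region runs past EOF. A hedged sketch of that assumed behaviour:

```python
import os

def write_sketch(path, offset, amount):
    # Assumed contract of pyio.write (not the real implementation):
    # overwrite `amount` random bytes at `offset`, extending the file
    # if the region ends beyond the current EOF.
    with open(path, 'r+b') as f:
        f.seek(offset)
        f.write(os.urandom(amount))
```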
606 | # Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import decimal
import typing
import octobot_trading.exchanges as exchanges
import octobot_trading.exchanges.connectors.ccxt.constants as ccxt_constants
import octobot_trading.enums as trading_enums
import octobot_trading.errors
class Bitget(exchanges.RestExchange):
DESCRIPTION = ""
FIX_MARKET_STATUS = True
REMOVE_MARKET_STATUS_PRICE_LIMITS = True
@classmethod
def get_name(cls):
return 'bitget'
    def get_adapter_class(self):
return BitgetCCXTAdapter
def get_additional_connector_config(self):
        # Tell ccxt to use the amount as provided instead of computing it by multiplying by price
        # (that conversion is done here instead; price must not be sent with market orders).
        # Only applies to buy market orders.
return {
ccxt_constants.CCXT_OPTIONS: {
"createMarketBuyOrderRequiresPrice": False # disable quote conversion
}
}
async def create_order(self, order_type: trading_enums.TraderOrderType, symbol: str, quantity: decimal.Decimal,
price: decimal.Decimal = None, stop_price: decimal.Decimal = None,
side: trading_enums.TradeOrderSide = None, current_price: decimal.Decimal = None,
reduce_only: bool = False, params: dict = None) -> typing.Optional[dict]:
if order_type is trading_enums.TraderOrderType.BUY_MARKET:
# on Bitget, market orders are in quote currency (YYY in XYZ/YYY)
used_price = price or current_price
if not used_price:
raise octobot_trading.errors.NotSupported(f"{self.get_name()} requires a price parameter to create "
f"market orders as quantity is in quote currency")
quantity = quantity * used_price
return await super().create_order(order_type, symbol, quantity,
price=price, stop_price=stop_price,
side=side, current_price=current_price,
reduce_only=reduce_only, params=params)
class BitgetCCXTAdapter(exchanges.CCXTAdapter):
def fix_order(self, raw, **kwargs):
fixed = super().fix_order(raw, **kwargs)
try:
if fixed[trading_enums.ExchangeConstantsOrderColumns.TYPE.value] \
== trading_enums.TradeOrderType.MARKET.value and \
fixed[trading_enums.ExchangeConstantsOrderColumns.SIDE.value] \
== trading_enums.TradeOrderSide.BUY.value:
            # convert amount to have the same units as every other exchange: use FILLED for accuracy
fixed[trading_enums.ExchangeConstantsOrderColumns.AMOUNT.value] = \
fixed[trading_enums.ExchangeConstantsOrderColumns.FILLED.value]
except KeyError:
pass
return fixed
def fix_trades(self, raw, **kwargs):
raw = super().fix_trades(raw, **kwargs)
for trade in raw:
# fees example for paid fees in USDT:
# {'code': 'USDT', 'cost': -0.015922}
fee = trade[trading_enums.ExchangeConstantsOrderColumns.FEE.value]
if trading_enums.FeePropertyColumns.CURRENCY.value not in fee:
fee[trading_enums.FeePropertyColumns.CURRENCY.value] = fee.get("code")
if fee[trading_enums.FeePropertyColumns.COST.value]:
fee[trading_enums.FeePropertyColumns.COST.value] *= -1
return raw | null |
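A worked instance of the quantity conversion in `create_order` (symbol and prices invented): a market buy of 0.5 BTC/USDT at a current price of 20,000 USDT is sent to Bitget as 10,000, i.e. the cost in quote currency.

```python
import decimal

# Illustrative BUY_MARKET conversion: base amount * price = quote cost.
quantity = decimal.Decimal('0.5') * decimal.Decimal('20000')
assert quantity == decimal.Decimal('10000')
```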
607 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class CreateTaskExportTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'CreateTaskExportTask')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HasAnswered(self): # Boolean
return self.get_query_params().get('HasAnswered')
def set_HasAnswered(self, HasAnswered): # Boolean
self.add_query_param('HasAnswered', HasAnswered)
def get_ActualTimeLte(self): # Long
return self.get_query_params().get('ActualTimeLte')
def set_ActualTimeLte(self, ActualTimeLte): # Long
self.add_query_param('ActualTimeLte', ActualTimeLte)
def get_OtherId(self): # String
return self.get_query_params().get('OtherId')
def set_OtherId(self, OtherId): # String
self.add_query_param('OtherId', OtherId)
def get_TaskCreateTimeLte(self): # Long
return self.get_query_params().get('TaskCreateTimeLte')
def set_TaskCreateTimeLte(self, TaskCreateTimeLte): # Long
self.add_query_param('TaskCreateTimeLte', TaskCreateTimeLte)
def get_JobId(self): # String
return self.get_query_params().get('JobId')
def set_JobId(self, JobId): # String
self.add_query_param('JobId', JobId)
def get_TaskCreateTimeGte(self): # Long
return self.get_query_params().get('TaskCreateTimeGte')
def set_TaskCreateTimeGte(self, TaskCreateTimeGte): # Long
self.add_query_param('TaskCreateTimeGte', TaskCreateTimeGte)
def get_CalledNumber(self): # String
return self.get_query_params().get('CalledNumber')
    def set_CalledNumber(self, CalledNumber): # String
self.add_query_param('CalledNumber', CalledNumber)
def get_UserIdMatch(self): # String
return self.get_query_params().get('UserIdMatch')
def set_UserIdMatch(self, UserIdMatch): # String
self.add_query_param('UserIdMatch', UserIdMatch)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ScriptNameQuery(self): # String
return self.get_query_params().get('ScriptNameQuery')
def set_ScriptNameQuery(self, ScriptNameQuery): # String
self.add_query_param('ScriptNameQuery', ScriptNameQuery)
def get_PageIndex(self): # Integer
return self.get_query_params().get('PageIndex')
def set_PageIndex(self, PageIndex): # Integer
self.add_query_param('PageIndex', PageIndex)
def get_SortOrder(self): # String
return self.get_query_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_query_param('SortOrder', SortOrder)
def get_TaskStatusStringList(self): # String
return self.get_query_params().get('TaskStatusStringList')
def set_TaskStatusStringList(self, TaskStatusStringList): # String
self.add_query_param('TaskStatusStringList', TaskStatusStringList)
def get_JobGroupNameQuery(self): # String
return self.get_query_params().get('JobGroupNameQuery')
def set_JobGroupNameQuery(self, JobGroupNameQuery): # String
self.add_query_param('JobGroupNameQuery', JobGroupNameQuery)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_HasHangUpByRejection(self): # Boolean
return self.get_query_params().get('HasHangUpByRejection')
def set_HasHangUpByRejection(self, HasHangUpByRejection): # Boolean
self.add_query_param('HasHangUpByRejection', HasHangUpByRejection)
def get_HasReachedEndOfFlow(self): # Boolean
return self.get_query_params().get('HasReachedEndOfFlow')
def set_HasReachedEndOfFlow(self, HasReachedEndOfFlow): # Boolean
self.add_query_param('HasReachedEndOfFlow', HasReachedEndOfFlow)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_RecordingDurationGte(self): # Long
return self.get_query_params().get('RecordingDurationGte')
def set_RecordingDurationGte(self, RecordingDurationGte): # Long
self.add_query_param('RecordingDurationGte', RecordingDurationGte)
def get_CallDurationLte(self): # Long
return self.get_query_params().get('CallDurationLte')
def set_CallDurationLte(self, CallDurationLte): # Long
self.add_query_param('CallDurationLte', CallDurationLte)
def get_JobGroupId(self): # String
return self.get_query_params().get('JobGroupId')
def set_JobGroupId(self, JobGroupId): # String
self.add_query_param('JobGroupId', JobGroupId)
def get_SortBy(self): # String
return self.get_query_params().get('SortBy')
def set_SortBy(self, SortBy): # String
self.add_query_param('SortBy', SortBy)
def get_JobStatusStringList(self): # String
return self.get_query_params().get('JobStatusStringList')
def set_JobStatusStringList(self, JobStatusStringList): # String
self.add_query_param('JobStatusStringList', JobStatusStringList)
def get_ActualTimeGte(self): # Long
return self.get_query_params().get('ActualTimeGte')
def set_ActualTimeGte(self, ActualTimeGte): # Long
self.add_query_param('ActualTimeGte', ActualTimeGte)
def get_CallDurationGte(self): # Long
return self.get_query_params().get('CallDurationGte')
def set_CallDurationGte(self, CallDurationGte): # Long
self.add_query_param('CallDurationGte', CallDurationGte)
def get_RecordingDurationLte(self): # Long
return self.get_query_params().get('RecordingDurationLte')
def set_RecordingDurationLte(self, RecordingDurationLte): # Long
self.add_query_param('RecordingDurationLte', RecordingDurationLte) | null |
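A sketch of filtering exports by task-creation window; the `Gte`/`Lte` fields are `Long` and are assumed here to be epoch milliseconds (an assumption, not something the class states):

```python
import time

request = CreateTaskExportTaskRequest()
now_ms = int(time.time() * 1000)
request.set_TaskCreateTimeGte(now_ms - 24 * 3600 * 1000)  # last 24 hours
request.set_TaskCreateTimeLte(now_ms)
request.set_PageSize(50)
request.set_PageIndex(1)
```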
608 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class UpdateQualityRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'UpdateQualityRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Trend(self): # String
return self.get_body_params().get('Trend')
def set_Trend(self, Trend): # String
self.add_body_params('Trend', Trend)
def get_BlockType(self): # Integer
return self.get_body_params().get('BlockType')
def set_BlockType(self, BlockType): # Integer
self.add_body_params('BlockType', BlockType)
def get_PropertyType(self): # String
return self.get_body_params().get('PropertyType')
def set_PropertyType(self, PropertyType): # String
self.add_body_params('PropertyType', PropertyType)
def get_EntityId(self): # Long
return self.get_body_params().get('EntityId')
def set_EntityId(self, EntityId): # Long
self.add_body_params('EntityId', EntityId)
def get_RuleName(self): # String
return self.get_body_params().get('RuleName')
def set_RuleName(self, RuleName): # String
self.add_body_params('RuleName', RuleName)
def get_Checker(self): # Integer
return self.get_body_params().get('Checker')
def set_Checker(self, Checker): # Integer
self.add_body_params('Checker', Checker)
    def get_Operator(self): # String
return self.get_body_params().get('Operator')
def set_Operator(self, Operator): # String
self.add_body_params('Operator', Operator)
def get_Property(self): # String
return self.get_body_params().get('Property')
def set_Property(self, Property): # String
self.add_body_params('Property', Property)
def get_Id(self): # Long
return self.get_body_params().get('Id')
def set_Id(self, Id): # Long
self.add_body_params('Id', Id)
def get_WarningThreshold(self): # String
return self.get_body_params().get('WarningThreshold')
def set_WarningThreshold(self, WarningThreshold): # String
self.add_body_params('WarningThreshold', WarningThreshold)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_MethodName(self): # String
return self.get_body_params().get('MethodName')
def set_MethodName(self, MethodName): # String
self.add_body_params('MethodName', MethodName)
def get_ProjectName(self): # String
return self.get_body_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_body_params('ProjectName', ProjectName)
def get_RuleType(self): # Integer
return self.get_body_params().get('RuleType')
def set_RuleType(self, RuleType): # Integer
self.add_body_params('RuleType', RuleType)
def get_TemplateId(self): # Integer
return self.get_body_params().get('TemplateId')
def set_TemplateId(self, TemplateId): # Integer
self.add_body_params('TemplateId', TemplateId)
def get_ExpectValue(self): # String
return self.get_body_params().get('ExpectValue')
def set_ExpectValue(self, ExpectValue): # String
self.add_body_params('ExpectValue', ExpectValue)
def get_WhereCondition(self): # String
return self.get_body_params().get('WhereCondition')
def set_WhereCondition(self, WhereCondition): # String
self.add_body_params('WhereCondition', WhereCondition)
def get_CriticalThreshold(self): # String
return self.get_body_params().get('CriticalThreshold')
def set_CriticalThreshold(self, CriticalThreshold): # String
self.add_body_params('CriticalThreshold', CriticalThreshold)
def get_OpenSwitch(self): # Boolean
return self.get_body_params().get('OpenSwitch')
def set_OpenSwitch(self, OpenSwitch): # Boolean
self.add_body_params('OpenSwitch', OpenSwitch)
def get_Comment(self): # String
return self.get_body_params().get('Comment')
def set_Comment(self, Comment): # String
self.add_body_params('Comment', Comment)
def get_PredictType(self): # Integer
return self.get_body_params().get('PredictType')
def set_PredictType(self, PredictType): # Integer
self.add_body_params('PredictType', PredictType) | null |
609 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class SetCreditLabelActionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'SetCreditLabelAction','bssopenapi')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ActionType(self): # String
return self.get_query_params().get('ActionType')
def set_ActionType(self, ActionType): # String
self.add_query_param('ActionType', ActionType)
def get_IsNeedSaveNotifyRule(self): # String
return self.get_query_params().get('IsNeedSaveNotifyRule')
def set_IsNeedSaveNotifyRule(self, IsNeedSaveNotifyRule): # String
self.add_query_param('IsNeedSaveNotifyRule', IsNeedSaveNotifyRule)
def get_IsNeedAdjustCreditAccount(self): # String
return self.get_query_params().get('IsNeedAdjustCreditAccount')
def set_IsNeedAdjustCreditAccount(self, IsNeedAdjustCreditAccount): # String
self.add_query_param('IsNeedAdjustCreditAccount', IsNeedAdjustCreditAccount)
def get_NewCreateMode(self): # Boolean
return self.get_query_params().get('NewCreateMode')
def set_NewCreateMode(self, NewCreateMode): # Boolean
self.add_query_param('NewCreateMode', NewCreateMode)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def get_CurrencyCode(self): # String
return self.get_query_params().get('CurrencyCode')
def set_CurrencyCode(self, CurrencyCode): # String
self.add_query_param('CurrencyCode', CurrencyCode)
def get_DailyCycle(self): # String
return self.get_query_params().get('DailyCycle')
def set_DailyCycle(self, DailyCycle): # String
self.add_query_param('DailyCycle', DailyCycle)
    def get_Operator(self): # String
return self.get_query_params().get('Operator')
def set_Operator(self, Operator): # String
self.add_query_param('Operator', Operator)
def get_Uid(self): # String
return self.get_query_params().get('Uid')
def set_Uid(self, Uid): # String
self.add_query_param('Uid', Uid)
def get_SiteCode(self): # String
return self.get_query_params().get('SiteCode')
def set_SiteCode(self, SiteCode): # String
self.add_query_param('SiteCode', SiteCode)
def get_ClearCycle(self): # String
return self.get_query_params().get('ClearCycle')
def set_ClearCycle(self, ClearCycle): # String
self.add_query_param('ClearCycle', ClearCycle)
def get_NeedNotice(self): # Boolean
return self.get_query_params().get('NeedNotice')
def set_NeedNotice(self, NeedNotice): # Boolean
self.add_query_param('NeedNotice', NeedNotice)
def get_RequestId(self): # String
return self.get_query_params().get('RequestId')
def set_RequestId(self, RequestId): # String
self.add_query_param('RequestId', RequestId)
def get_IsNeedSetCreditAmount(self): # String
return self.get_query_params().get('IsNeedSetCreditAmount')
def set_IsNeedSetCreditAmount(self, IsNeedSetCreditAmount): # String
self.add_query_param('IsNeedSetCreditAmount', IsNeedSetCreditAmount)
def get_CreditAmount(self): # String
return self.get_query_params().get('CreditAmount')
def set_CreditAmount(self, CreditAmount): # String
self.add_query_param('CreditAmount', CreditAmount)
def get_IsNeedAddSettleLabel(self): # String
return self.get_query_params().get('IsNeedAddSettleLabel')
def set_IsNeedAddSettleLabel(self, IsNeedAddSettleLabel): # String
self.add_query_param('IsNeedAddSettleLabel', IsNeedAddSettleLabel) | null |
610 | """
Copyright (c) 2022, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from sparse_operation_kit import experiment as sok
if __name__ == "__main__":
hvd.init()
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], "GPU")
sok.init()
rows = [8192 * 5, 8192]
cols = [128, 4]
hotness = [10, 3]
combiners = ["mean", "sum"]
batch_size = 8192
iters = 100
# initial value of embedding table
weights = []
for i in range(len(rows)):
weight = np.random.rand(rows[i], cols[i]).astype(np.float32)
weight = tf.convert_to_tensor(weight, dtype=tf.float32)
        # make sure the weight is the same on each rank
weight = hvd.allreduce(weight)
weights.append(weight)
# sok variables
tf_vars = [tf.Variable(w) for w in weights]
sok_vars = [sok.Variable(w) for w in weights]
local_indices = []
for row in rows:
local_size = row // hvd.size()
if hvd.rank() < row % hvd.size():
local_size += 1
indices = np.arange(local_size) * hvd.size() + hvd.rank()
indices = tf.convert_to_tensor(indices, dtype=tf.int64)
local_indices.append(indices)
# indices
total_indices = []
for i in range(len(rows)):
offsets = np.random.randint(1, hotness[i] + 1, iters * batch_size)
offsets = tf.convert_to_tensor(offsets, dtype=tf.int64)
offsets = hvd.broadcast(offsets, root_rank=0)
values = np.random.randint(0, rows[i], tf.reduce_sum(offsets))
values = tf.convert_to_tensor(values, dtype=tf.int64)
values = hvd.broadcast(values, root_rank=0)
total_indices.append(tf.RaggedTensor.from_row_lengths(values, offsets))
left = batch_size // hvd.size() * hvd.rank()
right = batch_size // hvd.size() * (hvd.rank() + 1)
# initialize optimizer
optimizer = tf.keras.optimizers.SGD(learning_rate=1.0)
reg_var = []
reg_var.extend(tf_vars)
reg_var.extend(sok_vars)
def step(params, indices):
with tf.GradientTape() as tape:
embeddings = sok.lookup_sparse(params, indices, combiners=combiners)
loss = 0
for i in range(len(embeddings)):
loss = loss + tf.reduce_sum(embeddings[i])
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip(grads, params))
loss = hvd.allreduce(loss, op=hvd.Sum)
return loss
# Do training
loss1 = []
ts = []
t = time.time()
for i in range(iters):
ts.append(time.time() - t)
t = time.time()
indices = []
for j in range(len(total_indices)):
indices.append(total_indices[j][i * batch_size + left : i * batch_size + right])
loss = step(sok_vars, indices)
loss1.append(loss)
print("-" * 30 + "iteration %d" % i + "-" * 30)
print("loss:", loss)
out1 = sok_vars
@tf.function
    def tf_step(params, indices):
with tf.GradientTape() as tape:
loss = 0
for i in range(len(params)):
embedding = tf.nn.embedding_lookup_sparse(
params[i], indices[i], None, combiner=combiners[i]
)
loss = loss + tf.reduce_sum(embedding)
grads = tape.gradient(loss, params)
grads = [hvd.allreduce(grad, op=hvd.Sum) for grad in grads]
optimizer.apply_gradients(zip(grads, params))
loss = hvd.allreduce(loss, op=hvd.Sum)
return loss
loss2 = []
for i in range(iters):
indices = []
for j in range(len(total_indices)):
indices.append(
total_indices[j][i * batch_size + left : i * batch_size + right].to_sparse()
)
        loss = tf_step(tf_vars, indices)
loss2.append(loss)
print("-" * 30 + "iteration %d" % i + "-" * 30)
print("tf loss:", loss)
out2 = []
for i, v in enumerate(tf_vars):
out2.append(tf.nn.embedding_lookup(v, local_indices[i]))
# Check results
diff = 0
for i in range(len(out1)):
length = out1[i] ** 2 + out2[i] ** 2 + 1e-8
diff = diff + tf.reduce_sum((out1[i] - out2[i]) ** 2 / length)
print("[SOK INFO] diff:", diff)
assert diff < 1e-6
diff = 0
for i in range(iters):
# normalize
length = loss1[i] ** 2 + loss2[i] ** 2 + 1e-8
diff = diff + (loss1[i] - loss2[i]) ** 2 / length
print("[SOK INFO] loss diff:", diff)
assert diff < 1e-6
print("[SOK INFO] lookup_sparse distributed test passed")
ts = ts[5:]
print("[SOK INFO] Average time: %f ms/iteration" % (sum(ts) / len(ts) * 1000)) | null |
611 | import re
from typing import (
Optional,
Union,
)
from ._util import (
_assert_number,
_assert_presence_number,
)
def assert_has_text(
output: str,
text: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts specified output contains the substring specified by
the argument text. The exact number of occurrences can be
optionally specified by the argument n"""
assert output is not None, "Checking has_text assertion on empty output (None)"
_assert_presence_number(
output,
text,
n,
delta,
min,
max,
negate,
lambda o, t: o.find(t) >= 0,
lambda o, t: len(re.findall(re.escape(t), o)),
"{expected} text '{text}' in output ('{output}')",
"{expected} {n}+-{delta} occurences of '{text}' in output ('{output}')",
"{expected} that the number of occurences of '{text}' in output is in [{min}:{max}] ('{output}')",
)
def assert_not_has_text(output: str, text: str) -> None:
"""Asserts specified output does not contain the substring
specified by the argument text"""
assert output is not None, "Checking not_has_text assertion on empty output (None)"
assert output.find(text) < 0, f"Output file contains unexpected text '{text}'"
def assert_has_line(
output: str,
line: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts the specified output contains the line specified by the
argument line. The exact number of occurrences can be optionally
specified by the argument n"""
assert output is not None, "Checking has_line assertion on empty output (None)"
_assert_presence_number(
output,
line,
n,
delta,
min,
max,
negate,
lambda o, t: re.search(f"^{re.escape(t)}$", o, flags=re.MULTILINE) is not None,
lambda o, t: len(re.findall(f"^{re.escape(t)}$", o, flags=re.MULTILINE)),
"{expected} line '{text}' in output ('{output}')",
"{expected} {n}+-{delta} lines '{text}' in output ('{output}')",
"{expected} that the number of lines '{text}' in output is in [{min}:{max}] ('{output}')",
)
def assert_has_n_lines(
output: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts the specified output contains ``n`` lines allowing
for a difference in the number of lines (delta)
or relative differebce in the number of lines"""
assert output is not None, "Checking has_n_lines assertion on empty output (None)"
count = len(output.splitlines())
_assert_number(
count,
n,
delta,
min,
max,
negate,
"{expected} {n}+-{delta} lines in the output",
"{expected} the number of line to be in [{min}:{max}]",
)
def assert_has_text_matching(
output: str,
expression: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts the specified output contains text matching the
regular expression specified by the argument expression.
    If n is given the assertion checks for exactly n (non-overlapping)
    occurrences.
"""
_assert_presence_number(
output,
expression,
n,
delta,
min,
max,
negate,
lambda o, e: re.search(e, o) is not None,
lambda o, e: len(re.findall(e, o)),
"{expected} text matching expression '{text}' in output ('{output}')",
"{expected} {n}+-{delta} (non-overlapping) matches for '{text}' in output ('{output}')",
"{expected} that the number of (non-overlapping) matches for '{text}' in output is in [{min}:{max}] ('{output}')",
)
def assert_has_line_matching(
output: str,
expression: str,
n: Optional[Union[int, str]] = None,
delta: Union[int, str] = 0,
min: Optional[Union[int, str]] = None,
max: Optional[Union[int, str]] = None,
negate: Union[bool, str] = False,
) -> None:
"""Asserts the specified output contains a line matching the
regular expression specified by the argument expression. If n is given
    the assertion checks for exactly n occurrences."""
_assert_presence_number(
output,
expression,
n,
delta,
min,
max,
negate,
lambda o, e: re.search(f"^{e}$", o, flags=re.MULTILINE) is not None,
lambda o, e: len(re.findall(f"^{e}$", o, flags=re.MULTILINE)),
"{expected} line matching expression '{text}' in output ('{output}')",
"{expected} {n}+-{delta} lines matching for '{text}' in output ('{output}')",
"{expected} that the number of lines matching for '{text}' in output is in [{min}:{max}] ('{output}')",
) | null |
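# Usage sketch (hypothetical log text): here expression is a regular
# expression, so metacharacters are significant, unlike in the plain-text
# variants above:
#   assert_has_line_matching(log, r"ERROR: code \d+", n=1)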
612 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class RunCommandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'RunCommand','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_WorkingDir(self): # String
return self.get_query_params().get('WorkingDir')
def set_WorkingDir(self, WorkingDir): # String
self.add_query_param('WorkingDir', WorkingDir)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Frequency(self): # String
return self.get_query_params().get('Frequency')
def set_Frequency(self, Frequency): # String
self.add_query_param('Frequency', Frequency)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RepeatMode(self): # String
return self.get_query_params().get('RepeatMode')
def set_RepeatMode(self, RepeatMode): # String
self.add_query_param('RepeatMode', RepeatMode)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
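	# Sketch of the flattening performed above (hypothetical input): the
	# RepeatList is expanded into 1-indexed dotted query parameters, e.g.
	#   set_Tags([{'Key': 'env', 'Value': 'prod'}])
	# adds 'Tag.1.Key' = 'env' and 'Tag.1.Value' = 'prod'.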
def get_KeepCommand(self): # Boolean
return self.get_query_params().get('KeepCommand')
def set_KeepCommand(self, KeepCommand): # Boolean
self.add_query_param('KeepCommand', KeepCommand)
def get_Timed(self): # Boolean
return self.get_query_params().get('Timed')
def set_Timed(self, Timed): # Boolean
self.add_query_param('Timed', Timed)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_Name(self): # String
return self.get_query_params().get('Name')
def METHOD_NAME(self, Name): # String
self.add_query_param('Name', Name)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Parameters(self): # Json
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameters): # Json
self.add_query_param('Parameters', Parameters)
def get_ContainerName(self): # String
return self.get_query_params().get('ContainerName')
def set_ContainerName(self, ContainerName): # String
self.add_query_param('ContainerName', ContainerName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_CommandContent(self): # String
return self.get_query_params().get('CommandContent')
def set_CommandContent(self, CommandContent): # String
self.add_query_param('CommandContent', CommandContent)
def get_Timeout(self): # Long
return self.get_query_params().get('Timeout')
def set_Timeout(self, Timeout): # Long
self.add_query_param('Timeout', Timeout)
def get_ContentEncoding(self): # String
return self.get_query_params().get('ContentEncoding')
def set_ContentEncoding(self, ContentEncoding): # String
self.add_query_param('ContentEncoding', ContentEncoding)
def get_WindowsPasswordName(self): # String
return self.get_query_params().get('WindowsPasswordName')
def set_WindowsPasswordName(self, WindowsPasswordName): # String
self.add_query_param('WindowsPasswordName', WindowsPasswordName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_EnableParameter(self): # Boolean
return self.get_query_params().get('EnableParameter')
def set_EnableParameter(self, EnableParameter): # Boolean
self.add_query_param('EnableParameter', EnableParameter)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username) | null |
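# Minimal usage sketch (assumptions: a configured aliyunsdkcore AcsClient
# named `client`; the instance ID and command content are hypothetical):
#   request = RunCommandRequest()
#   request.set_Type("RunShellScript")
#   request.set_CommandContent("echo hello")
#   request.set_InstanceIds(["i-bp1xxxxxxxxxxxxx"])
#   response = client.do_action_with_exception(request)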
613 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for (Fashion) MNIST."""
import numpy as np
import scipy
def one_hot(a, num_classes):
return np.squeeze(np.eye(num_classes)[a.reshape(-1)])
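# e.g. one_hot(np.array([0, 2]), 3) -> [[1., 0., 0.], [0., 0., 1.]]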
def METHOD_NAME(y, p):
"""Compute the Brier score.
Brier Score: see
https://www.stat.washington.edu/raftery/Research/PDF/Gneiting2007jasa.pdf,
page 363, Example 1
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p: numpy array, size (?, num_classes)
containing the output predicted probabilities
Returns:
bs: Brier score.
"""
return np.mean(np.power(p - y, 2))
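# Worked example (assumption: 3 classes). A sharper correct prediction
# yields a lower (better) Brier score:
#   y = one_hot(np.array([1]), 3)                         # [0., 1., 0.]
#   np.mean(np.power(np.array([0.1, 0.8, 0.1]) - y, 2))   # 0.02
#   np.mean(np.power(np.array([0.0, 1.0, 0.0]) - y, 2))   # 0.0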
def calibration(y, p_mean, num_bins=10):
"""Compute the calibration.
References:
https://arxiv.org/abs/1706.04599
https://arxiv.org/abs/1807.00263
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p_mean: numpy array, size (?, num_classes)
containing the mean output predicted probabilities
num_bins: number of bins
Returns:
ece: Expected Calibration Error
mce: Maximum Calibration Error
"""
# Compute for every test sample x, the predicted class.
class_pred = np.argmax(p_mean, axis=1)
# and the confidence (probability) associated with it.
conf = np.max(p_mean, axis=1)
# Convert y from one-hot encoding to the number of the class
y = np.argmax(y, axis=1)
# Storage
acc_tab = np.zeros(num_bins) # empirical (true) confidence
mean_conf = np.zeros(num_bins) # predicted confidence
nb_items_bin = np.zeros(num_bins) # number of items in the bins
tau_tab = np.linspace(0, 1, num_bins+1) # confidence bins
for i in np.arange(num_bins): # iterate over the bins
# select the items where the predicted max probability falls in the bin
    # [tau_tab[i], tau_tab[i + 1])
sec = (tau_tab[i + 1] > conf) & (conf >= tau_tab[i])
nb_items_bin[i] = np.sum(sec) # Number of items in the bin
# select the predicted classes, and the true classes
class_pred_sec, y_sec = class_pred[sec], y[sec]
# average of the predicted max probabilities
mean_conf[i] = np.mean(conf[sec]) if nb_items_bin[i] > 0 else np.nan
# compute the empirical confidence
acc_tab[i] = np.mean(
class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan
# Cleaning
mean_conf = mean_conf[nb_items_bin > 0]
acc_tab = acc_tab[nb_items_bin > 0]
nb_items_bin = nb_items_bin[nb_items_bin > 0]
# Expected Calibration Error
ece = np.average(
np.absolute(mean_conf - acc_tab),
weights=nb_items_bin.astype(float) / np.sum(nb_items_bin))
# Maximum Calibration Error
mce = np.max(np.absolute(mean_conf - acc_tab))
return ece, mce
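# Intuition sketch: a perfectly calibrated model has mean_conf == acc_tab in
# every non-empty bin, giving ece == mce == 0. A call looks like
# (hypothetical arrays):
#   p_mean = np.array([[0.9, 0.1], [0.6, 0.4]])
#   y = one_hot(np.array([0, 1]), 2)
#   ece, mce = calibration(y, p_mean, num_bins=2)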
def ensemble_metrics(x,
y,
model,
log_likelihood_fn,
n_samples=1,
weight_files=None):
"""Evaluate metrics of an ensemble.
Args:
x: numpy array of inputs
y: numpy array of labels
model: tf.keras.Model.
log_likelihood_fn: keras function of log likelihood. For classification
tasks, log_likelihood_fn(...)[1] should return the logits
n_samples: number of Monte Carlo samples to draw per ensemble member (each
weight file).
weight_files: to draw samples from multiple weight sets, specify a list of
weight files to load. These files must have been generated through
keras's model.save_weights(...).
Returns:
metrics_dict: dictionary containing the metrics
"""
if weight_files is None:
ensemble_logprobs = [log_likelihood_fn([x, y])[0] for _ in range(n_samples)]
metric_values = [model.evaluate(x, y, verbose=0)
for _ in range(n_samples)]
ensemble_logits = [log_likelihood_fn([x, y])[1] for _ in range(n_samples)]
else:
ensemble_logprobs = []
metric_values = []
ensemble_logits = []
for filename in weight_files:
model.load_weights(filename)
ensemble_logprobs.extend([log_likelihood_fn([x, y])[0]
for _ in range(n_samples)])
ensemble_logits.extend([log_likelihood_fn([x, y])[1]
for _ in range(n_samples)])
metric_values.extend([model.evaluate(x, y, verbose=0)
for _ in range(n_samples)])
metric_values = np.mean(np.array(metric_values), axis=0)
results = {}
for m, name in zip(metric_values, model.metrics_names):
results[name] = m
ensemble_logprobs = np.array(ensemble_logprobs)
probabilistic_log_likelihood = np.mean(
scipy.special.logsumexp(
np.sum(ensemble_logprobs, axis=2)
if len(ensemble_logprobs.shape) > 2 else ensemble_logprobs,
b=1. / ensemble_logprobs.shape[0],
axis=0),
axis=0)
results['probabilistic_log_likelihood'] = probabilistic_log_likelihood
ensemble_logits = np.array(ensemble_logits)
probs = np.mean(scipy.special.softmax(ensemble_logits, axis=2), axis=0)
class_pred = np.argmax(probs, axis=1)
probabilistic_accuracy = np.mean(np.equal(y, class_pred))
results['probabilistic_accuracy'] = probabilistic_accuracy
results['ece'], results['mce'] = calibration(
one_hot(y, probs.shape[1]), probs)
results['brier_score'] = METHOD_NAME(one_hot(y, probs.shape[1]), probs)
return results | null |
614 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkprivatelink.endpoint import endpoint_data
class ListVpcEndpointServicesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Privatelink', '2020-04-15', 'ListVpcEndpointServices','privatelink')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ServiceBusinessStatus(self): # String
return self.get_query_params().get('ServiceBusinessStatus')
def set_ServiceBusinessStatus(self, ServiceBusinessStatus): # String
self.add_query_param('ServiceBusinessStatus', ServiceBusinessStatus)
def get_AutoAcceptEnabled(self): # Boolean
return self.get_query_params().get('AutoAcceptEnabled')
def METHOD_NAME(self, AutoAcceptEnabled): # Boolean
self.add_query_param('AutoAcceptEnabled', AutoAcceptEnabled)
def get_ServiceStatus(self): # String
return self.get_query_params().get('ServiceStatus')
def set_ServiceStatus(self, ServiceStatus): # String
self.add_query_param('ServiceStatus', ServiceStatus)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_ZoneAffinityEnabled(self): # Boolean
return self.get_query_params().get('ZoneAffinityEnabled')
def set_ZoneAffinityEnabled(self, ZoneAffinityEnabled): # Boolean
self.add_query_param('ZoneAffinityEnabled', ZoneAffinityEnabled)
def get_ServiceName(self): # String
return self.get_query_params().get('ServiceName')
def set_ServiceName(self, ServiceName): # String
self.add_query_param('ServiceName', ServiceName)
def get_Tag(self): # Array
return self.get_query_params().get('Tag')
def set_Tag(self, Tag): # Array
for index1, value1 in enumerate(Tag):
if value1.get('Key') is not None:
self.add_query_param('Tag.' + str(index1 + 1) + '.Key', value1.get('Key'))
if value1.get('Value') is not None:
self.add_query_param('Tag.' + str(index1 + 1) + '.Value', value1.get('Value'))
def get_ResourceId(self): # String
return self.get_query_params().get('ResourceId')
def set_ResourceId(self, ResourceId): # String
self.add_query_param('ResourceId', ResourceId)
def get_ServiceResourceType(self): # String
return self.get_query_params().get('ServiceResourceType')
def set_ServiceResourceType(self, ServiceResourceType): # String
self.add_query_param('ServiceResourceType', ServiceResourceType)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_ServiceId(self): # String
return self.get_query_params().get('ServiceId')
def set_ServiceId(self, ServiceId): # String
self.add_query_param('ServiceId', ServiceId) | null |
615 | #!/usr/bin/env python3
""" DeepFaceLab SAE Model
Based on https://github.com/iperov/DeepFaceLab
"""
import logging
import numpy as np
# Ignore linting errors from Tensorflow's thoroughly broken import system
from tensorflow.keras.layers import Concatenate, Dense, Flatten, Input, LeakyReLU, Reshape # noqa:E501 # pylint:disable=import-error
from tensorflow.keras.models import Model as KModel # pylint:disable=import-error
from lib.model.nn_blocks import Conv2DOutput, Conv2DBlock, ResidualBlock, UpscaleBlock
from ._base import ModelBase
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Model(ModelBase):
""" SAE Model from DFL """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.input_shape = (self.config["input_size"], self.config["input_size"], 3)
self.architecture = self.config["architecture"].lower()
self.use_mask = self.config.get("learn_mask", False)
self.multiscale_count = 3 if self.config["multiscale_decoder"] else 1
self.encoder_dim = self.config["encoder_dims"]
self.decoder_dim = self.config["decoder_dims"]
self._patch_weights_management()
@property
def model_name(self):
""" str: The name of the keras model. Varies depending on selected architecture. """
return f"{self.name}_{self.architecture}"
@property
def ae_dims(self):
""" Set the Autoencoder Dimensions or set to default """
retval = self.config["autoencoder_dims"]
if retval == 0:
retval = 256 if self.architecture == "liae" else 512
return retval
def _patch_weights_management(self):
""" Patch in the correct encoder name into the config dictionary for freezing and loading
weights based on architecture.
"""
self.config["freeze_layers"] = [f"encoder_{self.architecture}"]
self.config["load_layers"] = [f"encoder_{self.architecture}"]
logger.debug("Patched encoder layers to config: %s",
{k: v for k, v in self.config.items()
if k in ("freeze_layers", "load_layers")})
def build_model(self, inputs):
""" Build the DFL-SAE Model """
encoder = getattr(self, f"encoder_{self.architecture}")()
enc_output_shape = encoder.output_shape[1:]
encoder_a = encoder(inputs[0])
encoder_b = encoder(inputs[1])
if self.architecture == "liae":
inter_both = self.inter_liae("both", enc_output_shape)
int_output_shape = (np.array(inter_both.output_shape[1:]) * (1, 1, 2)).tolist()
inter_a = Concatenate()([inter_both(encoder_a), inter_both(encoder_a)])
inter_b = Concatenate()([self.inter_liae("b", enc_output_shape)(encoder_b),
inter_both(encoder_b)])
decoder = self.decoder("both", int_output_shape)
outputs = [decoder(inter_a), decoder(inter_b)]
else:
outputs = [self.decoder("a", enc_output_shape)(encoder_a),
self.decoder("b", enc_output_shape)(encoder_b)]
autoencoder = KModel(inputs, outputs, name=self.model_name)
return autoencoder
def encoder_df(self):
""" DFL SAE DF Encoder Network"""
input_ = Input(shape=self.input_shape)
dims = self.input_shape[-1] * self.encoder_dim
lowest_dense_res = self.input_shape[0] // 16
var_x = Conv2DBlock(dims, activation="leakyrelu")(input_)
var_x = Conv2DBlock(dims * 2, activation="leakyrelu")(var_x)
var_x = Conv2DBlock(dims * 4, activation="leakyrelu")(var_x)
var_x = Conv2DBlock(dims * 8, activation="leakyrelu")(var_x)
var_x = Dense(self.ae_dims)(Flatten()(var_x))
var_x = Dense(lowest_dense_res * lowest_dense_res * self.ae_dims)(var_x)
var_x = Reshape((lowest_dense_res, lowest_dense_res, self.ae_dims))(var_x)
var_x = UpscaleBlock(self.ae_dims, activation="leakyrelu")(var_x)
return KModel(input_, var_x, name="encoder_df")
def METHOD_NAME(self):
""" DFL SAE LIAE Encoder Network """
input_ = Input(shape=self.input_shape)
dims = self.input_shape[-1] * self.encoder_dim
var_x = Conv2DBlock(dims, activation="leakyrelu")(input_)
var_x = Conv2DBlock(dims * 2, activation="leakyrelu")(var_x)
var_x = Conv2DBlock(dims * 4, activation="leakyrelu")(var_x)
var_x = Conv2DBlock(dims * 8, activation="leakyrelu")(var_x)
var_x = Flatten()(var_x)
return KModel(input_, var_x, name="encoder_liae")
def inter_liae(self, side, input_shape):
""" DFL SAE LIAE Intermediate Network """
input_ = Input(shape=input_shape)
lowest_dense_res = self.input_shape[0] // 16
var_x = input_
var_x = Dense(self.ae_dims)(var_x)
var_x = Dense(lowest_dense_res * lowest_dense_res * self.ae_dims * 2)(var_x)
var_x = Reshape((lowest_dense_res, lowest_dense_res, self.ae_dims * 2))(var_x)
var_x = UpscaleBlock(self.ae_dims * 2, activation="leakyrelu")(var_x)
return KModel(input_, var_x, name=f"intermediate_{side}")
def decoder(self, side, input_shape):
""" DFL SAE Decoder Network"""
input_ = Input(shape=input_shape)
outputs = []
dims = self.input_shape[-1] * self.decoder_dim
var_x = input_
var_x1 = UpscaleBlock(dims * 8, activation=None)(var_x)
var_x1 = LeakyReLU(alpha=0.2)(var_x1)
var_x1 = ResidualBlock(dims * 8)(var_x1)
var_x1 = ResidualBlock(dims * 8)(var_x1)
if self.multiscale_count >= 3:
outputs.append(Conv2DOutput(3, 5, name=f"face_out_32_{side}")(var_x1))
var_x2 = UpscaleBlock(dims * 4, activation=None)(var_x1)
var_x2 = LeakyReLU(alpha=0.2)(var_x2)
var_x2 = ResidualBlock(dims * 4)(var_x2)
var_x2 = ResidualBlock(dims * 4)(var_x2)
if self.multiscale_count >= 2:
outputs.append(Conv2DOutput(3, 5, name=f"face_out_64_{side}")(var_x2))
var_x3 = UpscaleBlock(dims * 2, activation=None)(var_x2)
var_x3 = LeakyReLU(alpha=0.2)(var_x3)
var_x3 = ResidualBlock(dims * 2)(var_x3)
var_x3 = ResidualBlock(dims * 2)(var_x3)
outputs.append(Conv2DOutput(3, 5, name=f"face_out_128_{side}")(var_x3))
if self.use_mask:
var_y = input_
var_y = UpscaleBlock(self.decoder_dim * 8, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(self.decoder_dim * 4, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(self.decoder_dim * 2, activation="leakyrelu")(var_y)
var_y = Conv2DOutput(1, 5, name=f"mask_out_{side}")(var_y)
outputs.append(var_y)
return KModel(input_, outputs=outputs, name=f"decoder_{side}")
def _legacy_mapping(self):
""" The mapping of legacy separate model names to single model names """
mappings = {"df": {f"{self.name}_encoder.h5": "encoder_df",
f"{self.name}_decoder_A.h5": "decoder_a",
f"{self.name}_decoder_B.h5": "decoder_b"},
"liae": {f"{self.name}_encoder.h5": "encoder_liae",
f"{self.name}_intermediate_B.h5": "intermediate_both",
f"{self.name}_intermediate.h5": "intermediate_b",
f"{self.name}_decoder.h5": "decoder_both"}}
return mappings[self.config["architecture"]] | null |
616 | #!/usr/bin/python3
# This file originally lived in
# https://github.com/coreos/fedora-coreos-releng-automation. See that repo for
# archeological git research.
"""
This script is meant to be run from the Fedora CoreOS build
pipeline (see https://github.com/coreos/fedora-coreos-pipeline.git)
It makes an OSTree import request to the coreos-ostree-importer
running in Fedora's Infra OpenShift cluster.
"""
import argparse
import sys
# Pick up libraries we use that are delivered along with COSA
sys.path.insert(0, '/usr/lib/coreos-assembler')
from cosalib.meta import GenericBuildMeta
from cosalib.fedora_messaging_request import send_request_and_wait_for_response
from cosalib.cmdlib import get_basearch
# Example datagrepper URLs to inspect sent messages:
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import&delta=100000
# https://apps.stg.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.stg.coreos.build.request.ostree-import&delta=100000
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import.finished&delta=100000
# https://apps.stg.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.stg.coreos.build.request.ostree-import.finished&delta=100000
# Give the importer some time to do the import
OSTREE_IMPORTER_REQUEST_TIMEOUT_SEC = 15 * 60
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument("--build", help="Build ID", required=True)
parser.add_argument(
"--arch", help="target architecture", default=get_basearch()
)
parser.add_argument(
"--fedmsg-conf",
metavar="CONFIG.TOML",
required=True,
help="fedora-messaging config file for publishing",
)
parser.add_argument(
"--stg", action="store_true", help="target the stg infra rather than prod"
)
parser.add_argument(
"--s3",
metavar="<BUCKET>[/PREFIX]",
required=True,
help="bucket and prefix to S3 builds/ dir",
)
parser.add_argument(
"--repo",
choices=["prod", "compose"],
required=True,
help="the name of the OSTree repo within Fedora to import into",
)
return parser.METHOD_NAME()
def send_ostree_import_request(args):
if args.build == 'latest':
raise Exception("Refusing to ostree import generic 'latest' build ID")
build = GenericBuildMeta(build=args.build, basearch=args.arch)
bucket, prefix = get_bucket_and_prefix(args.s3)
environment = "prod"
if args.stg:
environment = "stg"
# Example: https://fcos-builds.s3.amazonaws.com/prod/streams/stable/builds/31.20200127.3.0/x86_64/fedora-coreos-31.20200127.3.0-ostree.x86_64.tar
commit_url = f"https://{bucket}.s3.amazonaws.com/{prefix}/builds/{args.build}/{args.arch}/{build['images']['ostree']['path']}"
response = send_request_and_wait_for_response(
request_type="ostree-import",
config=args.fedmsg_conf,
environment=environment,
request_timeout=OSTREE_IMPORTER_REQUEST_TIMEOUT_SEC,
body={
"build_id": args.build,
"basearch": args.arch,
"commit_url": commit_url,
"checksum": "sha256:" + build["images"]["ostree"]["sha256"],
"ostree_ref": build["ref"],
"ostree_checksum": build["ostree-commit"],
"target_repo": args.repo,
},
)
validate_response(response)
def get_bucket_and_prefix(path):
split = path.split("/", 1)
if len(split) == 1:
return (split[0], "")
return split
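# e.g. get_bucket_and_prefix("fcos-builds/prod/streams/stable")
#        -> ("fcos-builds", "prod/streams/stable")
#      get_bucket_and_prefix("fcos-builds") -> ("fcos-builds", "")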
def validate_response(response):
if response["status"].lower() == "failure":
# https://pagure.io/robosignatory/pull-request/38
if "failure-message" not in response:
raise Exception("Importing failed")
raise Exception(f"Importing failed: {response['failure-message']}")
assert response["status"].lower() == "success", str(response)
def main():
args = METHOD_NAME()
send_ostree_import_request(args)
if __name__ == "__main__":
sys.exit(main()) | null |
617 | import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from ai.backend.manager.models.minilang.ordering import QueryOrderParser
from ai.backend.manager.models.utils import agg_to_array
@pytest.fixture
async def METHOD_NAME(database_engine):
base = declarative_base()
metadata = base.metadata
grid = sa.Table(
"test_query_order_users",
metadata,
sa.Column("id", sa.Integer, sa.Sequence("user_id_seq"), primary_key=True),
sa.Column("data1", sa.Integer),
sa.Column("data2", sa.Float),
sa.Column("data3", sa.String(10)),
)
foreign_grid = sa.Table(
"test_query_order_dogs",
metadata,
sa.Column("id", sa.Integer, sa.Sequence("dog_id_seq"), primary_key=True),
sa.Column("user_id", sa.ForeignKey("test_query_order_users.id")),
sa.Column("name", sa.String(10)),
)
def _create_tables(conn, *args, **kwargs):
return metadata.create_all(conn, [grid, foreign_grid])
def _drop_tables(conn, *args, **kwargs):
return metadata.drop_all(conn, [grid, foreign_grid])
async with database_engine.begin() as conn:
await conn.run_sync(_create_tables)
await conn.execute(
grid.insert(),
[
{"data1": 10, "data2": 0.2, "data3": "a"},
{"data1": 10, "data2": 0.1, "data3": "c"},
{"data1": 20, "data2": 0.0, "data3": "b"},
{"data1": 20, "data2": -0.1, "data3": "d"},
],
)
await conn.execute(
foreign_grid.insert(),
[
{"user_id": 1, "name": "b"},
{"user_id": 1, "name": "c"},
{"user_id": 2, "name": "a"},
],
)
try:
yield conn, grid, foreign_grid
finally:
await conn.run_sync(_drop_tables)
async def test_select_queries(METHOD_NAME) -> None:
conn, grid, _ = METHOD_NAME
parser = QueryOrderParser()
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"+data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (2,), (3,), (4,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(3,), (4,), (1,), (2,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data2",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(4,), (3,), (2,), (1,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data3,-data2",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(3,), (4,), (1,), (2,)]
assert test_ret == actual_ret
# default ordering
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (2,), (3,), (4,)]
assert test_ret == actual_ret
# without order marks, it's assumed to be ascending
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"data3,-data2,data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (3,), (2,), (4,)]
assert test_ret == actual_ret
# invalid syntax
with pytest.raises(ValueError):
parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"xxx",
)
async def test_column_map(METHOD_NAME) -> None:
conn, grid, _ = METHOD_NAME
parser = QueryOrderParser(
{
"v1": ("data1", None),
"v2": ("data2", None),
"v3": ("data3", None),
}
)
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-v3",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(4,), (2,), (3,), (1,)]
assert test_ret == actual_ret
# non-existent column in the column map
with pytest.raises(ValueError):
parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data2",
)
async def test_aggregated_foreign_fields(METHOD_NAME) -> None:
conn, grid, foreign_grid = METHOD_NAME
parser = QueryOrderParser(
{
"dogs_name": ("test_query_order_dogs_name", agg_to_array),
}
)
orig_query = (
sa.select(
[
grid.c.id,
agg_to_array(foreign_grid.c.name).label("dogs_name"),
]
)
.select_from(sa.join(grid, foreign_grid, grid.c.id == foreign_grid.c.user_id))
.group_by(grid)
)
sa_query = parser.append_ordering(
orig_query,
"dogs_name",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(2, ["a"]), (1, ["b", "c"])]
assert test_ret == actual_ret | null |
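# Grammar sketch implied by the tests above: the ordering spec is a
# comma-separated list of column names, each optionally prefixed with
# '+' (ascending, the default when no mark is given) or '-' (descending),
# e.g. "-data1,+data3,-data2"; unknown names raise ValueError.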
618 | ######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""Functionality for initialising metadynamics simulation processes."""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = ["run"]
from .._SireWrappers import System as _System
from .. import Process as _Process
from .. import Protocol as _Protocol
# Import common objects from BioSimSpace.MD._md
from ..MD._md import _file_extensions, _md_engines, _find_md_engines
def METHOD_NAME(
system,
protocol,
engine="auto",
gpu_support=False,
auto_start=True,
name="metamd",
work_dir=None,
seed=None,
property_map={},
**kwargs,
):
"""
Auto-configure and run a metadynamics process.
Parameters
----------
system : :class:`System <BioSimSpace._SireWrappers.System>`
The molecular system.
protocol : :class:`Protocol <BioSimSpace.Protocol.Metadynamics>`
The metadynamics protocol.
engine : str
The molecular dynamics engine to use. If "auto", then a matching
engine will automatically be chosen. Supported engines can be
found using 'BioSimSpace.Metadynamics.engines()'.
gpu_support : bool
Whether to choose an engine with GPU support.
auto_start : bool
Whether to start the process automatically.
name : str
The name of the process.
work_dir : str
The working directory for the process.
seed : int
A random number seed.
property_map : dict
A dictionary that maps system "properties" to their user defined
values. This allows the user to refer to properties with their
own naming scheme, e.g. { "charge" : "my-charge" }
kwargs : dict
        A dictionary of optional keyword arguments needed by the engine.
Returns
-------
process : :class:`Process <BioSimSpace.Process>`
A process to run the molecular dynamics protocol.
"""
# Check that the system is valid.
if not isinstance(system, _System):
raise TypeError("'system' must be of type 'BioSimSpace._SireWrappers.System'")
# Check that the protocol is valid.
if not isinstance(protocol, _Protocol.Metadynamics):
raise TypeError(
"'protocol' must be of type 'BioSimSpace.Protocol.Metadynamics'"
)
# Validate optional arguments.
if not isinstance(engine, str):
raise TypeError("'engine' must be of type 'str'.")
md_engine = engine.upper().replace(" ", "")
if not isinstance(gpu_support, bool):
raise TypeError("'gpu_support' must be of type 'bool'")
if not isinstance(auto_start, bool):
raise TypeError("'auto_start' must be of type 'bool'")
if not isinstance(name, str):
raise TypeError("'name' must be of type 'str'")
if work_dir is not None:
if not isinstance(work_dir, str):
raise TypeError("'work_dir' must be of type 'str'")
if seed is not None:
if not type(seed) is int:
raise TypeError("'seed' must be of type 'int'")
if not isinstance(property_map, dict):
raise TypeError("'property_map' must be of type 'dict'")
# Find a molecular dynamics engine and executable.
engines, exes = _find_md_engines(system, protocol, md_engine, gpu_support)
    # Create the process object, returning a process from the first
    # supported engine that can instantiate one.
for engine, exe in zip(engines, exes):
try:
# AMBER.
if engine == "AMBER":
process = _Process.Amber(
system,
protocol,
exe=exe,
name=name,
work_dir=work_dir,
seed=seed,
property_map=property_map,
**kwargs,
)
# GROMACS.
elif engine == "GROMACS":
process = _Process.Gromacs(
system,
protocol,
exe=exe,
name=name,
work_dir=work_dir,
seed=seed,
property_map=property_map,
**kwargs,
)
# OPENMM.
elif engine == "OPENMM":
if gpu_support:
platform = "CUDA"
else:
platform = "CPU"
# Don't pass the executable name through so that this works on Windows too.
process = _Process.OpenMM(
system,
protocol,
exe=None,
name=name,
work_dir=work_dir,
seed=seed,
property_map=property_map,
platform=platform,
**kwargs,
)
# Start the process.
if auto_start:
return process.start()
else:
return process
except:
pass
# If we got here, then we couldn't create a process.
if md_engine == "AUTO":
raise Exception(
f"Unable to create a process using any supported engine: {engines}"
)
else:
raise Exception(
f"Unable to create a process using the chosen engine: {md_engine}"
) | null |
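# Usage sketch (the entry point above is exported as ``run`` per ``__all__``;
# `system` and `protocol` are assumed to be prepared elsewhere):
#   process = run(system, protocol, engine="GROMACS", work_dir="metad")
#   process.wait()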
619 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class CreateDeviceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'CreateDevice')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_GbId(self):
return self.get_query_params().get('GbId')
def set_GbId(self,GbId):
self.add_query_param('GbId',GbId)
def get_Latitude(self):
return self.get_query_params().get('Latitude')
def set_Latitude(self,Latitude):
self.add_query_param('Latitude',Latitude)
def METHOD_NAME(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_AutoStart(self):
return self.get_query_params().get('AutoStart')
def set_AutoStart(self,AutoStart):
self.add_query_param('AutoStart',AutoStart)
def get_ParentId(self):
return self.get_query_params().get('ParentId')
def set_ParentId(self,ParentId):
self.add_query_param('ParentId',ParentId)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_Vendor(self):
return self.get_query_params().get('Vendor')
def set_Vendor(self,Vendor):
self.add_query_param('Vendor',Vendor)
def get_AlarmMethod(self):
return self.get_query_params().get('AlarmMethod')
def set_AlarmMethod(self,AlarmMethod):
self.add_query_param('AlarmMethod',AlarmMethod)
def get_DirectoryId(self):
return self.get_query_params().get('DirectoryId')
def set_DirectoryId(self,DirectoryId):
self.add_query_param('DirectoryId',DirectoryId)
def get_Longitude(self):
return self.get_query_params().get('Longitude')
def set_Longitude(self,Longitude):
self.add_query_param('Longitude',Longitude)
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_Ip(self):
return self.get_query_params().get('Ip')
def set_Ip(self,Ip):
self.add_query_param('Ip',Ip)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Params(self):
return self.get_query_params().get('Params')
def set_Params(self,Params):
self.add_query_param('Params',Params)
def get_Url(self):
return self.get_query_params().get('Url')
def set_Url(self,Url):
self.add_query_param('Url',Url)
def get_Port(self):
return self.get_query_params().get('Port')
def set_Port(self,Port):
self.add_query_param('Port',Port)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_PosInterval(self):
return self.get_query_params().get('PosInterval')
def set_PosInterval(self,PosInterval):
self.add_query_param('PosInterval',PosInterval)
def get_Dsn(self):
return self.get_query_params().get('Dsn')
def set_Dsn(self,Dsn):
self.add_query_param('Dsn',Dsn)
def get_Username(self):
return self.get_query_params().get('Username')
def set_Username(self,Username):
self.add_query_param('Username',Username)
def get_AutoPos(self):
return self.get_query_params().get('AutoPos')
def set_AutoPos(self,AutoPos):
		self.add_query_param('AutoPos',AutoPos)
620 | # Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from guild import click_util
from . import ac_support
def _ac_python(_ctx, _param, incomplete):
return ac_support.ac_python(incomplete)
def _ac_guild_version_or_path(_ctx, _param, incomplete):
versions = [ver for ver in METHOD_NAME() if ver.startswith(incomplete)]
return versions + ac_support.ac_filename(["whl"], incomplete)
def METHOD_NAME():
import json
from urllib.request import urlopen
def f():
resp = urlopen("https://pypi.org/pypi/guildai/json")
data = json.loads(resp.read())
return sorted(data.get("releases") or {})
return ac_support.ac_safe_apply(f, []) or []
def _ac_guild_home(_ctx, _param, incomplete):
return ac_support.ac_dir(incomplete)
def _ac_requirement(_ctx, _param, incomplete):
return ac_support.ac_filename(["txt"], incomplete)
def _ac_dir(_ctx, _param, incomplete):
return ac_support.ac_dir(incomplete)
@click.command()
@click.argument("dir", default=None, required=False, shell_complete=_ac_dir)
@click.option(
"--venv",
is_flag=True,
help=(
"Creates a virtual environment in DIR. This option "
"enabled pre-0.9 init behavior and is implied when "
"specifying any of the virtual environment settings below."
),
)
@click.option(
"-n",
"--name",
metavar="NAME",
help=("Environment name (default is env parent directory name). Implies `--venv`."),
)
@click.option(
"-p",
"--python",
metavar="VERSION",
help=("Version of Python to use for the environment. Implies `--venv`."),
shell_complete=_ac_python,
)
@click.option(
"-g",
"--guild",
metavar="VERSION_OR_PATH",
help=(
"Version of Guild AI to use for the environment. The activate "
"version of Guild is installed by default. This value may "
"alternatively be a path to a Guild wheel distribution. Implies "
"`--venv`."
),
shell_complete=_ac_guild_version_or_path,
)
@click.option(
"-s",
"--system-site-packages",
is_flag=True,
help="Give environment access to system site packages. Implies `--venv`.",
)
@click.option(
"-H",
"--no-isolate",
is_flag=True,
help=(
"Use current Guild home for the environment. Ignored if `--guild-home` "
"is specified."
),
)
@click.option(
"-h",
"--guild-home",
metavar="PATH",
help=(
"Alternative Guild home location for with the environment. "
"By default, Guild home is '.guild' in `DIR`."
),
shell_complete=_ac_guild_home,
)
@click.option(
"-r",
"--requirements",
metavar="REQ",
multiple=True,
help=(
"Install required package or packages defined in a file. May be "
"used multiple times. Implies `--venv`."
),
shell_complete=_ac_requirement,
)
@click.option(
"-P",
"--path",
metavar="DIR",
multiple=True,
help="Include DIR as a Python path in the environment. Implies `--venv`.",
shell_complete=_ac_dir,
)
@click.option(
"--no-reqs",
is_flag=True,
help=(
"Don't install from requirements.txt or guild.yml in environment "
"parent directory. Implies `--venv`."
),
)
@click.option(
"-l",
"--isolate-resources",
is_flag=True,
help="Use a local cache when initializing an environment.",
)
@click.option(
"-y",
"--yes",
is_flag=True,
help="Initialize a Guild environment without prompting.",
)
@click.option(
"--no-progress",
is_flag=True,
help=(
"Don't show progress when installing environment packages. Ignored "
"if a virtual environment is not created."
),
)
@click.option(
"--pre",
"pre_release",
is_flag=True,
help="Install pre-release versions of applicable packages. Implies `--venv`.",
)
@click_util.use_args
def init(args):
"""Initialize a Guild environment.
By default, creates `.guild` under `DIR`, or the current directory
if `DIR` is omitted.
NOTE: As of 0.9, this command does NOT create a virtual directory
in `DIR`. To enable pre-0.9 behavior, specify `--venv` along with
any of the applicable virtual environment options. We recommend
creating a virtual environment using standard tools rather than
using this command. Backward compatibility will be maintained via
the `--venv` option.
Options that are associated with virtual environments are noted as
such below.
### Resource Cache
By default resources are cached and shared at the user level in
`~/.guild/cache/resources` so that resources downloaded from one
environment are available to other environments. You can modify
    this behavior to have all resources downloaded locally to the
    environment by specifying `--isolate-resources`.
### Python Interpreter (virtual environments only)
Use `--python` to specify the Python interpreter to use within the
generated virtual environment. By default, the default Python
interpreter for `virtualenv` is used unless `python` is explicitly
    listed as a requirement. If a virtual environment is not created,
    `--python` is ignored.
### Requirements (virtual environments only)
By default, any required packages listed under packages.requires
in `guild.yml` in the environment parent directory are installed
into the environment. Use `--no-reqs` to suppress this behavior.
Additionally, packages defined in `requirements.txt` in the
environment parent directory will be installed. Use `--no-reqs` to
suppress this behavior.
Note that packages defined in `guild.yml` use Guild package names
while packages defined in `requirements.txt` use PyPI package
names.
For information on requirements files, see:
https://bit.ly/guild-help-req-files
You may explicitly specify requirements file using `-r` or
`--requirement`. If `-r, --requirement` is specified, Guild will
not automatically install packages in `requirements.txt` -- that
file must be specified explicitly in the command.
### Guild AI Version (virtual environments only)
By default `init` installs the active version of Guild AI in the
initialized environment. To install a different version, or to
install a Guild wheel distribution file use the `--guild` option.
"""
from . import init_impl
init_impl.main(args) | null |
621 | # SPDX-License-Identifier: LGPL-2.1-or-later
import os
import shutil
import yaml
import pytest
import libnmstate
from libnmstate.schema import Interface
from libnmstate.schema import InterfaceType
from libnmstate.schema import InterfaceState
from .testlib.cmdlib import exec_cmd
from .testlib.assertlib import assert_absent
from .testlib.assertlib import assert_state_match
from .testlib.statelib import show_only
TEST_YAML1_CONTENT = """
---
interfaces:
- name: dummy0
type: dummy
state: up
ipv4:
enabled: false
ipv6:
enabled: false
"""
TEST_YAML2_CONTENT = """
---
interfaces:
- name: dummy0
type: dummy
state: up
ipv4:
address:
- ip: 192.0.2.252
prefix-length: 24
- ip: 192.0.2.251
prefix-length: 24
dhcp: false
enabled: true
ipv6:
address:
- ip: 2001:db8:2::1
prefix-length: 64
- ip: 2001:db8:1::1
prefix-length: 64
autoconf: false
dhcp: false
enabled: true
"""
TEST_YAML3_CONTENT = """
capture:
dummy_iface: interfaces.type == "dummy"
desired:
interfaces:
- name: "{{ capture.dummy_iface.interfaces.0.name }}"
state: absent
"""
CONFIG_DIR = "/etc/nmstate"
TEST_CONFIG1_FILE_PATH = f"{CONFIG_DIR}/01-nmstate-test.yml"
TEST_CONFIG1_APPLIED_FILE_PATH = f"{CONFIG_DIR}/01-nmstate-test.applied"
TEST_CONFIG2_FILE_PATH = f"{CONFIG_DIR}/02-nmstate-test.yml"
TEST_CONFIG2_APPLIED_FILE_PATH = f"{CONFIG_DIR}/02-nmstate-test.applied"
TEST_CONFIG3_FILE_PATH = f"{CONFIG_DIR}/03-nmstate-policy-test.yml"
TEST_CONFIG3_APPLIED_FILE_PATH = f"{CONFIG_DIR}/03-nmstate-policy-test.applied"
DUMMY1 = "dummy1"
@pytest.fixture
def nmstate_etc_config():
if not os.path.isdir(CONFIG_DIR):
os.mkdir(CONFIG_DIR)
for file_path, content in [
(
TEST_CONFIG1_FILE_PATH,
TEST_YAML1_CONTENT,
),
(
TEST_CONFIG2_FILE_PATH,
TEST_YAML2_CONTENT,
),
]:
with open(file_path, "w") as fd:
fd.write(content)
yield
libnmstate.apply(
{
Interface.KEY: [
{
Interface.NAME: "dummy0",
Interface.STATE: InterfaceState.ABSENT,
}
]
}
)
os.remove(TEST_CONFIG1_APPLIED_FILE_PATH)
os.remove(TEST_CONFIG2_APPLIED_FILE_PATH)
def test_nmstate_service_apply(nmstate_etc_config):
exec_cmd("systemctl restart nmstate".split(), check=True)
desire_state = yaml.load(TEST_YAML2_CONTENT, Loader=yaml.SafeLoader)
assert_state_match(desire_state)
assert not os.path.exists(TEST_CONFIG1_FILE_PATH)
assert os.path.isfile(TEST_CONFIG1_APPLIED_FILE_PATH)
assert not os.path.exists(TEST_CONFIG2_FILE_PATH)
assert os.path.isfile(TEST_CONFIG2_APPLIED_FILE_PATH)
@pytest.fixture
def dummy1_up():
libnmstate.apply(
{
Interface.KEY: [
{
Interface.NAME: DUMMY1,
Interface.STATE: InterfaceState.UP,
Interface.TYPE: InterfaceType.DUMMY,
}
]
}
)
yield
libnmstate.apply(
{
Interface.KEY: [
{
Interface.NAME: DUMMY1,
Interface.STATE: InterfaceState.ABSENT,
}
]
}
)
def test_nmstate_service_apply_nmpolicy(dummy1_up):
with open(TEST_CONFIG3_FILE_PATH, "w") as fd:
fd.write(TEST_YAML3_CONTENT)
current_state = show_only((DUMMY1,))
assert current_state[Interface.KEY][0][Interface.NAME] == DUMMY1
try:
exec_cmd("systemctl restart nmstate".split(), check=True)
assert_absent(DUMMY1)
assert os.path.isfile(TEST_CONFIG3_APPLIED_FILE_PATH)
finally:
os.remove(TEST_CONFIG3_APPLIED_FILE_PATH)
def METHOD_NAME():
if os.path.isdir(CONFIG_DIR):
shutil.rmtree(CONFIG_DIR, ignore_errors=True)
exec_cmd("nmstatectl service".split(), check=True) | null |
622 | from shared.database.common import *
from shared.database.account.transaction import Transaction
from shared.database.account.address import Address
from shared.database.user import User
class Account(Base):
__tablename__ = 'account'
"""
An abstract holding object for transactions.
    For example, it could be declared as a "billing" account
    and linked to billing transactions.
"""
# Balance is stored in transaction_previous.balance_new
id = Column(Integer, primary_key = True)
nickname = Column(String)
mode_trainer_or_builder = Column(String) # 'trainer', 'builder'
account_type = Column(String) # 'billing'
# or "CASH" or "Billing"? # "Invoice" / monthly...
    # This should be different from type?
credit_limit = Column(Integer)
payment_method_on_file = Column(Boolean)
security_disable = Column(Boolean)
transaction_previous_id = Column(Integer, ForeignKey('transaction.id'))
transaction_previous = relationship(Transaction,
foreign_keys = [transaction_previous_id])
address_primary_id = Column(Integer, ForeignKey('address.id'))
address_primary = relationship(Address,
foreign_keys = [address_primary_id])
primary_user_id = Column(Integer, ForeignKey('userbase.id'))
primary_user = relationship(User,
foreign_keys = [primary_user_id])
# org_id = Column(Integer, ForeignKey('org.id'))
# org = relationship('Org', foreign_keys=[org_id])
stripe_id = Column(String)
member_created_id = Column(Integer, ForeignKey('member.id'))
member_created = relationship("Member", foreign_keys = [member_created_id])
member_updated_id = Column(Integer, ForeignKey('member.id'))
member_updated = relationship("Member", foreign_keys = [member_updated_id])
time_created = Column(DateTime, default = datetime.datetime.utcnow)
time_updated = Column(DateTime, onupdate = datetime.datetime.utcnow)
def get_by_id(session,
account_id):
if account_id is None:
return False
return session.query(Account).filter(
Account.id == account_id).first()
def get_list(session,
mode_trainer_or_builder,
user_id = None,
account_type = None,
by_primary_user = True
):
"""
mode_trainer_or_builder ['trainer', 'builder']
Only supports by_primary_user at the moment
"""
# Do we actually need this check?
if user_id is None:
return False
if mode_trainer_or_builder is None:
return False
if by_primary_user is True:
query = session.query(Account).filter(
Account.primary_user_id == user_id)
if account_type:
query = query.filter(Account.account_type == account_type)
query = query.filter(Account.mode_trainer_or_builder == mode_trainer_or_builder)
return query.all()
def serialize(self):
transaction_previous_serialized = None
if self.transaction_previous:
transaction_previous_serialized = self.transaction_previous.serialize_for_account()
return {
'id': self.id,
'nickname': self.nickname,
'account_type': self.account_type,
'transaction_previous': transaction_previous_serialized,
'payment_method_on_file': self.payment_method_on_file
}
@staticmethod
def METHOD_NAME(
session,
primary_user,
mode_trainer_or_builder,
account_type = None,
nickname = "My Account"):
"""
"""
account = Account(nickname = nickname,
mode_trainer_or_builder = mode_trainer_or_builder,
account_type = account_type,
primary_user = primary_user,
member_created_id = primary_user.member_id)
session.add(account)
return account | null |
623 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDBInstancesOverviewRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dds', '2015-12-01', 'DescribeDBInstancesOverview','dds')
self.set_method('POST')
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def METHOD_NAME(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def set_InstanceClass(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_InstanceStatus(self): # String
return self.get_query_params().get('InstanceStatus')
def set_InstanceStatus(self, InstanceStatus): # String
self.add_query_param('InstanceStatus', InstanceStatus)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceIds(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
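# A minimal usage sketch, assuming valid Alibaba Cloud credentials; the access
# key values, region and filter value below are placeholders only.
from aliyunsdkcore.client import AcsClient

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
request = DescribeDBInstancesOverviewRequest()
request.set_InstanceStatus("Running")
# do_action_with_exception signs and sends the RPC call, returning raw bytes.
response = client.do_action_with_exception(request)
print(response)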
624 | #/*##########################################################################
# Copyright (C) 2004-2022 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
"""
This plugin opens a file selection dialog to load one or more images in a
new window. Usual image data formats are supported, as well as standard
image formats (JPG, PNG).
The tool is meant to provide an alternative view of the data, such as a
photograph of the sample or a different type of scientific measurement
of the same sample.
The window offers a cropping tool to crop the image to the currently
visible zoomed area and then resize it to fit the original size.
The mask of this plot widget is synchronized with the main stack widget.
"""
__author__ = "V.A. Sole - ESRF"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
import os
import logging
from PyMca5.PyMcaGui import PyMcaQt as qt
from PyMca5.PyMcaGui.pymca import ExternalImagesWindow
from PyMca5.PyMcaGui.pymca import ExternalImagesStackPluginBase
from PyMca5.PyMcaGui.pymca import StackPluginResultsWindow
from PyMca5.PyMcaGui.plotting import PyMca_Icons as PyMca_Icons
_logger = logging.getLogger(__name__)
class ExternalImagesStackPlugin( \
ExternalImagesStackPluginBase.ExternalImagesStackPluginBase):
def __init__(self, stackWindow, **kw):
ExternalImagesStackPluginBase.ExternalImagesStackPluginBase.__init__(self, stackWindow, **kw)
self.methodDict = {'Load': [self._loadImageFiles,
"Load Images",
PyMca_Icons.fileopen],
'Show': [self._showWidget,
"Show Image Browser",
PyMca_Icons.brushselect]}
self.__methodKeys = ['Load', 'Show']
self.widget = None
def stackUpdated(self):
self.widget = None
def selectionMaskUpdated(self):
if self.widget is None:
return
if self.widget.isHidden():
return
mask = self.getStackSelectionMask()
self.widget.setSelectionMask(mask)
def mySlot(self, ddict):
_logger.debug("mySlot %s %s", ddict['event'], ddict.keys())
if ddict['event'] == "selectionMaskChanged":
self.setStackSelectionMask(ddict['current'])
elif ddict['event'] == "addImageClicked":
self.addImage(ddict['image'], ddict['title'])
elif ddict['event'] == "removeImageClicked":
self.removeImage(ddict['title'])
elif ddict['event'] == "replaceImageClicked":
self.replaceImage(ddict['image'], ddict['title'])
elif ddict['event'] == "resetSelection":
self.setStackSelectionMask(None)
#Methods implemented by the plugin
def getMethods(self):
if self.widget is None:
return [self.__methodKeys[0]]
else:
return self.__methodKeys
def getMethodToolTip(self, name):
return self.methodDict[name][1]
def getMethodPixmap(self, name):
return self.methodDict[name][2]
def applyMethod(self, name):
return self.methodDict[name][0]()
def _createStackPluginWindow(self, imagenames, imagelist):
self.widget = StackPluginResultsWindow.StackPluginResultsWindow(parent=None,
usetab=False)
self.widget.buildAndConnectImageButtonBox()
self.widget.sigMaskImageWidgetSignal.connect(self.mySlot)
self.widget.setStackPluginResults(imagelist,
image_names=imagenames)
self._showWidget()
def _createStackPluginWindowQImage(self, imagenames, imagelist):
self.widget = ExternalImagesWindow.ExternalImagesWindow(parent=None,
rgbwidget=None,
selection=True,
colormap=True,
imageicons=True,
standalonesave=True)
self.widget.buildAndConnectImageButtonBox()
self.widget.sigMaskImageWidgetSignal.connect(self.mySlot)
self.widget.setImageData(None)
shape = self._requiredShape
self.widget.setQImageList(imagelist, shape[1], shape[0],
clearmask=False,
data=None,
imagenames=imagenames)
#data=self.__stackImageData)
self._showWidget()
def _showWidget(self):
if self.widget is None:
return
self.widget.show()
self.widget.raise_()
self.selectionMaskUpdated()
@property
def _dialogParent(self):
return self.widget
MENU_TEXT = "External Images Tool"
def METHOD_NAME(stackWindow, **kw):
ob = ExternalImagesStackPlugin(stackWindow)
return ob | null |
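# A minimal dispatch sketch, assuming `stackWindow` is an existing PyMca stack
# window instance; it shows how a host drives the plugin through the
# getMethods()/applyMethod() protocol defined above.
plugin = METHOD_NAME(stackWindow)
for name in plugin.getMethods():
    print(name, plugin.getMethodToolTip(name))
plugin.applyMethod('Load')  # opens the file selection dialog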
625 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, Optional
import torch
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.net_builder.discrete_dqn.dueling import Dueling
from reagent.net_builder.unions import (
DiscreteDQNNetBuilder__Union,
ValueNetBuilder__Union,
)
from reagent.training import (
ReAgentLightningModule,
ReinforceTrainer,
ReinforceTrainerParameters,
)
from reagent.workflow.types import ModelFeatureConfigProvider__Union, RewardOptions
logger = logging.getLogger(__name__)
@dataclass
class Reinforce(ModelManager):
__hash__ = param_hash
trainer_param: ReinforceTrainerParameters = field(
default_factory=ReinforceTrainerParameters
)
# using DQN net here because it supports `possible_actions_mask`
policy_net_builder: DiscreteDQNNetBuilder__Union = field(
# pyre-ignore
default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling())
)
value_net_builder: Optional[ValueNetBuilder__Union] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-ignore
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
sampler_temperature: float = 1.0
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
self._policy: Optional[Policy] = None
assert (
len(self.action_names) > 1
), f"REINFORCE needs at least 2 actions. Got {self.action_names}."
@property
def action_names(self):
return self.trainer_param.actions
def build_trainer(
self,
normalization_data_map: Dict[str, NormalizationData],
use_gpu: bool,
reward_options: Optional[RewardOptions] = None,
) -> ReinforceTrainer:
policy_net_builder = self.policy_net_builder.value
policy_network = policy_net_builder.build_q_network(
self.state_feature_config,
normalization_data_map[NormalizationKey.STATE],
len(self.action_names),
)
value_net = None
value_net_builder = self.value_net_builder
if value_net_builder:
value_net_builder = value_net_builder.value
value_net = value_net_builder.build_value_network(
normalization_data_map[NormalizationKey.STATE]
)
trainer = ReinforceTrainer(
policy=self._create_policy(policy_network),
value_net=value_net,
**self.trainer_param.asdict(), # pyre-ignore
)
return trainer
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
):
assert isinstance(trainer_module, ReinforceTrainer)
if serving:
assert normalization_data_map is not None
return create_predictor_policy_from_model(
self.METHOD_NAME(trainer_module, normalization_data_map)
)
else:
return self._create_policy(trainer_module.scorer)
def _create_policy(self, policy_network):
if self._policy is None:
sampler = SoftmaxActionSampler(temperature=self.sampler_temperature)
self._policy = Policy(scorer=policy_network, sampler=sampler)
return self._policy
def METHOD_NAME(
self,
trainer_module: ReAgentLightningModule,
normalization_data_map: Dict[str, NormalizationData],
) -> torch.nn.Module:
assert isinstance(trainer_module, ReinforceTrainer)
policy_serving_module = self.policy_net_builder.value.METHOD_NAME(
q_network=trainer_module.scorer,
state_normalization_data=normalization_data_map[NormalizationKey.STATE],
action_names=self.action_names,
state_feature_config=self.state_feature_config,
)
return policy_serving_module
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config() | null |
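# A minimal configuration sketch, assuming the surrounding ReAgent workflow;
# the action names and temperature are illustrative, and every other field
# keeps the dataclass defaults declared above.
manager = Reinforce(
    trainer_param=ReinforceTrainerParameters(actions=["left", "right"]),
    sampler_temperature=0.5,
)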
626 | import unittest
from typing import List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from lightly.loss import VICRegLLoss
class TestVICRegLLoss(unittest.TestCase):
def test_forward(self) -> None:
torch.manual_seed(0)
criterion = VICRegLLoss()
global_view_features = [
(torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
]
global_view_grids = [torch.randn((2, 7, 7, 2)) for _ in range(2)]
local_view_features = [
(torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(6)
]
local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(6)]
loss = criterion.forward(
global_view_features=global_view_features,
global_view_grids=global_view_grids,
local_view_features=local_view_features,
local_view_grids=local_view_grids,
)
assert loss > 0
@unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
def test_forward__cuda(self) -> None:
torch.manual_seed(0)
criterion = VICRegLLoss()
global_view_features = [
(torch.randn((2, 32)).cuda(), torch.randn((2, 7, 7, 8)).cuda())
for _ in range(2)
]
global_view_grids = [torch.randn((2, 7, 7, 2)).cuda() for _ in range(2)]
local_view_features = [
(torch.randn((2, 32)).cuda(), torch.randn((2, 4, 4, 8)).cuda())
for _ in range(6)
]
local_view_grids = [torch.randn((2, 4, 4, 2)).cuda() for _ in range(6)]
loss = criterion.forward(
global_view_features=global_view_features,
global_view_grids=global_view_grids,
local_view_features=local_view_features,
local_view_grids=local_view_grids,
)
assert loss > 0
def test_forward__error_global_view_features_and_grids_not_same_length(
self,
) -> None:
criterion = VICRegLLoss()
global_view_features = [
(torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
]
global_view_grids = [torch.randn((2, 7, 7, 2)) for _ in range(1)]
error_msg = (
"global_view_features and global_view_grids must have same length but "
"found 2 and 1."
)
with self.assertRaisesRegex(ValueError, error_msg):
criterion.forward(
global_view_features=global_view_features,
global_view_grids=global_view_grids,
)
def test_forward__error_local_view_features_and_grids_not_same_length(self) -> None:
criterion = VICRegLLoss()
local_view_features = [
(torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(2)
]
local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(1)]
error_msg = (
"local_view_features and local_view_grids must have same length but found "
"2 and 1."
)
with self.assertRaisesRegex(ValueError, error_msg):
criterion.forward(
global_view_features=[],
global_view_grids=[],
local_view_features=local_view_features,
local_view_grids=local_view_grids,
)
def test_forward__error_local_view_features_and_grids_must_both_be_set(
self,
) -> None:
criterion = VICRegLLoss()
local_view_features = [
(torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(2)
]
local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(2)]
error_msg = (
"local_view_features and local_view_grids must either both be set or None "
"but found <class 'list'> and <class 'NoneType'>."
)
with self.assertRaisesRegex(ValueError, error_msg):
criterion.forward(
global_view_features=[],
global_view_grids=[],
local_view_features=local_view_features,
local_view_grids=None,
)
error_msg = (
"local_view_features and local_view_grids must either both be set or None "
"but found <class 'NoneType'> and <class 'list'>."
)
with self.assertRaisesRegex(ValueError, error_msg):
criterion.forward(
global_view_features=[],
global_view_grids=[],
local_view_features=None,
local_view_grids=local_view_grids,
)
def METHOD_NAME(self):
# Compare against original implementation.
torch.manual_seed(0)
criterion = VICRegLLoss()
global_view_features = [
(torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
]
local_view_features = [
(torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(6)
]
loss = criterion._global_loss(
global_view_features=global_view_features,
local_view_features=local_view_features,
)
embedding = [x for x, _ in global_view_features + local_view_features]
expected_loss = _reference_global_loss(embedding=embedding)
assert loss == expected_loss
# Note: We cannot compare our local loss implementation against the original code
# because the resulting values slightly differ. See VICRegLLoss._local_loss for
# details.
def _reference_global_loss(
embedding: List[Tensor],
inv_coeff: float = 25.0,
var_coeff: float = 25.0,
cov_coeff: float = 1.0,
) -> Tensor:
# Original global loss from VICRegL:
# https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L421
def center(x):
return x - x.mean(dim=0)
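    # off_diagonal removes the diagonal of a square matrix without a mask:
    # after flattening and dropping the last element, viewing the buffer as
    # (n - 1, n + 1) places every remaining diagonal entry in column 0,
    # which the [:, 1:] slice discards.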
def off_diagonal(x: Tensor) -> Tensor:
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
num_views = len(embedding)
inv_loss = 0.0
iter_ = 0
for i in range(2):
for j in np.delete(np.arange(np.sum(num_views)), i):
inv_loss = inv_loss + F.mse_loss(embedding[i], embedding[j])
iter_ = iter_ + 1
inv_loss = inv_coeff * inv_loss / iter_
var_loss = 0.0
cov_loss = 0.0
iter_ = 0
embedding_dim = embedding[0].shape[1]
for i in range(num_views):
x = center(embedding[i])
std_x = torch.sqrt(x.var(dim=0) + 0.0001)
var_loss = var_loss + torch.mean(torch.relu(1.0 - std_x))
cov_x = (x.T @ x) / (x.size(0) - 1)
cov_loss = cov_loss + off_diagonal(cov_x).pow_(2).sum().div(embedding_dim)
iter_ = iter_ + 1
var_loss = var_coeff * var_loss / iter_
cov_loss = cov_coeff * cov_loss / iter_
return inv_loss + var_loss + cov_loss | null |
627 | from galaxy.model.dataset_collections import (
matching,
registry,
type_description,
)
TYPE_REGISTRY = registry.DatasetCollectionTypesRegistry()
TYPE_DESCRIPTION_FACTORY = type_description.CollectionTypeDescriptionFactory(TYPE_REGISTRY)
def test_pairs_match():
METHOD_NAME(pair_instance(), pair_instance())
def test_lists_of_same_cardinality_match():
METHOD_NAME(list_instance(), list_instance())
def test_nested_lists_match():
nested_list = list_instance(
elements=[
pair_element("data1"),
pair_element("data2"),
pair_element("data3"),
],
collection_type="list:paired",
)
METHOD_NAME(nested_list, nested_list)
def test_different_types_cannot_match():
assert_cannot_match(list_instance(), pair_instance())
assert_cannot_match(pair_instance(), list_instance())
def test_lists_of_different_cardinality_do_not_match():
list_1 = list_instance(ids=["data1", "data2"])
list_2 = list_instance(ids=["data1", "data2", "data3"])
assert_cannot_match(list_1, list_2)
assert_cannot_match(list_2, list_1)
def test_valid_collection_subcollection_matching():
flat_list = list_instance(ids=["data1", "data2", "data3"])
nested_list = list_instance(
elements=[
pair_element("data11"),
pair_element("data21"),
pair_element("data31"),
],
collection_type="list:paired",
)
assert_cannot_match(flat_list, nested_list)
assert_cannot_match(nested_list, flat_list)
METHOD_NAME((nested_list, "paired"), flat_list)
def METHOD_NAME(*items):
to_match = build_collections_to_match(*items)
matching.MatchingCollections.for_collections(to_match, TYPE_DESCRIPTION_FACTORY)
def assert_cannot_match(*items):
to_match = build_collections_to_match(*items)
threw_exception = False
try:
matching.MatchingCollections.for_collections(to_match, TYPE_DESCRIPTION_FACTORY)
except Exception:
threw_exception = True
assert threw_exception
def build_collections_to_match(*items):
to_match = matching.CollectionsToMatch()
for i, item in enumerate(items):
if isinstance(item, tuple):
collection_instance, subcollection_type = item
else:
collection_instance, subcollection_type = item, None
to_match.add("input_%d" % i, collection_instance, subcollection_type)
return to_match
def pair_element(element_identifier):
return collection_element(element_identifier, pair_instance().collection)
def pair_instance():
paired_collection_instance = collection_instance(
collection_type="paired",
elements=[
hda_element("left"),
hda_element("right"),
],
)
return paired_collection_instance
def list_instance(collection_type="list", elements=None, ids=None):
if not elements:
if ids is None:
ids = ["data1", "data2"]
elements = [hda_element(_) for _ in ids]
list_collection_instance = collection_instance(collection_type=collection_type, elements=elements)
return list_collection_instance
class MockCollectionInstance:
def __init__(self, collection_type, elements):
self.collection = MockCollection(collection_type, elements)
class MockCollection:
def __init__(self, collection_type, elements):
self.collection_type = collection_type
self.elements = elements
self.populated = True
class MockCollectionElement:
def __init__(self, element_identifier, collection):
self.element_identifier = element_identifier
self.child_collection = collection
self.hda = None
class MockHDAElement:
def __init__(self, element_identifier):
self.element_identifier = element_identifier
self.child_collection = False
self.hda = object()
collection_instance = MockCollectionInstance
collection = MockCollection
collection_element = MockCollectionElement
hda_element = MockHDAElement | null |
628 | import random
import string
from contextlib import ExitStack
from django.core.cache import cache
from django.utils import translation
from test_plus.test import TestCase
from pola.s3 import create_s3_client, create_s3_resource
class TestPolaWebView(TestCase):
def METHOD_NAME(self) -> None:
random_prefix = "".join(random.choices(list(string.ascii_lowercase), k=10))
self.bucket_name = f"test-bucket-{random_prefix}"
self.s3_client = create_s3_client()
self.s3_client.create_bucket(Bucket=self.bucket_name)
self.customization_settings = ExitStack()
self.customization_settings.enter_context(translation.override('pl'))
self.customization_settings.__enter__()
def tearDown(self) -> None:
bucket = create_s3_resource().Bucket(self.bucket_name)
bucket.objects.all().delete()
self.s3_client.delete_bucket(Bucket=self.bucket_name)
self.customization_settings.close()
def test_should_return_404_for_invalid_cms_view(self):
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/cms/invalid')
self.assertEqual(response.status_code, 404)
self.assertIn("<title>Nie ma takiej strony</title>", response.content.decode())
self.assertIn("<h1>Nie ma takiej strony</h1>", response.content.decode())
def test_should_return_404_for_invalid_normal_view(self):
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/invalid')
self.assertEqual(response.status_code, 404)
self.assertIn("<title>Nie ma takiej strony</title>", response.content.decode())
self.assertIn("<h1>Nie ma takiej strony</h1>", response.content.decode())
def test_should_return_404_when_404_html_exists(self):
content = "test-404.html"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="404.html",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/invalid')
self.assertEqual(response.status_code, 404)
self.assertEqual(content, response.content.decode())
def test_should_return_200_when_index_exists(self):
content = "index.html"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="article/index.html",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/article/')
self.assertEqual(response.status_code, 200)
self.assertEqual(content, response.content.decode())
def test_should_return_200_for_home_page(self):
content = "index.html"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="index.html",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('')
self.assertEqual(response.status_code, 200)
self.assertEqual(content, response.content.decode())
def test_should_return_200_when_file_exists(self):
content = "test.js"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="test.js",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/test.js')
self.assertEqual(response.status_code, 200)
self.assertEqual(content, response.content.decode())
def test_should_support_caching_based_on_etag(self):
content = "test.js"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="test.js",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/test.js')
self.assertEqual(response.status_code, 200)
self.assertEqual(content, response.content.decode())
valid_etag = response.headers['ETag']
invalid_etag = response.headers['ETag'] + "2"
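            # Each row is (HTTP method, conditional header, ETag to send,
            # expected status, expected body): only If-None-Match with the
            # current ETag short-circuits to 304; HEAD responses always
            # carry an empty body.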
for method, header_name, etag, expected_code, expected_content in (
('get', 'HTTP_IF_NONE_MATCH', valid_etag, 304, ''),
('head', 'HTTP_IF_NONE_MATCH', valid_etag, 304, ''),
('get', 'HTTP_IF_MATCH', valid_etag, 200, content),
('head', 'HTTP_IF_MATCH', valid_etag, 200, ''),
('get', 'HTTP_IF_NONE_MATCH', invalid_etag, 200, content),
('head', 'HTTP_IF_NONE_MATCH', invalid_etag, 200, ''),
('get', 'HTTP_IF_MATCH', invalid_etag, 200, content),
('head', 'HTTP_IF_MATCH', invalid_etag, 200, ''),
):
cache.clear()
if method == 'get':
response = self.client.get('/test.js', **{header_name: etag})
elif method == 'head':
response = self.client.head('/test.js', **{header_name: etag})
self.assertEqual(response.status_code, expected_code)
self.assertEqual(expected_content, response.content.decode())
def test_should_support_conditional_requests(self):
content = "test.js"
self.s3_client.put_object(
Body=content,
Bucket=self.bucket_name,
Key="test.js",
)
with self.settings(AWS_STORAGE_WEB_BUCKET_NAME=self.bucket_name):
response = self.client.get('/test.js')
self.assertEqual(response.status_code, 200)
self.assertEqual(content, response.content.decode())
response = self.client.get('/test.js', **{'HTTP_IF_MODIFIED_SINCE': response.headers['Last-Modified']})
self.assertEqual(response.status_code, 304)
self.assertEqual('', response.content.decode())
response = self.client.head('/test.js', **{'HTTP_IF_MODIFIED_SINCE': response.headers['Last-Modified']})
self.assertEqual(response.status_code, 304)
self.assertEqual('', response.content.decode()) | null |
629 | # Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import io
import logging
import sys
from pipefuse.chain import ChainingService
_DEBUG_INPUT_OPERATIONS = ['read', 'write', 'getxattr']
_DEBUG_OUTPUT_OPERATIONS = ['getxattr', 'listxattr']
if sys.version_info >= (3, 0):
_BYTE_TYPES = (bytearray, bytes)
else:
_BYTE_TYPES = (bytearray, bytes, str)
def METHOD_NAME(args, kwargs):
args_string = _args_string(args)
kwargs_string = _kwargs_string(kwargs)
if args_string and kwargs_string:
complete_args_string = args_string + ', ' + kwargs_string
elif args_string:
complete_args_string = args_string
else:
complete_args_string = kwargs_string
return complete_args_string
def _args_string(args):
return ', '.join(_trimmed(v) for v in args)
def _kwargs_string(kwargs):
return ', '.join(str(k) + '=' + _trimmed(v) for k, v in kwargs.items())
def _trimmed(value):
if isinstance(value, io.BytesIO):
return 'BYTES'
elif isinstance(value, _BYTE_TYPES):
return 'BYTES#' + str(len(value))
else:
return str(value)
def _merge_outputs(outputs):
return str(outputs)
class RecordingFS(ChainingService):
def __init__(self, inner):
"""
Recording File System.
It records any call to the inner file system.
        :param inner: Inner file system whose calls are recorded.
"""
self._inner = inner
self._tag = type(inner).__name__
def __getattr__(self, name):
if not hasattr(self._inner, name):
return None
attr = getattr(self._inner, name)
if not callable(attr):
return attr
return self._wrap(attr, name=name)
def __call__(self, name, *args, **kwargs):
if not hasattr(self._inner, name):
return getattr(self, name)(*args, **kwargs)
attr = getattr(self._inner, name)
return self._wrap(attr, name=name)(*args, **kwargs)
def _wrap(self, attr, name=None):
@functools.wraps(attr)
def _wrapped_attr(*args, **kwargs):
method_name = name or args[0]
complete_args_string = METHOD_NAME(args, kwargs)
if method_name in _DEBUG_INPUT_OPERATIONS:
logging.debug('[%s Input Recorder] %s (%s)' % (self._tag, method_name, complete_args_string))
else:
logging.info('[%s Input Recorder] %s (%s)' % (self._tag, method_name, complete_args_string))
outputs = attr(*args, **kwargs)
if method_name in _DEBUG_OUTPUT_OPERATIONS:
logging.debug('[%s Output Recorder] %s (%s) -> (%s)' % (self._tag, method_name, complete_args_string,
_merge_outputs(outputs)))
return outputs
return _wrapped_attr
class RecordingFileSystemClient(ChainingService):
def __init__(self, inner):
"""
Recording File System Client.
It records any call to the inner file system client.
        :param inner: Inner file system client whose calls are recorded.
"""
self._inner = inner
self._tag = type(inner).__name__
def __getattr__(self, name):
if hasattr(self._inner, name):
attr = getattr(self._inner, name)
if callable(attr):
def _wrapped_attr(*args, **kwargs):
complete_args_string = METHOD_NAME(args, kwargs)
logging.info('[%s Input Recorder] %s (%s)' % (self._tag, name, complete_args_string))
return attr(*args, **kwargs)
return _wrapped_attr
else:
return attr
else:
return getattr(self._inner, name) | null |
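# A minimal wrapping sketch; `inner_client` and the `upload` call are
# illustrative only: any method looked up on the wrapped object is logged on
# input and then delegated unchanged, so the wrapper can be inserted into an
# existing chain without other changes.
recorded = RecordingFileSystemClient(inner_client)
recorded.upload(b'data', '/remote/path')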
630 | import unittest
import subprocess
import os
import utils
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
cleanup_py = os.path.join(TOPDIR, 'cleanup_code.py')
class Tests(unittest.TestCase):
def make_dummy_format(self, name, d, exitval=0):
"""Make a dummy formmater binary in the directory d.
Return the modified environment needed to put this binary
in the PATH."""
fname = os.path.join(d, name)
utils.write_file(fname, "#!/bin/sh\necho foo\nexit %d\n" % exitval)
        os.chmod(fname, 0o755)  # rwxr-xr-x
env = os.environ.copy()
env['PATH'] = d + os.pathsep + env['PATH']
return env
def test_python_reindent(self):
"""Test cleanup_code script on Python files with reindent."""
for args in ['--all', 'test.py']:
with utils.TempDir() as tmpdir:
pyfile = os.path.join(tmpdir, 'test.py')
utils.write_file(pyfile, 'def foo():\n bar\n')
p = subprocess.Popen([cleanup_py, args], cwd=tmpdir)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
# 2-space indentation should have been corrected to 4-space
self.assertEqual(utils.read_file(pyfile),
'def foo():\n bar\n')
def test_python_autopep8(self):
"""Test cleanup_code script on Python files with autopep8."""
for args in ['--all', 'test.py']:
with utils.TempDir() as tmpdir:
env = self.make_dummy_format('autopep8', tmpdir)
pyfile = os.path.join(tmpdir, 'test.py')
utils.write_file(pyfile, 'def foo():\n bar\n')
p = subprocess.Popen([cleanup_py, '-a', '-v', args],
cwd=tmpdir, env=env)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
# dummy autopep8 should have written out 'foo'
self.assertEqual(utils.read_file(pyfile), 'foo\n')
def test_cpp_clang(self):
"""Test cleanup_code script on C++ files with clang-format."""
# directories that should be ignored
igdirs = ['dependency', 'eigen3', 'igdir', 'git-repo']
for args in ['--all', 'test.cpp']:
with utils.TempDir() as tmpdir:
env = self.make_dummy_format('clang-format', tmpdir)
for d in igdirs:
os.mkdir(os.path.join(tmpdir, d))
utils.write_file(os.path.join(tmpdir, d, 'test.h'), 'bar')
# git-repo is a git submodule so shouldn't be descended into
os.mkdir(os.path.join(tmpdir, 'git-repo', '.git'))
cppfile = os.path.join(tmpdir, 'test.cpp')
utils.write_file(cppfile, 'bar')
p = subprocess.Popen([cleanup_py, '-v', '-e', 'igdir', args],
cwd=tmpdir, env=env)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
# dummy clang-format should have written out 'foo'
self.assertEqual(utils.read_file(cppfile), 'foo\n')
# ignored directories should be unchanged
for d in igdirs:
con = utils.read_file(os.path.join(tmpdir, d, 'test.h'))
self.assertEqual(con, 'bar')
def METHOD_NAME(self):
"""Test cleanup_code script with no files selected."""
with utils.TempDir() as tmpdir:
p = subprocess.Popen([cleanup_py], cwd=tmpdir)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 2)
def test_run_error(self):
"""Test cleanup_code handling of subprocess error"""
with utils.TempDir() as tmpdir:
env = self.make_dummy_format('autopep8', tmpdir, exitval=1)
pyfile = os.path.join(tmpdir, 'test.py')
utils.write_file(pyfile, 'bar')
p = subprocess.Popen([cleanup_py, '-a', '--all'], cwd=tmpdir,
env=env)
stdout, stderr = p.communicate()
# error should be caught and not fail entire job
self.assertEqual(p.returncode, 0)
# file should be unchanged
self.assertEqual(utils.read_file(pyfile), 'bar')
if __name__ == '__main__':
unittest.main() | null |
631 | """
About the Bengali.AI Speech corpus
The competition dataset comprises about 1200 hours of recordings of Bengali speech.
Your goal is to transcribe recordings of speech that is out-of-distribution with respect to the training set.
Note that this is a Code Competition, in which the actual test set is hidden.
In this public version, we give some sample data in the correct format to help you author your solutions.
The full test set contains about 20 hours of speech in almost 8000 MP3 audio files.
All of the files in the test set are encoded at a sample rate of 32k, a bit rate of 48k, in one channel.
It is covered in more detail at https://arxiv.org/abs/2305.09688
Please download manually by
kaggle competitions download -c bengaliai-speech
"""
import logging
import os
from collections import defaultdict
from concurrent.futures.process import ProcessPoolExecutor
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, Iterator, List, Optional, Sequence, Tuple, Union
from tqdm.auto import tqdm
from lhotse import (
get_ffmpeg_torchaudio_info_enabled,
set_ffmpeg_torchaudio_info_enabled,
)
from lhotse.audio import Recording, RecordingSet
from lhotse.qa import fix_manifests, validate_recordings_and_supervisions
from lhotse.recipes.utils import manifests_exist
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike
BENGALIAI_SPEECH = ("train", "valid", "test")
@contextmanager
def disable_ffmpeg_torchaudio_info() -> Iterator[None]:
enabled = get_ffmpeg_torchaudio_info_enabled()
set_ffmpeg_torchaudio_info_enabled(False)
try:
yield
finally:
set_ffmpeg_torchaudio_info_enabled(enabled)
def _parse_utterance(
corpus_dir: Pathlike,
audio_path: Pathlike,
audio_id: str,
text: Optional[str] = "",
) -> Optional[Tuple[Recording, SupervisionSegment]]:
audio_path = audio_path.resolve()
if not audio_path.is_file():
logging.warning(f"No such file: {audio_path}")
return None
recording = Recording.from_file(
path=audio_path,
recording_id=audio_id,
)
segment = SupervisionSegment(
id=audio_id,
recording_id=audio_id,
text=text,
start=0.0,
duration=recording.duration,
channel=0,
language="Bengali",
)
return recording, segment
def _prepare_subset(
subset: str,
corpus_dir: Pathlike,
audio_info: Optional[dict] = None,
num_jobs: int = 1,
) -> Tuple[RecordingSet, SupervisionSet]:
"""
    Returns the RecordingSet and SupervisionSet given a dataset part.
    :param subset: str, the name of the subset.
    :param corpus_dir: Pathlike, the path of the data dir.
    :return: the RecordingSet and SupervisionSet for the given subset.
"""
corpus_dir = Path(corpus_dir)
if subset == "test":
part_path = corpus_dir / "test_mp3s"
else:
part_path = corpus_dir / "train_mp3s"
audio_paths = list(part_path.rglob("*.mp3"))
with disable_ffmpeg_torchaudio_info():
with ProcessPoolExecutor(num_jobs) as ex:
futures = []
recordings = []
supervisions = []
for audio_path in tqdm(audio_paths, desc="Distributing tasks"):
audio_id = os.path.split(str(audio_path))[1].replace(".mp3", "")
if audio_info is not None:
if audio_id not in audio_info.keys():
continue
text = audio_info[audio_id]
else:
text = None
futures.append(
ex.submit(_parse_utterance, corpus_dir, audio_path, audio_id, text)
)
for future in tqdm(futures, desc="Processing"):
result = future.result()
if result is None:
continue
recording, segment = result
recordings.append(recording)
supervisions.append(segment)
recording_set = RecordingSet.from_recordings(recordings)
supervision_set = SupervisionSet.from_segments(supervisions)
return recording_set, supervision_set
def METHOD_NAME(
corpus_dir: Pathlike,
output_dir: Optional[Pathlike] = None,
num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions
:param corpus_dir: Path to the Bengali.AI Speech dataset.
:param output_dir: Pathlike, the path where to write the manifests.
:return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'recordings' and 'supervisions'.
"""
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
logging.info("Preparing Bengali.AI Speech...")
subsets = BENGALIAI_SPEECH
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
manifests = defaultdict(dict)
with open(corpus_dir / "train.csv") as f:
audio_info = f.read().splitlines()
train_info = {}
valid_info = {}
for line in audio_info[1:]:
if ",train" in line:
line = line.replace(",train", "").split(",", 1)
train_info[line[0]] = line[1]
elif ",valid" in line:
line = line.replace(",valid", "").split(",", 1)
valid_info[line[0]] = line[1]
for part in tqdm(subsets, desc="Dataset parts"):
logging.info(f"Processing Bengali.AI Speech subset: {part}")
if manifests_exist(
part=part,
output_dir=output_dir,
prefix="bengaliai_speech",
suffix="jsonl.gz",
):
logging.info(
f"Bengali.AI Speech subset: {part} already prepared - skipping."
)
continue
recording_set, supervision_set = _prepare_subset(
subset=part,
corpus_dir=corpus_dir,
            audio_info=(
                train_info if part == "train"
                else valid_info if part == "valid"
                else None
            ),
num_jobs=num_jobs,
)
# Fix manifests
recording_set, supervision_set = fix_manifests(recording_set, supervision_set)
validate_recordings_and_supervisions(recording_set, supervision_set)
if output_dir is not None:
supervision_set.to_file(
output_dir / f"bengaliai_speech_supervisions_{part}.jsonl.gz"
)
recording_set.to_file(
output_dir / f"bengaliai_speech_recordings_{part}.jsonl.gz"
)
manifests[part] = {"recordings": recording_set, "supervisions": supervision_set}
return manifests | null |
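# A minimal invocation sketch, assuming the corpus was downloaded with the
# kaggle CLI as described in the module docstring; both paths are
# placeholders.
manifests = METHOD_NAME(
    corpus_dir="bengaliai-speech",
    output_dir="data/manifests",
    num_jobs=4,
)
train_recordings = manifests["train"]["recordings"]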
632 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddCasterLayoutRequest(RpcRequest):
def __init__(self):
		RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCasterLayout', 'live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BlendLists(self): # RepeatList
return self.get_query_params().get('BlendList')
def set_BlendLists(self, BlendList): # RepeatList
for depth1 in range(len(BlendList)):
self.add_query_param('BlendList.' + str(depth1 + 1), BlendList[depth1])
def get_CasterId(self): # String
return self.get_query_params().get('CasterId')
def set_CasterId(self, CasterId): # String
self.add_query_param('CasterId', CasterId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AudioLayers(self): # RepeatList
return self.get_query_params().get('AudioLayer')
def METHOD_NAME(self, AudioLayer): # RepeatList
for depth1 in range(len(AudioLayer)):
if AudioLayer[depth1].get('VolumeRate') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.VolumeRate', AudioLayer[depth1].get('VolumeRate'))
if AudioLayer[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.FixedDelayDuration', AudioLayer[depth1].get('FixedDelayDuration'))
if AudioLayer[depth1].get('ValidChannel') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.ValidChannel', AudioLayer[depth1].get('ValidChannel'))
def get_VideoLayers(self): # RepeatList
return self.get_query_params().get('VideoLayer')
def set_VideoLayers(self, VideoLayer): # RepeatList
for depth1 in range(len(VideoLayer)):
if VideoLayer[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FixedDelayDuration', VideoLayer[depth1].get('FixedDelayDuration'))
if VideoLayer[depth1].get('FillMode') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FillMode', VideoLayer[depth1].get('FillMode'))
if VideoLayer[depth1].get('HeightNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.HeightNormalized', VideoLayer[depth1].get('HeightNormalized'))
if VideoLayer[depth1].get('PositionRefer') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionRefer', VideoLayer[depth1].get('PositionRefer'))
if VideoLayer[depth1].get('PositionNormalized') is not None:
for depth2 in range(len(VideoLayer[depth1].get('PositionNormalized'))):
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionNormalized.' + str(depth2 + 1), VideoLayer[depth1].get('PositionNormalized')[depth2])
if VideoLayer[depth1].get('WidthNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.WidthNormalized', VideoLayer[depth1].get('WidthNormalized'))
def get_MixLists(self): # RepeatList
return self.get_query_params().get('MixList')
def set_MixLists(self, MixList): # RepeatList
for depth1 in range(len(MixList)):
self.add_query_param('MixList.' + str(depth1 + 1), MixList[depth1]) | null |
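# A minimal parameter sketch showing how the repeated-list setters above
# flatten nested structures into indexed query parameters; all values are
# illustrative.
request = AddCasterLayoutRequest()
request.set_CasterId("<caster-id>")
request.set_VideoLayers([{
    'FillMode': 'fit',
    'PositionRefer': 'topLeft',
    'PositionNormalized': [0.1, 0.1],
    'WidthNormalized': 0.5,
    'HeightNormalized': 0.5,
}])
# The call above yields query parameters such as VideoLayer.1.FillMode=fit
# and VideoLayer.1.PositionNormalized.1=0.1.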
633 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictStr, conint, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.docker_worker_state import DockerWorkerState
from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType
class DockerWorkerRegistryEntryData(BaseModel):
"""
DockerWorkerRegistryEntryData
"""
id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
user_id: StrictStr = Field(..., alias="userId")
name: constr(strict=True, min_length=3) = Field(...)
worker_type: DockerWorkerType = Field(..., alias="workerType")
state: DockerWorkerState = Field(...)
created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
labels: conlist(StrictStr) = Field(..., description="The labels used for specifying the run-worker-relationship")
docker_version: Optional[StrictStr] = Field(None, alias="dockerVersion")
__properties = ["id", "userId", "name", "workerType", "state", "createdAt", "lastModifiedAt", "labels", "dockerVersion"]
@validator('id')
def id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
@validator('name')
def name_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
return value
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def to_str(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def METHOD_NAME(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DockerWorkerRegistryEntryData:
"""Create an instance of DockerWorkerRegistryEntryData from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
return _dict
@classmethod
def from_dict(cls, obj: dict) -> DockerWorkerRegistryEntryData:
"""Create an instance of DockerWorkerRegistryEntryData from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DockerWorkerRegistryEntryData.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DockerWorkerRegistryEntryData) in the input: " + str(obj))
_obj = DockerWorkerRegistryEntryData.parse_obj({
"id": obj.get("id"),
"user_id": obj.get("userId"),
"name": obj.get("name"),
"worker_type": obj.get("workerType"),
"state": obj.get("state"),
"created_at": obj.get("createdAt"),
"last_modified_at": obj.get("lastModifiedAt"),
"labels": obj.get("labels"),
"docker_version": obj.get("dockerVersion")
})
return _obj
| null |
634 | # Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import flask_socketio
import octobot_commons.pretty_printer as pretty_printer
import octobot_trading.enums as trading_enums
import octobot_services.interfaces as services_interfaces
import octobot_trading.api as octobot_trading_api
import tentacles.Services.Interfaces.web_interface as web_interface
import tentacles.Services.Interfaces.web_interface.models as models
import tentacles.Services.Interfaces.web_interface.websockets as websockets
class DashboardNamespace(websockets.AbstractWebSocketNamespaceNotifier):
@staticmethod
def _get_profitability():
profitability_digits = None
has_real_trader, has_simulated_trader, \
real_global_profitability, simulated_global_profitability, \
real_percent_profitability, simulated_percent_profitability, \
real_no_trade_profitability, simulated_no_trade_profitability, \
market_average_profitability = services_interfaces.get_global_profitability()
profitability_data = {
"market_average_profitability": pretty_printer.round_with_decimal_count(market_average_profitability,
profitability_digits)
}
if has_real_trader:
profitability_data["bot_real_profitability"] = \
pretty_printer.round_with_decimal_count(real_percent_profitability, profitability_digits)
profitability_data["bot_real_flat_profitability"] = \
pretty_printer.round_with_decimal_count(real_global_profitability, profitability_digits)
profitability_data["real_no_trade_profitability"] = \
pretty_printer.round_with_decimal_count(real_no_trade_profitability, profitability_digits)
if has_simulated_trader:
profitability_data["bot_simulated_profitability"] = \
pretty_printer.round_with_decimal_count(simulated_percent_profitability, profitability_digits)
profitability_data["bot_simulated_flat_profitability"] = \
pretty_printer.round_with_decimal_count(simulated_global_profitability, profitability_digits)
profitability_data["simulated_no_trade_profitability"] = \
pretty_printer.round_with_decimal_count(simulated_no_trade_profitability, profitability_digits)
return profitability_data
@staticmethod
def _format_new_data(exchange_id=None, trades=None, order=None, symbol=None):
exchange_manager = octobot_trading_api.get_exchange_manager_from_exchange_id(exchange_id)
return {
"trades": models.format_trades(trades),
"orders": models.format_orders(octobot_trading_api.get_open_orders(exchange_manager, symbol=symbol), 0),
"simulated": octobot_trading_api.is_trader_simulated(exchange_manager),
"symbol": symbol,
"exchange_id": exchange_id
}
@websockets.websocket_with_login_required_when_activated
def on_profitability(self):
flask_socketio.emit("profitability", self._get_profitability())
def all_clients_send_notifications(self, **kwargs) -> bool:
if self._has_clients():
try:
self.socketio.emit("new_data",
{
"data": self._format_new_data(**kwargs)
},
namespace=self.namespace)
return True
except Exception as e:
self.logger.exception(e, True, f"Error when sending web notification: {e}")
return False
@websockets.websocket_with_login_required_when_activated
def METHOD_NAME(self, data):
try:
flask_socketio.emit("candle_graph_update_data", {
"request": data,
"data": models.get_currency_price_graph_update(data["exchange_id"],
models.get_value_from_dict_or_string(data["symbol"]),
data["time_frame"],
backtesting=False,
minimal_candles=True,
ignore_trades=True,
ignore_orders=not models.get_display_orders())
})
except KeyError:
flask_socketio.emit("error", "missing exchange manager")
@websockets.websocket_with_login_required_when_activated
def on_connect(self):
super().on_connect()
self.on_profitability()
notifier = DashboardNamespace('/dashboard')
web_interface.register_notifier(web_interface.DASHBOARD_NOTIFICATION_KEY, notifier)
websockets.namespaces.append(notifier) | null |
635 | import asyncio
import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from hummingbot.connector.exchange.bitmart import bitmart_constants as CONSTANTS, bitmart_utils as utils
from hummingbot.connector.exchange.bitmart.bitmart_auth import BitmartAuth
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.core.web_assistant.connections.data_types import WSJSONRequest, WSResponse
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
if TYPE_CHECKING:
from hummingbot.connector.exchange.bitmart.bitmart_exchange import BitmartExchange
class BitmartAPIUserStreamDataSource(UserStreamTrackerDataSource):
_logger: Optional[HummingbotLogger] = None
def __init__(
self,
auth: BitmartAuth,
trading_pairs: List[str],
connector: 'BitmartExchange',
api_factory: WebAssistantsFactory
):
super().__init__()
self._auth: BitmartAuth = auth
self._trading_pairs = trading_pairs
self._connector = connector
self._api_factory = api_factory
async def _connected_websocket_assistant(self) -> WSAssistant:
"""
Creates an instance of WSAssistant connected to the exchange
"""
ws: WSAssistant = await self.METHOD_NAME()
await ws.connect(
ws_url=CONSTANTS.WSS_PRIVATE_URL,
ping_timeout=CONSTANTS.WS_PING_TIMEOUT)
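        # The connector authenticates first: it sends a login frame built
        # from the auth helper and only proceeds once the exchange confirms
        # the "login" event.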
payload = {
"op": "login",
"args": self._auth.websocket_login_parameters()
}
login_request: WSJSONRequest = WSJSONRequest(payload=payload)
async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_SUBSCRIBE):
await ws.send(login_request)
response: WSResponse = await ws.receive()
message = response.data
if "errorCode" in message or "error_code" in message or message.get("event") != "login":
self.logger().error("Error authenticating the private websocket connection")
raise IOError(f"Private websocket connection authentication failed ({message})")
return ws
async def _subscribe_channels(self, websocket_assistant: WSAssistant):
try:
symbols = [await self._connector.exchange_symbol_associated_to_pair(trading_pair=trading_pair)
for trading_pair in self._trading_pairs]
payload = {
"op": "subscribe",
"args": [f"{CONSTANTS.PRIVATE_ORDER_PROGRESS_CHANNEL_NAME}:{symbol}" for symbol in symbols]
}
subscribe_request: WSJSONRequest = WSJSONRequest(payload=payload)
async with self._api_factory.throttler.execute_task(limit_id=CONSTANTS.WS_SUBSCRIBE):
await websocket_assistant.send(subscribe_request)
self.logger().info("Subscribed to private account and orders channels...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().exception("Unexpected error occurred subscribing to order book trading and delta streams...")
raise
async def _process_websocket_messages(self, websocket_assistant: WSAssistant, queue: asyncio.Queue):
async for ws_response in websocket_assistant.iter_messages():
data: Dict[str, Any] = ws_response.data
decompressed_data = utils.decompress_ws_message(data)
try:
                if isinstance(decompressed_data, str):
json_data = json.loads(decompressed_data)
else:
json_data = decompressed_data
except asyncio.CancelledError:
raise
except Exception:
self.logger().warning(f"Invalid event message received through the order book data source "
f"connection ({decompressed_data})")
continue
if "errorCode" in json_data or "errorMessage" in json_data:
raise ValueError(f"Error message received in the order book data source: {json_data}")
await self._process_event_message(event_message=json_data, queue=queue)
async def _process_event_message(self, event_message: Dict[str, Any], queue: asyncio.Queue):
if len(event_message) > 0 and "table" in event_message and "data" in event_message:
queue.put_nowait(event_message)
async def METHOD_NAME(self) -> WSAssistant:
if self._ws_assistant is None:
self._ws_assistant = await self._api_factory.get_ws_assistant()
return self._ws_assistant | null |
636 | from plugin import plugin
@plugin('timeconv')
class timeconv():
"""
timeconv Documentation.
timeconv is a time converter.
Supports: picosecond, nanosecond, microsecond, millisecond, second, minute, hour, day, week, month, year
Usage: The input time measurement units are:
ps : picosecond,
ns : nanosecond,
    mus : microsecond,
    ms : millisecond,
s : second,
min : minute,
h : hour,
d : day,
wk : week,
mon : month,
yr : year
First you will be asked to enter the amount you want to convert.
Second you will be asked to enter the time measurement unit of the amount.
And then you will be asked to enter to which time measurement unit you want to convert.
"""
time_units = [
"yr", "mon", "wk", "d", "h", "min", "s", "ms", "mus", "ns", "ps"
]
units = {
"ps": "picosecond",
"ns": "nanosecond",
"mus": "microsecond",
"ms": "millisecond",
"s": "second",
"min": "minute",
"h": "hour",
"d": "day",
"wk": "week",
"mon": "month",
"yr": "year"
}
units_data = {
"yr2mon": 12,
"mon2wk": 4.34812141,
"wk2d": 7,
"d2h": 24,
"h2min": 60,
"min2s": 60,
"s2ms": 1000,
"ms2mus": 1000,
"mus2ns": 1000,
"ns2ps": 1000
}
def __call__(self, jarvis, s):
while True:
amount = jarvis.input_number('Enter an amount: ')
from_unit = self.get_units(jarvis, 'Enter from which unit: ')
to_unit = self.get_units(jarvis, 'Enter to which unit: ')
if (from_unit != to_unit):
break
else:
jarvis.say('Please enter different units')
convamount = self.time_convert(jarvis, amount, from_unit, to_unit)
precision = 0
        if not convamount.is_integer():
precision = jarvis.input_number("Please enter precision (max:12): ")
while True:
if (precision.is_integer() and precision <= 12):
break
else:
precision = jarvis.input_number("Please enter an integer (max:12): ")
convamount = round(convamount, int(precision))
outputText = self.METHOD_NAME(amount, convamount, from_unit, to_unit)
jarvis.say(outputText)
def time_convert(self, jarvis, amount, fr, to):
for i in range(len(self.time_units)):
if (self.time_units[i] == fr):
start = i
if (self.time_units[i] == to):
end = i
        reverse = end < start
        if reverse:
            start, end = end, start
        multiplier = 1
for i in range(start, end, 1):
kbuild = self.time_units[i] + "2" + self.time_units[i + 1]
multiplier = multiplier * self.units_data.get(kbuild)
        multiplier = round(multiplier, 17)
if reverse:
convamount = (1 / multiplier) * amount
else:
convamount = multiplier * amount
convamount = round(convamount, 12)
return convamount
def get_units(self, jarvis, prompt):
while True:
u = jarvis.input(prompt).lower()
if u in self.time_units:
return u
else:
prompt = 'Please enter a valid unit: '
continue
def METHOD_NAME(self, amount, convamount, from_unit, to_unit):
if (amount == 1):
fromdisp = self.units.get(from_unit)
else:
fromdisp = self.units.get(from_unit) + "s"
if (convamount == 1):
todisp = self.units.get(to_unit)
else:
todisp = self.units.get(to_unit) + "s"
txt = str(amount) + " " + fromdisp + " is equal to " + str(convamount) + " " + todisp
return txt | null |
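# A worked conversion sketch for time_convert above: converting 2 hours to
# seconds walks the unit chain h -> min -> s, multiplying adjacent factors,
# so multiplier = h2min * min2s = 60 * 60 = 3600 and the result is
# 3600 * 2 = 7200.0 seconds.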
637 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for the Multi-Genre Natural Language Inference (MNLI) dataset.
MNLI corpus is a crowd-sourced collection of 433k sentence pairs annotated with
textual entailment labels (3 classes: entailment, contradiction and neutral).
The corpus covers a range of genres of spoken and written text from 5 domains:
fiction, government, telephone, travel, slate (i.e., popular magazine).
It also contains ~20k evaluation data from out-of-domain genre (9/11,
face-to-face, letters, oup, verbatim). This evaluation dataset can be used for
evaluating model robustness under distribution shift and for out-of-domain
detection.
See https://cims.nyu.edu/~sbowman/multinli/ and corpus paper for further detail.
## References:
[1] Adina Williams, Nikita Nangia, Samuel Bowman.
A Broad-Coverage Challenge Corpus for Sentence Understanding through
Inference.
In _Proceedings of the 2018 Conference of the North American Chapter of
the Association for Computational Linguistics_, 2018.
https://www.aclweb.org/anthology/N18-1101/
"""
from typing import Any, Dict, Optional
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
class MnliDataset(base.BaseDataset):
"""Multi-NLI dataset builder class."""
def __init__(self,
split: str,
shuffle_buffer_size: Optional[int] = None,
num_parallel_parser_calls: int = 64,
mode: str = 'matched',
try_gcs: bool = False,
download_data: bool = False,
data_dir: Optional[str] = None,
is_training: Optional[bool] = None):
"""Create an Genomics OOD tf.data.Dataset builder.
Args:
split: a dataset split, either a custom tfds.Split or one of the
tfds.Split enums [TRAIN, VALIDATION, TEST] or their lowercase string
names.
shuffle_buffer_size: the number of example to use in the shuffle buffer
for tf.data.Dataset.shuffle().
num_parallel_parser_calls: the number of parallel threads to use while
preprocessing in tf.data.Dataset.map().
mode: Type of data to import. If mode = "matched", import the in-domain
data (glue/mnli_matched). If mode = "mismatched", import the
out-of-domain data (glue/mnli_mismatched).
try_gcs: Whether or not to try to use the GCS stored versions of dataset
files. Currently unsupported.
download_data: Whether or not to download data before loading. Currently
unsupported.
data_dir: Directory to read/write data, that is passed to the
tfds dataset_builder as a data_dir parameter.
is_training: Whether or not the given `split` is the training split. Only
required when the passed split is not one of ['train', 'validation',
'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
"""
if mode not in ('matched', 'mismatched'):
raise ValueError('"mode" must be either "matched" or "mismatched". '
'Got {}'.format(mode))
if mode == 'mismatched' and split == tfds.Split.TRAIN:
raise ValueError('No training data for mismatched domains.')
if is_training is None:
is_training = split in ['train', tfds.Split.TRAIN]
if split == tfds.Split.VALIDATION:
split = 'validation_' + mode
if split == tfds.Split.TEST:
split = 'test_' + mode
name = 'glue/mnli'
dataset_builder = tfds.builder(name, try_gcs=try_gcs, data_dir=data_dir)
super().__init__(
name=name,
dataset_builder=dataset_builder,
split=split,
is_training=is_training,
shuffle_buffer_size=shuffle_buffer_size,
num_parallel_parser_calls=num_parallel_parser_calls,
fingerprint_key='idx',
download_data=download_data)
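# Hedged usage sketch (not part of the original file), following the
# constructor signature above, e.g. loading the out-of-domain eval split:
#   ds = MnliDataset(split=tfds.Split.VALIDATION, mode='mismatched')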
def _create_process_example_fn(self) -> base.PreProcessFn:
"""Create a pre-process function to return labels and sentence tokens."""
def METHOD_NAME(example: Dict[str, tf.Tensor]) -> Dict[str, Any]:
"""Parse sentences and labels from a serialized tf.train.Example."""
idx = example['idx']
label = example['label']
text_a = example['premise']
text_b = example['hypothesis']
return {
'text_a': text_a,
'text_b': text_b,
'labels': label,
'idx': idx
}
return METHOD_NAME | null |
638 | # -*- coding: utf-8 -*-
"""Handle dashboard embed related tests.
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from datetime import timedelta
from django.contrib.auth.models import User
from django.db.models import Count, Q
from django.test.client import RequestFactory
from django.utils import timezone
from dashboard.models import Bounty, BountyFulfillment, Profile
from dashboard.views import users_fetch
from test_plus.test import TestCase
CURRENT_USERNAME = "asdfasdf"
def setup_bounties():
owners = [CURRENT_USERNAME, 'user2']
for owner in owners:
Bounty.objects.create(
title='foo',
value_in_token=3,
token_name='USDT',
network='rinkeby',
web3_created=timezone.now() - timedelta(days=7),
github_url='https://github.com/oogetyboogety/gitcointestproject/issues/28',
token_address='0x0',
issue_description='hello world',
bounty_owner_github_username=owner,
is_open=True,
accepted=True,
expires_date=timezone.now() + timedelta(days=1, hours=1),
idx_project_length=5,
project_length='Months',
bounty_type='Feature',
experience_level='Intermediate',
raw_data={},
idx_status='submitted',
bounty_owner_email='[email protected]',
current_bounty=True
)
BountyFulfillment.objects.create(
fulfiller_address='0x0000000000000000000000000000000000000000',
accepted=True,
bounty=Bounty.objects.first(),
token_name='USDT',
payout_amount=1.5,
profile=User.objects.filter(username='user1').first().profile
)
BountyFulfillment.objects.create(
fulfiller_address='0x0000000000000000000000000000000000000000',
accepted=True,
bounty=Bounty.objects.last(),
token_name='USDT',
payout_amount=1.5,
profile=User.objects.last().profile
)
class UsersListTest(TestCase):
"""Define tests for the user list."""
fixtures = ['tokens.json']
def METHOD_NAME(self):
self.request = RequestFactory()
self.current_user = User.objects.create(
password="asdfasdf", username=CURRENT_USERNAME)
current_user_profile = Profile.objects.create(
user=self.current_user, data={}, hide_profile=False, handle=CURRENT_USERNAME)
for i in range(20):
user = User.objects.create(password="{}".format(i),
username="user{}".format(i))
profile = Profile.objects.create(
user=user, data={}, hide_profile=False, handle="{}".format(i))
def test_user_list(self):
request = self.request
request.user = self.current_user
assert json.loads(users_fetch(request.get('/api/v0.1/users_fetch?user={}'.format(self.current_user.id))).content)['count'] == 21
def test_default_users_ordering_with_previous_workers_at_the_top(self):
setup_bounties()
all_profiles = Profile.objects.annotate(
worked_with=Count(
'fulfilled', filter=Q(
fulfilled__accepted=True,
fulfilled__bounty__bounty_owner_github_username__iexact=CURRENT_USERNAME
)
)
).order_by('-worked_with')
#assert all_profiles.values('user__username', 'worked_with')[0] == {'user__username': 'user1', 'worked_with': 1} | null |
639 | import socket
import json
import logManager
import requests
from time import sleep
from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf, ZeroconfServiceTypes
logging = logManager.logger.get_logger(__name__)
discovered_lights = []
def METHOD_NAME(zeroconf, service_type, name, state_change):
if "Elgato Key Light" in name and state_change is ServiceStateChange.Added:
info = zeroconf.get_service_info(service_type, name)
if info:
addresses = ["%s" % (socket.inet_ntoa(addr))
for addr in info.addresses]
discovered_lights.append([addresses[0], name])
logging.debug('<Elgato> mDNS device discovered: ' + addresses[0])
def discover(detectedLights, elgato_ips):
mdns_string = "_elg._tcp.local."  # Elgato Key Lights advertise the _elg._tcp service
logging.info('<Elgato> mDNS discovery for ' + mdns_string + ' started')
ip_version = IPVersion.V4Only
zeroconf = Zeroconf(ip_version=ip_version)
ServiceBrowser(zeroconf, mdns_string, handlers=[METHOD_NAME])
sleep(2)
if len(discovered_lights) == 0:
# Didn't find anything using mdns, trying elgato_ips
logging.info("<Elgato> Nothing found using mDNS, trying to find lights by IP")
for ip in elgato_ips:
try:
response = requests.get(
"http://"+ ip +":9123/elgato/accessory-info", timeout=3)
if response.status_code == 200:
json_resp = json.loads(response.content)
if json_resp['productName'] in ["Elgato Key Light Mini", "Elgato Key Light Air", "Elgato Key Light"]:
discovered_lights.append([ip, json_resp['displayName']])
except Exception as e:
logging.warning("<Elgato> ip %s is unknown device", ip)
lights = []
for device in discovered_lights:
try:
response = requests.get("http://"+ device[0] +":9123/elgato/accessory-info", timeout=3)
if response.status_code == 200:
json_accessory_info = json.loads(response.content)
logging.info("<Elgato> Found device: %s at IP %s" % (device[1], device[0]))
lights.append({"protocol": "elgato",
"name": json_accessory_info["displayName"] ,
"modelid": "LTW001", #Colortemp Bulb
"protocol_cfg": {
"ip": device[0],
"mdns_name": device[1],
"mac": json_accessory_info["macAddress"],
}
})
except Exception as e:
logging.warning("<Elgato> EXCEPTION: " + str(e))
break
for light in lights:
detectedLights.append(light)
def translate_range(value, old_min, old_max, new_min, new_max):
old_range = old_max - old_min
new_range = new_max - new_min
scaled_value = (((value - old_min) * new_range) / old_range) + new_min
new_value = max(min(scaled_value, new_max), new_min)
return int(new_value)
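# Worked example (illustrative, not from the original module): mapping the Hue
# mired range 153..500 onto Elgato's 143..344 scale is a plain linear rescale:
#   translate_range(153, 153, 500, 143, 344) -> 143
#   translate_range(500, 153, 500, 143, 344) -> 344
# Out-of-range inputs are clamped to the new bounds before the int() truncation.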
def set_light(light, data):
light_state = {}
if 'on' in data:
light_state['on'] = 1 if data['on'] else 0
if 'bri' in data and data['bri'] > 0:
light_state['brightness'] = round((data['bri'] / 255) * 100)
if 'ct' in data:
light_state['temperature'] = translate_range(data['ct'], 153, 500, 143, 344)
# Ignore unsupported values (xy, hue, sat)
if light_state:
json_data = json.dumps({"lights": [light_state]})
response = requests.put("http://"+light.protocol_cfg["ip"]+":9123/elgato/lights", data = json_data, headers={'Content-type': 'application/json'}, timeout=3)
return response.text
def get_light_state(light):
response = requests.get("http://"+light.protocol_cfg["ip"]+":9123/elgato/lights", timeout=3)
state = response.json()
light_info = state['lights'][0]
if light_info['on'] == 1:
light_state_on = True
else:
light_state_on = False
converted_state = {
'bri': round((light_info['brightness']/100)*255),
'on': light_state_on,
'ct': translate_range(light_info['temperature'], 143, 344, 153, 500),
'colormode': 'ct'
}
return converted_state | null |
640 | """
Unit tests for ``galaxy.web.framework.webapp``
"""
import logging
import re
from typing import (
cast,
Optional,
)
import galaxy.config
from galaxy.app_unittest_utils import galaxy_mock
from galaxy.structured_app import BasicSharedApp
from galaxy.webapps.base.webapp import (
GalaxyWebTransaction,
WebApplication,
)
log = logging.getLogger(__name__)
class StubGalaxyWebTransaction(GalaxyWebTransaction):
def _ensure_valid_session(self, session_cookie: str, create: bool = True) -> None:
pass
class CORSParsingMockConfig(galaxy_mock.MockAppConfig):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.allowed_origin_hostnames = self._parse_allowed_origin_hostnames(kwargs)
@staticmethod
def _parse_allowed_origin_hostnames(kwargs):
hostnames = kwargs.get("allowed_origin_hostnames")
return galaxy.config.Configuration._parse_allowed_origin_hostnames(hostnames)
class TestGalaxyWebTransactionHeaders:
def METHOD_NAME(self, allowed_origin_hostnames: Optional[str] = None) -> StubGalaxyWebTransaction:
app = cast(BasicSharedApp, galaxy_mock.MockApp())
app.config = CORSParsingMockConfig(allowed_origin_hostnames=allowed_origin_hostnames)
webapp = cast(WebApplication, galaxy_mock.MockWebapp(app.security))
environ = galaxy_mock.buildMockEnviron()
trans = StubGalaxyWebTransaction(environ, app, webapp, "session_cookie")
return trans
def assert_cors_header_equals(self, headers, should_be):
assert headers.get("access-control-allow-origin", None) == should_be
def assert_cors_header_missing(self, headers):
assert "access-control-allow-origin" not in headers
def test_parse_allowed_origin_hostnames(self) -> None:
"""Should return a list of (possibly) mixed strings and regexps"""
config = CORSParsingMockConfig()
# falsy listify value should return None
assert config._parse_allowed_origin_hostnames({"allowed_origin_hostnames": ""}) is None
# should parse regex if using fwd slashes, string otherwise
hostnames = config._parse_allowed_origin_hostnames(
{"allowed_origin_hostnames": r"/host\d{2}/,geocities.com,miskatonic.edu"}
)
assert isinstance(hostnames[0], re.Pattern)
assert isinstance(hostnames[1], str)
assert isinstance(hostnames[2], str)
def test_default_set_cors_headers(self) -> None:
"""No CORS headers should be set (or even checked) by default"""
trans = self.METHOD_NAME(allowed_origin_hostnames=None)
assert isinstance(trans, GalaxyWebTransaction)
trans.request.headers["Origin"] = "http://lisaskelprecipes.pinterest.com?id=kelpcake"
trans.set_cors_headers()
self.assert_cors_header_missing(trans.response.headers)
def test_set_cors_headers(self) -> None:
"""Origin should be echo'd when it matches an allowed hostname"""
# an asterisk is a special 'allow all' string
trans = self.METHOD_NAME(allowed_origin_hostnames="*,beep.com")
trans.request.headers["Origin"] = "http://xxdarkhackerxx.disney.com"
trans.set_cors_headers()
self.assert_cors_header_equals(trans.response.headers, "http://xxdarkhackerxx.disney.com")
# subdomains should pass
trans = self.METHOD_NAME(allowed_origin_hostnames=r"something.com,/^[\w\.]*beep\.com/")
trans.request.headers["Origin"] = "http://boop.beep.com"
trans.set_cors_headers()
self.assert_cors_header_equals(trans.response.headers, "http://boop.beep.com")
# ports should work
trans = self.METHOD_NAME(allowed_origin_hostnames=r"somethingelse.com,/^[\w\.]*beep\.com/")
trans.request.headers["Origin"] = "http://boop.beep.com:8080"
trans.set_cors_headers()
self.assert_cors_header_equals(trans.response.headers, "http://boop.beep.com:8080")
# localhost should work
trans = self.METHOD_NAME(allowed_origin_hostnames="/localhost/")
trans.request.headers["Origin"] = "http://localhost:8080"
trans.set_cors_headers()
self.assert_cors_header_equals(trans.response.headers, "http://localhost:8080")
# spoofing shouldn't be easy
trans.response.headers = {}
trans.request.headers["Origin"] = "http://localhost.badstuff.tv"
trans.set_cors_headers()
self.assert_cors_header_missing(trans.response.headers)
# unicode should work
trans = self.METHOD_NAME(allowed_origin_hostnames=r"/öbb\.at/")
trans.request.headers["Origin"] = "http://öbb.at"
trans.set_cors_headers()
assert trans.response.headers["access-control-allow-origin"] == "http://öbb.at" | null |
641 | import pytest
import annif.corpus
from annif.backend import get_backend
from annif.exception import NotInitializedException, NotSupportedException
stwfsa = pytest.importorskip("annif.backend.stwfsa")
stwfsa_backend_name = stwfsa.StwfsaBackend.name
_backend_conf = {
"language": "fi",
"concept_type_uri": "http://www.w3.org/2004/02/skos/core#Concept",
"sub_thesaurus_type_uri": "http://www.w3.org/2004/02/skos/core#Collection",
"thesaurus_relation_type_uri": "http://www.w3.org/2004/02/skos/core#member",
"thesaurus_relation_is_specialisation": True,
}
def METHOD_NAME(project):
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params={}, project=project
)
expected_default_params = {
"limit": 100,
"concept_type_uri": "http://www.w3.org/2004/02/skos/core#Concept",
"sub_thesaurus_type_uri": "http://www.w3.org/2004/02/skos/core#Collection",
"thesaurus_relation_type_uri": "http://www.w3.org/2004/02/skos/core#member",
"thesaurus_relation_is_specialisation": True,
"remove_deprecated": True,
"handle_title_case": True,
"extract_upper_case_from_braces": True,
"extract_any_case_from_braces": False,
"expand_ampersand_with_spaces": True,
"expand_abbreviation_with_punctuation": True,
"simple_english_plural_rules": False,
"use_txt_vec": False,
}
actual_params = stwfsa.params
assert expected_default_params == actual_params
def test_stwfsa_not_initialized(project):
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(backend_id="stwfsa", config_params={}, project=project)
with pytest.raises(NotInitializedException):
stwfsa.suggest(["example text"])[0]
def test_stwfsa_train(document_corpus, project, datadir):
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params=_backend_conf, project=project
)
stwfsa.train(document_corpus)
assert stwfsa._model is not None
model_file = datadir.join(stwfsa.MODEL_FILE)
assert model_file.exists()
assert model_file.size() > 0
def test_empty_corpus(project):
corpus = annif.corpus.DocumentList([])
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project
)
with pytest.raises(NotSupportedException):
stwfsa.train(corpus)
def test_cached_corpus(project):
corpus = "cached"
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project
)
with pytest.raises(NotSupportedException):
stwfsa.train(corpus)
def test_stwfsa_suggest_unknown(project):
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project
)
results = stwfsa.suggest(["1234"])[0]
assert len(results) == 0
def test_stwfsa_suggest(project, datadir):
stwfsa_type = get_backend(stwfsa_backend_name)
stwfsa = stwfsa_type(
backend_id=stwfsa_backend_name, config_params={"limit": 10}, project=project
)
# Just some randomly selected words, taken from YSO archaeology group.
# And "random" words between them
results = stwfsa.suggest(
[
"""random
muinais-DNA random random
labyrintit random random random
Eurooppalainen yleissopimus arkeologisen perinnön suojelusta random
Indus-kulttuuri random random random random
kiinteät muinaisjäännökset random random
makrofossiilit random
Mesa Verde random random random random
muinaismuistoalueet random random random
zikkuratit random random
termoluminesenssi random random random"""
]
)[0]
assert len(results) == 10
labyrinths = project.subjects.by_uri("http://www.yso.fi/onto/yso/p14174")
assert labyrinths in [result.subject_id for result in results] | null |
642 | import hashlib
import logging as log
import re
import shutil
import subprocess
from pathlib import Path
import termcolor
from tree_sitter import Node
def convert_loglevel(level: str) -> int:
if level == "debug":
return log.DEBUG
elif level == "info":
return log.INFO
elif level == "warning":
return log.WARNING
elif level == "error":
return log.ERROR
elif level == "fatal":
return log.FATAL
elif level == "critical":
return log.CRITICAL
raise ValueError(f'Unknown loglevel "{level}"')
def find_id_by_type(node: Node, node_types: list[str], type_must_match: bool) -> bytes:
"""
Recursively searches for a node sequence with given node types.
A valid sequence is a path from !\f$node_n\f$ to !\f$node_{(n + |node\_types|-1)}\f$ where
!\f$\forall i \in \{0, ..., |node\_types|-1\}: type(node_{(n + i)}) = node\_types_i\f$.
If a node sequence is found, this function returns the text associated with the
last node in the sequence.
:param node: Current node.
:param node_types: List of node types.
:param type_must_match: If true, it is mandatory for the current node that its type matches node_types[0]
:return: The text of the last node in a valid sequence, or an empty string if no such sequence exists.
"""
if len(node_types) == 0:
# No ids left to compare to: Nothing found
return b""
# Set true if:
# current node type matches.
# OR
# parent dictates that node type match
type_must_match = node.type == node_types[0] or type_must_match
if type_must_match and node.type != node_types[0]:
# This child has no matching type. Return.
return b""
if len(node_types) == 1 and type_must_match:
if node.type == node_types[0]:
# Found it
return node.text
else:
# Not found. Return to parent
return b""
# If this nodes type matches the first in the list
# we remove this one from the list.
# Otherwise, give the whole list to the child (since our type does not matter).
children_id_types = node_types[1:] if type_must_match else node_types
# Check if any child has a matching type.
for child in node.named_children:
res = find_id_by_type(child, children_id_types, type_must_match)
if res:
# A path from this node matches the id_types!
return res
# None of our children matched the type list.
return b""
def print_prominent_warning(msg: str) -> None:
print("\n" + separator_line_1("yellow"))
print(termcolor.colored("WARNING", "yellow", attrs=["bold"]) + "\n")
print(msg)
print(separator_line_1("yellow"))
input("Press enter to continue...\n")
def METHOD_NAME() -> int:
return shutil.get_terminal_size()[0]
def print_prominent_info(msg: str) -> None:
print("\n" + separator_line_1("blue"))
print(msg)
print(separator_line_1("blue"))
input("Press enter to continue...\n")
def bold(msg: str, color: str = None) -> str:
if color:
return termcolor.colored(msg, attrs=["bold"], color=color)
return termcolor.colored(msg, attrs=["bold"])
def colored(msg: str, color: str) -> str:
return termcolor.colored(msg, color=color)
def separator_line_1(color: str = None) -> str:
return f"{bold(f'⎼' * int(METHOD_NAME() / 2), color)}\n"
def separator_line_2(color: str = None) -> str:
return f"{bold(f'═' * int(METHOD_NAME() / 2), color)}\n"
def get_sha256(data: bytes) -> str:
h = hashlib.sha256()
h.update(data)
return h.hexdigest()
def get_header() -> str:
return (
"/* Capstone Disassembly Engine, http://www.capstone-engine.org */\n"
"/* By Nguyen Anh Quynh <[email protected]>, 2013-2022, */\n"
"/* Rot127 <[email protected]> 2022-2023 */\n"
"/* Automatically translated source file from LLVM. */\n\n"
"/* LLVM-commit: <commit> */\n"
"/* LLVM-tag: <tag> */\n\n"
"/* Only small edits allowed. */\n"
"/* For multiple similar edits, please create a Patch for the translator. */\n\n"
"/* Capstone's C++ file translator: */\n"
"/* https://github.com/capstone-engine/capstone/tree/next/suite/auto-sync */\n\n"
)
def run_clang_format(out_paths: list[Path], clang_format_file: Path):
for out_file in out_paths:
log.info(f"Format {out_file}")
subprocess.run(["clang-format-16", f"-style=file:{clang_format_file}", "-i", out_file])
def get_path(config_path: str) -> Path:
try:
res = subprocess.run(["git", "rev-parse", "--show-toplevel"], check=True, stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
log.fatal("Could not get repository top level directory.")
exit(1)
repo_root = res.stdout.decode("utf8").strip("\n")
if not Path(repo_root).exists():
log.fatal(f'The repository root directory is not a valid path "{repo_root}"')
exit(1)
if "{CS_ROOT}" in config_path:
p = Path(re.sub(r"\{CS_ROOT}", repo_root, config_path))
elif "{AUTO_SYNC_ROOT}" in config_path:
auto_sync_root = Path(repo_root).joinpath(Path("suite/auto-sync/"))
config_path = re.sub(r"\{AUTO_SYNC_ROOT}", ".", config_path)
p = auto_sync_root.joinpath(Path(config_path))
elif "{CPP_TRANSLATOR_ROOT}" in config_path:
cppt_root = Path(repo_root).joinpath(Path("suite/auto-sync/CppTranslator/"))
config_path = re.sub(r"\{CPP_TRANSLATOR_ROOT}", ".", config_path)
p = cppt_root.joinpath(Path(config_path))
else:
p = Path(config_path)
if not p.exists():
log.fatal(f'Path "{p.absolute().name}" in config does not exist.')
exit(1)
return p | null |
643 | from galaxy.managers import hdas
from galaxy.managers.histories import HistoryManager
from galaxy.model.tags import GalaxyTagHandler
from galaxy.util import unicodify
from .base import BaseTestCase
# =============================================================================
default_password = "123456"
user2_data = dict(email="[email protected]", username="user2", password=default_password)
# =============================================================================
class TestTagHandler(BaseTestCase):
def METHOD_NAME(self):
super().METHOD_NAME()
self.app.hda_manager = self.app[hdas.HDAManager]
self.app.history_manager = self.app[HistoryManager]
self.tag_handler = self.app[GalaxyTagHandler]
self.user = self.user_manager.create(**user2_data)
def _create_vanilla_hda(self, user=None):
owner = user or self.user
history1 = self.app.history_manager.create(name="history1", user=owner)
return self.app.hda_manager.create(history=history1)
def _check_tag_list(self, tags, expected_tags):
assert len(tags) == len(expected_tags)
actual_tags = []
for tag in tags:
if tag.user_value:
tag = f"{tag.user_tname}:{tag.user_value}"
else:
tag = tag.user_tname
actual_tags.append(tag)
expected = [unicodify(e) for e in expected_tags]
assert sorted(expected) == sorted(actual_tags), f"{expected} vs {actual_tags}"
def test_apply_item_tags(self):
tag_strings = [
"tag1",
"tag1:value1",
"tag1:value1:value11",
"\x00tag1",
"tag1:\x00value1",
"tag1,tag2",
"...",
".test",
"test.a.b",
]
expected_tags = [
["tag1"],
["tag1:value1"],
["tag1:value1:value11"],
["tag1"],
["tag1:value1"],
["tag1", "tag2"],
[],
["test"],
["test.a.b"],
]
for tag_string, expected_tag in zip(tag_strings, expected_tags):
hda = self._create_vanilla_hda()
self.tag_handler.apply_item_tags(user=self.user, item=hda, tags_str=tag_string)
self._check_tag_list(hda.tags, expected_tag)
def test_set_tag_from_list(self):
hda = self._create_vanilla_hda()
tags = ["tag1", "tag2"]
self.tag_handler.set_tags_from_list(self.user, hda, tags)
self._check_tag_list(hda.tags, tags)
# Setting tags should erase previous tags
self.tag_handler.set_tags_from_list(self.user, hda, ["tag1"])
self._check_tag_list(hda.tags, expected_tags=["tag1"])
def test_add_tag_from_list(self):
hda = self._create_vanilla_hda()
tags = ["tag1", "tag2"]
self.tag_handler.add_tags_from_list(self.user, hda, tags)
self._check_tag_list(tags=hda.tags, expected_tags=tags)
# Adding tags should keep previous tags
self.tag_handler.add_tags_from_list(self.user, hda, ["tag3"])
self._check_tag_list(hda.tags, expected_tags=["tag1", "tag2", "tag3"])
def test_remove_tag_from_list(self):
hda = self._create_vanilla_hda()
tags = ["tag1", "tag2", "tag3"]
self.tag_handler.set_tags_from_list(self.user, hda, tags)
self._check_tag_list(hda.tags, tags)
self.tag_handler.remove_tags_from_list(self.user, hda, ["tag1", "tag3"])
self._check_tag_list(hda.tags, ["tag2"])
def test_delete_item_tags(self):
hda = self._create_vanilla_hda()
tags = ["tag1"]
self.tag_handler.set_tags_from_list(self.user, hda, tags)
self.tag_handler.delete_item_tags(user=self.user, item=hda)
assert hda.tags == []
def test_unique_constraint_applied(self):
tag_name = "abc"
tag = self.tag_handler._create_tag_instance(tag_name)
same_tag = self.tag_handler._create_tag_instance(tag_name)
assert tag.id == same_tag.id
def test_item_has_tag(self):
hda = self._create_vanilla_hda()
tags = ["tag1"]
self.tag_handler.set_tags_from_list(self.user, hda, tags)
assert self.tag_handler.item_has_tag(self.user, item=hda, tag="tag1")
# ItemTagAssociation
assert self.tag_handler.item_has_tag(self.user, item=hda, tag=hda.tags[0])
# Tag
assert self.tag_handler.item_has_tag(self.user, item=hda, tag=hda.tags[0].tag)
assert not self.tag_handler.item_has_tag(self.user, item=hda, tag="tag2") | null |
644 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Sigforwarder catches signals and forwards them to its grandchildren processes
To use sigforwarder, configure CMake with
-DQDROUTERD_RUNNER="/absolute/path/to/sigforwarder.py rr record --print-trace-dir=1"
The parameter to rr causes it to print the trace name for each given router to ctest log.
Same output can be found in files in the build directory, such as `build/tests/system_test.dir/system_tests_autolinks/AutoLinkRetryTest/setUpClass/A-2.out`
```
$ ctest -I 15,15 -VV
[...]
15: Router A output file:
15: >>>>
15: /home/jdanek/.local/share/rr/skrouterd-22
15:
15: <<<<
```
## Motivation
The router tests offer the `-DQDROUTERD_RUNNER` CMake option.
This allows interposing any program of our choice between the Python test process and the skrouterd instances running under test.
For example, we may use the rr (record-reply debugger from Mozilla) as runner and record router execution this way.
The tests send SIGTERM to the immediate child when it is time to stop the router.
This is problematic with rr, because in response to SIGTERM, rr quits itself and does not record shutdown of its child (skrouterd).
Therefore, add sigforwarder on top, so the process tree looks like this
```
Python (test process)
|
sigforwarder (this program)
|
rr record
|
skrouterd (the Software Under Test)
```
Now, sigforwarder intercepts the SIGTERM and delivers it to skrouterd (actually, to all grandchildren processes we might have).
Note that rr option `--continue-through-signal` does something different than sigforwarder and it does not help here.
"""
import ctypes
import ctypes.util
import logging
import os
import signal
import subprocess
import sys
import unittest
from typing import List, Optional
import psutil
# logging.basicConfig(level=logging.DEBUG) # pytest with `-p no:logging -s`
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
"""Only one child process; we are single-threaded, no need to protect it"""
P: Optional[subprocess.Popen] = None
def pre_exec() -> int:
"""This will be run in the child before doing exec syscall; to be extra sure
we don't leave any zombies around. Because Linux has this feature."""
PR_SET_PDEATHSIG = 1
ret = libc.prctl(PR_SET_PDEATHSIG, signal.SIGHUP)
if ret != 0:
raise OSError("Failed to run prctl: " + os.strerror(ctypes.get_errno()))
return ret
def handle_signal(sig_number: int, stackframe):
"""Forward the signal we got to the grandchildren"""
del stackframe # unused
logging.debug(f"Sigforwarder got signal: {sig_number}")
if not P:
return
logging.debug("We have a child, forwarding signal to all our grandchildren")
for prc in psutil.process_iter(attrs=('pid', 'ppid')):
if prc.ppid() == P.pid:
logging.debug(f"Grandchild pid: {prc.pid}, sending signal: {sig_number} to it")
os.kill(prc.pid, sig_number)
def sigforwarder(program: str, program_args: List[str]) -> int:
global P
P = subprocess.Popen(
args=(program, *program_args),
preexec_fn=pre_exec
)
# first start the child, then install signal handler, so that we cannot
# be asked to handle a signal when child is not yet present
for s in (signal.SIGHUP, signal.SIGQUIT, signal.SIGTERM, signal.SIGINT):
signal.signal(s, handle_signal)
# rr will propagate exit code from skrouterd, so we only need to propagate from rr
logging.debug(f"Sigforwarder running {program}, waiting for status")
return P.wait()
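# Illustrative invocation (assumed paths, mirroring the module docstring):
#   ./sigforwarder.py rr record skrouterd -c /path/to/router.conf
# sigforwarder() starts "rr record ..." first and installs the handlers only
# afterwards, so a forwarded signal can never arrive while P is still None,
# and it propagates rr's exit status as its own.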
class SigforwarderTests(unittest.TestCase):
def test_preexec_fn(self):
res = pre_exec()
self.assertEqual(res, 0)
def test_run_child_process_propagate_exit_code(self):
res = sigforwarder("/bin/sh", ["-c", "exit 42"])
self.assertEqual(res, 42)
def METHOD_NAME(self):
res = sigforwarder("/usr/bin/bash", ["-c", r"""
kill -SIGTERM $PPID
"""])
self.assertEqual(res, 0)
def test_run_child_process_grandchild_gets_the_kill(self):
# be careful to not leave `sleep infinity` behind,
# http://mywiki.wooledge.org/SignalTrap#When_is_the_signal_handled.3F
res = sigforwarder("/usr/bin/bash", ["-c", r"""
trap "echo 'WRONG: child has trapped'; exit 11" TERM
bash -c $'pid=baf; trap \'[[ -v pid ]] && kill $pid; exit 22\' TERM; sleep infinity & pid=$!; wait $pid' &
grandchild=$!
kill -SIGTERM $PPID
wait $grandchild
exit $?
"""])
self.assertEqual(res, 22)
def main():
if len(sys.argv) < 2:
raise RuntimeError("At least one argument is required")
argv0, program, *program_args = sys.argv
code = sigforwarder(program, program_args)
sys.exit(code)
if __name__ == '__main__':
main() | null |
645 | # Copyright (C) 2021-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from copy import deepcopy
from random import shuffle
import numpy as np
import openvino.runtime as ov
import pytest
from otx.core.ov.graph.graph import Graph, SortedDict
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class TestSortedDict:
@e2e_pytest_unit
def test(self):
instance = SortedDict("key")
orders = list("abcdefghijklmnopqrstuvwxyz")
cands = list("abcdefghijklmnopqrstuvwxyz")
shuffle(cands)
for cand in cands:
instance[cand] = {"edge": {"key": ord(cand)}}
idx = 0
for key in instance:
assert key == orders[idx]
idx += 1
idx = len(orders) - 1
for key in reversed(instance):
assert key == orders[idx]
idx -= 1
repr(instance.keys())
idx = 0
for key in instance.keys():
assert key == orders[idx]
idx += 1
idx = len(orders) - 1
for key in reversed(instance.keys()):
assert key == orders[idx]
idx -= 1
repr(instance.values())
idx = 0
for value in instance.values():
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
idx = len(orders) - 1
for value in reversed(instance.values()):
assert value["edge"]["key"] == ord(orders[idx])
idx -= 1
repr(instance.values())
idx = 0
for key, value in instance.items():
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
idx = len(orders) - 1
for key, value in reversed(instance.items()):
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx -= 1
instance2 = deepcopy(instance)
idx = 0
for key, value in instance2.items():
assert key == orders[idx]
assert value["edge"]["key"] == ord(orders[idx])
idx += 1
instance.pop("i")
assert "i" not in instance
assert len(instance) == len(orders) - 1
instance.clear()
assert len(instance) == 0
class TestGraph:
@pytest.fixture(autouse=True)
def setup(self) -> None:
param = ov.opset10.parameter([1, 3, 64, 64], ov.Type.f32, name="in")
constant = ov.opset10.constant(np.array([103.0, 116.0, 123.0]).reshape(1, 3, 1, 1), ov.Type.f32)
node = ov.opset10.subtract(param, constant, "numpy")
constant = ov.opset10.constant(np.random.normal(size=(32, 3, 3, 3)), ov.Type.f32)
node = ov.opset10.convolution(node, constant, [2, 2], [1, 1], [1, 1], [1, 1], "explicit")
constant = ov.opset10.constant(np.random.normal(size=(1, 32, 1, 1)), ov.Type.f32)
node = ov.opset10.add(node, constant, "numpy")
node = ov.opset10.clamp(node, 0, 6)
result = ov.opset10.result(node, name="out")
ov_model = ov.Model([result], [param], "model")
self.graph = Graph.from_ov(ov_model)
assert isinstance(self.graph, Graph)
@e2e_pytest_unit
def test_get_edge_data(self):
nodes = [node for node in self.graph]
assert self.graph.get_edge_data(nodes[0], nodes[-1]) is None
assert self.graph.get_edge_data(nodes[0], nodes[2])
@e2e_pytest_unit
def test_remove_node(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
predecessor = list(self.graph.predecessors(node))[0]
successor = list(self.graph.successors(node))[0]
self.graph.remove_node(node, keep_connect=True)
assert self.graph.get_edge_data(predecessor, successor)
node = self.graph.get_nodes_by_types(["Convolution"])[0]
predecessor = list(self.graph.predecessors(node))[0]
successor = list(self.graph.successors(node))[0]
self.graph.remove_node(node, keep_connect=False)
assert self.graph.get_edge_data(predecessor, successor) is None
@e2e_pytest_unit
def test_replace_node(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
new_node = deepcopy(node)
predecessors = list(self.graph.predecessors(node))
successors = list(self.graph.successors(node))
self.graph.replace_node(node, new_node)
assert node not in self.graph
assert new_node in self.graph
assert predecessors == list(self.graph.predecessors(new_node))
assert successors == list(self.graph.successors(new_node))
@e2e_pytest_unit
def test_add_edge(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
new_node = deepcopy(node)
predecessors = list(self.graph.predecessors(node))
successors = list(self.graph.successors(node))
self.graph.remove_node(node)
for predecessor in predecessors:
assert self.graph.get_edge_data(predecessor, new_node) is None
self.graph.add_edge(predecessor, new_node)
assert self.graph.get_edge_data(predecessor, new_node)
for successor in successors:
assert self.graph.get_edge_data(new_node, successor) is None
self.graph.add_edge(new_node, successor)
assert self.graph.get_edge_data(new_node, successor)
assert new_node in self.graph
@e2e_pytest_unit
def METHOD_NAME(self):
node = self.graph.get_nodes_by_types(["Subtract"])[0]
founds = self.graph.get_nodes_by_type_pattern(["Subtract", "Clamp"], node)
for found in founds:
start, end = found
assert start == node
assert start.type == "Subtract"
assert end.type == "Clamp"
@e2e_pytest_unit
def test_remove_normalize_nodes(self):
self.graph.remove_normalize_nodes()
assert len(self.graph._normalize_nodes) == 0
@e2e_pytest_unit
def test_topological_sort(self):
assert len(list(self.graph.topological_sort())) == len(self.graph)
@e2e_pytest_unit
def test_clean_up(self):
nodes = self.graph.get_nodes_by_types(["Subtract"])
self.graph.remove_node(nodes[0])
n_nodes = len(self.graph)
self.graph.clean_up()
assert n_nodes > len(self.graph) | null |
646 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import math
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
PADDING_SYMBOL = 0
DECODER_START_SYMBOL = 1
class Seq2SlateMode(Enum):
RANK_MODE = "rank"
PER_SEQ_LOG_PROB_MODE = "per_sequence_log_prob"
PER_SYMBOL_LOG_PROB_DIST_MODE = "per_symbol_log_prob_dist"
DECODE_ONE_STEP_MODE = "decode_one_step"
ENCODER_SCORE_MODE = "encoder_score_mode"
class Seq2SlateOutputArch(Enum):
# Only output encoder scores
ENCODER_SCORE = "encoder_score"
# A decoder outputs a sequence in an autoregressive way
AUTOREGRESSIVE = "autoregressive"
# Using encoder scores, a decoder outputs a sequence using
# frechet sort (equivalent to iterative softmax)
FRECHET_SORT = "frechet_sort"
def print_model_info(seq2slate):
def METHOD_NAME(model):
return len(torch.cat([p.flatten() for p in model.parameters()]))
logger.info(f"Num of total params: {METHOD_NAME(seq2slate)}")
logger.info(f"Num of Encoder params: {METHOD_NAME(seq2slate.encoder)}")
logger.info(
f"Num of Candidate Embedder params: {METHOD_NAME(seq2slate.candidate_embedder)}"
)
logger.info(
f"Num of State Embedder params: {METHOD_NAME(seq2slate.state_embedder)}"
)
if seq2slate.output_arch == Seq2SlateOutputArch.FRECHET_SORT:
logger.info(
f"Num of Encoder_Scorer params: {METHOD_NAME(seq2slate.encoder_scorer)}"
)
elif seq2slate.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
logger.info(
f"Num of Positional Encoding params: {METHOD_NAME(seq2slate.positional_encoding_decoder)}"
)
logger.info(f"Num of Decoder params: {METHOD_NAME(seq2slate.decoder)}")
elif seq2slate.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:
logger.info(
f"Num of Encoder_Scorer params: {METHOD_NAME(seq2slate.encoder_scorer)}"
)
def mask_logits_by_idx(logits, tgt_in_idx):
# logits shape: batch_size, seq_len, candidate_size
# tgt_in_idx shape: batch_size, seq_len
# the first two symbols are reserved for padding and decoder-starting symbols
# so they should never be a possible output label
logits[:, :, :2] = float("-inf")
batch_size, seq_len = tgt_in_idx.shape
mask_indices = torch.tril(
tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len), diagonal=0
)
logits = logits.scatter(2, mask_indices, float("-inf"))
return logits
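# Tiny example (illustrative): with tgt_in_idx = [[1, 3]] and seq_len = 2, the
# tril trick yields per-step indices [[1, 0], [1, 3]]: decoding step 0 masks
# the start symbol 1, and step 1 masks symbols 1 and 3, i.e. everything already
# consumed, on top of the always-masked padding/start columns 0 and 1.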
def subsequent_mask(size: int, device: torch.device):
"""
Mask out subsequent positions. Mainly used in the decoding process,
in which an item should not attend subsequent items.
mask_ijk = 0 if the item should be ignored; 1 if the item should be paid attention
"""
subsequent_mask = ~torch.triu(
torch.ones(1, size, size, device=device, dtype=torch.bool), diagonal=1
)
return subsequent_mask
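# Worked example (illustrative): subsequent_mask(3, device) returns a
# (1, 3, 3) lower-triangular boolean mask, so row i may only attend to
# positions 0..i:
#   [[[ True, False, False],
#     [ True,  True, False],
#     [ True,  True,  True]]]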
# TODO (@czxttkl): use when we introduce padding
def subsequent_and_padding_mask(tgt_in_idx):
"""Create a mask to hide padding and future items"""
# tgt_in_idx shape: batch_size, seq_len
# tgt_tgt_mask shape: batch_size, 1, seq_len
tgt_tgt_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8)
# subseq_mask shape: 1, seq_len, seq_len
subseq_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device)
# tgt_tgt_mask shape: batch_size, seq_len, seq_len
tgt_tgt_mask = tgt_tgt_mask & subseq_mask
return tgt_tgt_mask
def clones(module, N):
"""
Produce N identical layers.
:param module: nn.Module class
:param N: number of copies
"""
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def attention(query, key, value, mask, d_k):
"""Scaled Dot Product Attention"""
# mask shape: batch_size x 1 x seq_len x seq_len
# scores shape: batch_size x num_heads x seq_len x seq_len
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
scores = scores.masked_fill(mask == 0, float("-inf"))
# p_attn shape: batch_size x num_heads x seq_len x seq_len
p_attn = F.softmax(scores, dim=3)
# attn shape: batch_size x num_heads x seq_len x d_k
attn = torch.matmul(p_attn, value)
return attn, p_attn
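# Shape sketch (illustrative): with query/key/value of shape
# (batch_size, num_heads, seq_len, d_k), scores and p_attn come out as
# (batch_size, num_heads, seq_len, seq_len) and the returned attn as
# (batch_size, num_heads, seq_len, d_k).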
def per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx):
"""Gather per-symbol log probabilities into per-seq log probabilities"""
# per_symbol_log_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# per_symbol_log_probs is log probability of each symbol in the tgt_out_idx
# shape: batch_size, seq_len
log_probs = torch.gather(per_symbol_log_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(
2
)
# shape: batch_size, 1
return log_probs.sum(dim=1, keepdim=True)
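# Small numeric sketch (illustrative values): with batch_size=1, seq_len=2 and
# tgt_out_idx = [[2, 0]], the gather selects per_symbol_log_probs[0, 0, 2] and
# per_symbol_log_probs[0, 1, 0]; summing over seq_len returns the per-sequence
# log probability with shape (1, 1).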
def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
"""Gather per-symbol probabilities into per-seq probabilities"""
# per_symbol_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# output shape: batch_size, 1
return torch.clamp(
torch.prod(
torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(2),
dim=1,
keepdim=True,
),
# prevent zero probabilities, which cause torch.log return -inf
min=1e-40,
)
def pytorch_decoder_mask(
memory: torch.Tensor, tgt_in_idx: torch.Tensor, num_heads: int
):
"""
Compute the masks used in the PyTorch Transformer-based decoder for
self-attention and attention over encoder outputs
mask_ijk = 1 if the item should be ignored; 0 if the item should be paid attention
Input:
memory shape: batch_size, src_seq_len, dim_model
tgt_in_idx (+2 offseted) shape: batch_size, tgt_seq_len
Return:
tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len
tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len
"""
batch_size, src_seq_len, _ = memory.shape
tgt_seq_len = tgt_in_idx.shape[1]
device = memory.device
mask_indices = torch.tril(
tgt_in_idx.repeat(1, tgt_seq_len).reshape(batch_size, tgt_seq_len, tgt_seq_len),
diagonal=0,
).to(device)
tgt_src_mask_augmented = torch.zeros(
batch_size, tgt_seq_len, src_seq_len + 2, dtype=torch.bool, device=device
).scatter(2, mask_indices, 1)
tgt_src_mask = tgt_src_mask_augmented[:, :, 2:].repeat_interleave(num_heads, dim=0)
tgt_tgt_mask = (subsequent_mask(tgt_seq_len, device) == 0).repeat(
batch_size * num_heads, 1, 1
)
return tgt_tgt_mask, tgt_src_mask | null |
647 | from past.builtins import basestring
import json
import collections.abc
import bleach
def is_iterable(obj):
return isinstance(obj, collections.abc.Iterable)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (is_iterable(obj) and not hasattr(obj, 'strip'))
def strip_html(unclean, tags=None):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
if isinstance(unclean, bytes):
unclean = unclean.decode()
if not tags:
tags = []
if unclean is None:
return u''
elif isinstance(unclean, dict) or isinstance(unclean, list):
return bleach.clean(str(unclean), strip=True, tags=[], attributes=[], styles=[])
# We make this noop for non-string, non-collection inputs so this function can be used with higher-order
# functions, such as rapply (recursively applies a function to collections)
# If it's not a string and not an iterable (string, list, dict), return unclean
elif not isinstance(unclean, basestring) and not is_iterable(unclean):
return unclean
else:
return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])
# TODO: Remove unescape_entities when mako html safe comes in
def unescape_entities(value, safe=None):
"""
Convert HTML-encoded data (stored in the database) to literal characters.
Intended primarily for endpoints consumed by frameworks that handle their own escaping (eg Knockout)
:param value: A string, dict, or list
:param safe: A dict of escape sequences and characters that can be used to extend the set of
characters that this function will unescape. Use with caution as there are few cases in which
there will be reason to unescape characters beyond '&amp;'.
:return: A string or list or dict without html escape characters
"""
safe_characters = {
'&amp;': '&',
}
if safe and isinstance(safe, dict):
safe_characters.update(safe)
if isinstance(value, dict):
return {
key: unescape_entities(value, safe=safe_characters)
for (key, value) in value.items()
}
if is_iterable_but_not_string(value):
return [
unescape_entities(each, safe=safe_characters)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
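# Illustrative behaviour (not part of the original module): with the default
# safe table only '&amp;' is unescaped, recursing through collections:
#   unescape_entities({'k': ['a&amp;b']}) -> {'k': ['a&b']}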
def METHOD_NAME(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings
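# Illustrative call (not from the original file): METHOD_NAME('</script>')
# returns the string '"<\/script>"', so a closing tag can no longer terminate
# an inline <script> block when the JSON lands in a mako template.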
"""
The following algorithm is based on code from
https://dev.to/alvaromontoro/building-your-own-color-contrast-checker-4j7o
"""
def is_a11y(value_one, value_two='#FFFFFF', min_ratio=1 / 3):
"""
Compares two colors and determines if they are above the minimum
contrast ratio. The default is 1 / 3, which is the contrast ratio for
large text graphics. See https://color.a11y.com/ for more details.
:param value_one: a hex formatted color value.
:param value_two: another hex formatted color value. If none is specified, value_one is
compared against white (#FFFFFF).
:param min_ratio: The ratio to compare against. The default is 1 / 3, which is the minimum
contrast ratio for large text graphics.
:return: A boolean of whether or not the two colors meet compliance.
"""
color_rgb_one = hex_to_rgb(value_one)
color_rgb_two = hex_to_rgb(value_two)
color_luminance_one = calculate_luminance(color_rgb_one)
color_luminance_two = calculate_luminance(color_rgb_two)
if color_luminance_one > color_luminance_two:
contrast_ratio = ((color_luminance_two + 0.05) / (color_luminance_one + 0.05))
else:
contrast_ratio = ((color_luminance_one + 0.05) / (color_luminance_two + 0.05))
# A smaller luminance quotient means higher contrast, so a value below the
# threshold meets the accessibility requirement.
return contrast_ratio < min_ratio
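# Worked example (illustrative): black on white passes. calculate_luminance()
# below gives 0.0 for '#000000' and 1.0 for '#FFFFFF', so the quotient
# (0.0 + 0.05) / (1.0 + 0.05) ~ 0.048 is under 1/3 and is_a11y('#000000')
# returns True.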
def hex_to_rgb(value):
color = value[1:]
return tuple(int(color[i:i + 2], 16) for i in range(0, 6, 2))
def calculate_luminance(rgb_color):
rgb_list = []
for value in rgb_color:
value = value / 255
if value <= 0.03928:
rgb_list.append(value / 12.92)
else:
rgb_list.append(pow((value + 0.055) / 1.055, 2.4))
return rgb_list[0] * 0.2126 + rgb_list[1] * 0.7152 + rgb_list[2] * 0.0722 | null |
648 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class DescribeHanaBackupsAsyncRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'DescribeHanaBackupsAsync')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RecoveryPointInTime(self): # Long
return self.get_query_params().get('RecoveryPointInTime')
def set_RecoveryPointInTime(self, RecoveryPointInTime): # Long
self.add_query_param('RecoveryPointInTime', RecoveryPointInTime)
def get_LogPosition(self): # Long
return self.get_query_params().get('LogPosition')
def set_LogPosition(self, LogPosition): # Long
self.add_query_param('LogPosition', LogPosition)
def get_VaultId(self): # String
return self.get_query_params().get('VaultId')
def set_VaultId(self, VaultId): # String
self.add_query_param('VaultId', VaultId)
def get_IncludeLog(self): # Boolean
return self.get_query_params().get('IncludeLog')
def set_IncludeLog(self, IncludeLog): # Boolean
self.add_query_param('IncludeLog', IncludeLog)
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Mode(self): # String
return self.get_query_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_query_param('Mode', Mode)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_IncludeIncremental(self): # Boolean
return self.get_query_params().get('IncludeIncremental')
def set_IncludeIncremental(self, IncludeIncremental): # Boolean
self.add_query_param('IncludeIncremental', IncludeIncremental)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('UseBackint')
def set_UseBackint(self, UseBackint): # Boolean
self.add_query_param('UseBackint', UseBackint)
def get_DatabaseName(self): # String
return self.get_query_params().get('DatabaseName')
def set_DatabaseName(self, DatabaseName): # String
self.add_query_param('DatabaseName', DatabaseName)
def get_VolumeId(self): # Integer
return self.get_query_params().get('VolumeId')
def set_VolumeId(self, VolumeId): # Integer
self.add_query_param('VolumeId', VolumeId)
def get_SourceClusterId(self): # String
return self.get_query_params().get('SourceClusterId')
def set_SourceClusterId(self, SourceClusterId): # String
self.add_query_param('SourceClusterId', SourceClusterId)
def get_IncludeDifferential(self): # Boolean
return self.get_query_params().get('IncludeDifferential')
def set_IncludeDifferential(self, IncludeDifferential): # Boolean
self.add_query_param('IncludeDifferential', IncludeDifferential)
def get_SystemCopy(self): # Boolean
return self.get_query_params().get('SystemCopy')
def set_SystemCopy(self, SystemCopy): # Boolean
self.add_query_param('SystemCopy', SystemCopy) | null |
649 | """
@file
@brief This file contains the project file listview, used by the main window
@author Noah Figg <[email protected]>
@author Jonathan Thomas <[email protected]>
@section LICENSE
Copyright (c) 2008-2018 OpenShot Studios, LLC
(http://www.openshotstudios.com). This file is part of
OpenShot Video Editor (http://www.openshot.org), an open-source project
dedicated to delivering high quality video editing and animation solutions
to the world.
OpenShot Video Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenShot Video Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt5.QtCore import QSize, Qt, QPoint, QRegExp
from PyQt5.QtGui import QDrag, QCursor
from PyQt5.QtWidgets import QListView, QAbstractItemView, QMenu
from classes import info
from classes.app import get_app
from classes.logger import log
from classes.query import File
class FilesListView(QListView):
""" A ListView QWidget used on the main window """
drag_item_size = QSize(48, 48)
drag_item_center = QPoint(24, 24)
def contextMenuEvent(self, event):
event.accept()
# Set context menu mode
app = get_app()
app.context_menu_object = "files"
index = self.indexAt(event.pos())
# Build menu
menu = QMenu(self)
menu.addAction(self.win.actionImportFiles)
menu.addAction(self.win.actionDetailsView)
if index.isValid():
# Look up the model item and our unique ID
model = self.model()
# Look up file_id from 5th column of row
id_index = index.sibling(index.row(), 5)
file_id = model.data(id_index, Qt.DisplayRole)
# If a valid file selected, show file related options
menu.addSeparator()
# Add edit title option (if svg file)
file = File.get(id=file_id)
if file and file.data.get("path").endswith(".svg"):
menu.addAction(self.win.actionEditTitle)
menu.addAction(self.win.actionDuplicateTitle)
menu.addSeparator()
menu.addAction(self.win.actionPreview_File)
menu.addSeparator()
menu.addAction(self.win.actionSplitClip)
menu.addAction(self.win.actionExportClips)
menu.addSeparator()
menu.addAction(self.win.actionAdd_to_Timeline)
menu.addAction(self.win.actionFile_Properties)
menu.addSeparator()
menu.addAction(self.win.actionRemove_from_Project)
menu.addSeparator()
# Show menu
menu.popup(event.globalPos())
def dragEnterEvent(self, event):
# If dragging urls onto widget, accept
if not event.mimeData().hasUrls():
event.ignore()
return
event.accept()
event.setDropAction(Qt.CopyAction)
def startDrag(self, supportedActions):
""" Override startDrag method to display custom icon """
# Get first column indexes for all selected rows
selected = self.selectionModel().selectedRows(0)
# Get image of current item
current = self.selectionModel().currentIndex()
if not current.isValid() and selected:
current = selected[0]
if not current.isValid():
log.warning("No draggable items found in model!")
return False
# Get icon from column 0 on same row as current item
icon = current.sibling(current.row(), 0).data(Qt.DecorationRole)
# Start drag operation
drag = QDrag(self)
drag.setMimeData(self.model().mimeData(selected))
drag.setPixmap(icon.pixmap(self.drag_item_size))
drag.setHotSpot(self.drag_item_center)
drag.exec_()
# Without defining this method, the 'copy' action doesn't show with cursor
def dragMoveEvent(self, event):
event.accept()
# Handle a drag and drop being dropped on widget
def dropEvent(self, event):
if not event.mimeData().hasUrls():
# Nothing we're interested in
event.ignore()
return
event.accept()
# Use try/finally so we always reset the cursor
try:
# Set cursor to waiting
get_app().setOverrideCursor(QCursor(Qt.WaitCursor))
qurl_list = event.mimeData().urls()
log.info("Processing drop event for {} urls".format(len(qurl_list)))
self.files_model.process_urls(qurl_list)
finally:
# Restore cursor
get_app().restoreOverrideCursor()
# Pass file add requests to the model
def add_file(self, filepath):
self.files_model.add_files(filepath)
def filter_changed(self):
        self.refresh_view()
    def refresh_view(self):
"""Filter files with proxy class"""
model = self.model()
filter_text = self.win.filesFilter.text()
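        # Spaces become '.*' wildcards, so "intro clip" also matches
        # "intro_final_clip.mp4"; matching is case-insensitive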
model.setFilterRegExp(QRegExp(filter_text.replace(' ', '.*'), Qt.CaseInsensitive))
col = model.sortColumn()
model.sort(col)
def resize_contents(self):
pass
def __init__(self, model, *args):
# Invoke parent init
super().__init__(*args)
# Get a reference to the window object
self.win = get_app().window
# Get Model data
self.files_model = model
self.setModel(self.files_model.proxy_model)
# Remove the default selection model and wire up to the shared one
self.selectionModel().deleteLater()
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionModel(self.files_model.selection_model)
# Keep track of mouse press start position to determine when to start drag
self.setAcceptDrops(True)
self.setDragEnabled(True)
self.setDropIndicatorShown(True)
# Setup header columns and layout
self.setIconSize(info.LIST_ICON_SIZE)
self.setGridSize(info.LIST_GRID_SIZE)
self.setViewMode(QListView.IconMode)
self.setResizeMode(QListView.Adjust)
self.setUniformItemSizes(True)
self.setStyleSheet('QListView::item { padding-top: 2px; }')
self.setWordWrap(False)
self.setTextElideMode(Qt.ElideRight)
        self.files_model.ModelRefreshed.connect(self.refresh_view)
# setup filter events
app = get_app()
        app.window.filesFilter.textChanged.connect(self.filter_changed)
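# Minimal wiring sketch (not part of OpenShot itself; names below are illustrative):
# the view expects a model wrapper exposing `proxy_model`, `selection_model`,
# `process_urls` and `add_files`, and is typically created by the main window:
#
#   files_model = FilesModel()                 # hypothetical model wrapper
#   files_view = FilesListView(files_model)
#   window.tabFiles.layout().addWidget(files_view)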
""" test for app action functionality """
import json
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser
from django.http import JsonResponse
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from bookwyrm import models, views
from bookwyrm.book_search import SearchResult
from bookwyrm.settings import DOMAIN
from bookwyrm.tests.validate_html import validate_html
class Views(TestCase):
"""tag views"""
def setUp(self): # pylint: disable=invalid-name
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"[email protected]",
"[email protected]",
"mouseword",
local=True,
localname="mouse",
remote_id="https://example.com/users/mouse",
)
self.work = models.Work.objects.create(title="Test Work")
self.book = models.Edition.objects.create(
title="Test Book",
remote_id="https://example.com/book/1",
parent_work=self.work,
)
models.SiteSettings.objects.create()
def test_search_json_response(self):
"""searches local data only and returns book data in json format"""
view = views.Search.as_view()
request = self.factory.get("", {"q": "Test Book"})
with patch("bookwyrm.views.search.is_api_request") as is_api:
is_api.return_value = True
response = view(request)
self.assertIsInstance(response, JsonResponse)
data = json.loads(response.content)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["title"], "Test Book")
self.assertEqual(data[0]["key"], f"https://{DOMAIN}/book/{self.book.id}")
def test_search_no_query(self):
"""just the search page"""
view = views.Search.as_view()
# we need a connector for this, sorry
request = self.factory.get("")
with patch("bookwyrm.views.search.is_api_request") as is_api:
is_api.return_value = False
response = view(request)
self.assertIsInstance(response, TemplateResponse)
validate_html(response.render())
    def test_search_books(self):
"""searches remote connectors"""
view = views.Search.as_view()
connector = models.Connector.objects.create(
identifier="example.com",
connector_file="openlibrary",
base_url="https://example.com",
books_url="https://example.com/books",
covers_url="https://example.com/covers",
search_url="https://example.com/search?q=",
)
mock_result = SearchResult(title="Mock Book", connector=connector, key="hello")
request = self.factory.get("", {"q": "Test Book", "remote": True})
request.user = self.local_user
with patch("bookwyrm.views.search.is_api_request") as is_api:
is_api.return_value = False
with patch("bookwyrm.connectors.connector_manager.search") as remote_search:
remote_search.return_value = [
{"results": [mock_result], "connector": connector}
]
response = view(request)
self.assertIsInstance(response, TemplateResponse)
validate_html(response.render())
local_results = response.context_data["results"]
self.assertEqual(local_results[0].title, "Test Book")
connector_results = response.context_data["remote_results"]
self.assertEqual(connector_results[0]["results"][0].title, "Mock Book")
def test_search_book_anonymous(self):
"""Don't search remote for logged out user"""
view = views.Search.as_view()
connector = models.Connector.objects.create(
identifier="example.com",
connector_file="openlibrary",
base_url="https://example.com",
books_url="https://example.com/books",
covers_url="https://example.com/covers",
search_url="https://example.com/search?q=",
)
mock_result = SearchResult(title="Mock Book", connector=connector, key="hello")
request = self.factory.get("", {"q": "Test Book", "remote": True})
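        # AnonymousUser.is_authenticated is a read-only property on the class;
        # overriding it on the class object (used directly as request.user)
        # simulates a logged-out request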
anonymous_user = AnonymousUser
anonymous_user.is_authenticated = False
request.user = anonymous_user
with patch("bookwyrm.views.search.is_api_request") as is_api:
is_api.return_value = False
with patch("bookwyrm.connectors.connector_manager.search") as remote_search:
remote_search.return_value = [
{"results": [mock_result], "connector": connector}
]
response = view(request)
self.assertIsInstance(response, TemplateResponse)
validate_html(response.render())
local_results = response.context_data["results"]
self.assertEqual(local_results[0].title, "Test Book")
connector_results = response.context_data.get("remote_results")
self.assertIsNone(connector_results)
def test_search_users(self):
"""searches remote connectors"""
view = views.Search.as_view()
request = self.factory.get("", {"q": "mouse", "type": "user"})
request.user = self.local_user
response = view(request)
self.assertIsInstance(response, TemplateResponse)
validate_html(response.render())
self.assertEqual(response.context_data["results"][0], self.local_user)
def test_search_users_logged_out(self):
"""searches remote connectors"""
view = views.Search.as_view()
request = self.factory.get("", {"q": "mouse", "type": "user"})
anonymous_user = AnonymousUser
anonymous_user.is_authenticated = False
request.user = anonymous_user
response = view(request)
validate_html(response.render())
self.assertFalse("results" in response.context_data)
def test_search_lists(self):
"""searches remote connectors"""
with patch(
"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"
), patch("bookwyrm.lists_stream.remove_list_task.delay"):
booklist = models.List.objects.create(
user=self.local_user, name="test list"
)
view = views.Search.as_view()
request = self.factory.get("", {"q": "test", "type": "list"})
request.user = self.local_user
response = view(request)
self.assertIsInstance(response, TemplateResponse)
validate_html(response.render())
        self.assertEqual(response.context_data["results"][0], booklist)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeAffectedMaliciousFileImagesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeAffectedMaliciousFileImages')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RepoId(self): # String
return self.get_query_params().get('RepoId')
def set_RepoId(self, RepoId): # String
self.add_query_param('RepoId', RepoId)
def get_Pod(self): # String
return self.get_query_params().get('Pod')
def set_Pod(self, Pod): # String
self.add_query_param('Pod', Pod)
def get_ClusterName(self): # String
return self.get_query_params().get('ClusterName')
def set_ClusterName(self, ClusterName): # String
self.add_query_param('ClusterName', ClusterName)
def get_RepoNamespace(self): # String
return self.get_query_params().get('RepoNamespace')
def set_RepoNamespace(self, RepoNamespace): # String
self.add_query_param('RepoNamespace', RepoNamespace)
def get_ImageDigest(self): # String
return self.get_query_params().get('ImageDigest')
def set_ImageDigest(self, ImageDigest): # String
self.add_query_param('ImageDigest', ImageDigest)
def get_ScanRanges(self): # RepeatList
return self.get_query_params().get('ScanRange')
def set_ScanRanges(self, ScanRange): # RepeatList
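        # the API expects repeated values flattened into 1-based keys:
        # ScanRange.1, ScanRange.2, ...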
for depth1 in range(len(ScanRange)):
self.add_query_param('ScanRange.' + str(depth1 + 1), ScanRange[depth1])
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_ImageTag(self): # String
return self.get_query_params().get('ImageTag')
def set_ImageTag(self, ImageTag): # String
self.add_query_param('ImageTag', ImageTag)
def get_Image(self): # String
return self.get_query_params().get('Image')
def set_Image(self, Image): # String
self.add_query_param('Image', Image)
def get_MaliciousMd5(self): # String
return self.get_query_params().get('MaliciousMd5')
def set_MaliciousMd5(self, MaliciousMd5): # String
self.add_query_param('MaliciousMd5', MaliciousMd5)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RepoName(self): # String
return self.get_query_params().get('RepoName')
def set_RepoName(self, RepoName): # String
self.add_query_param('RepoName', RepoName)
def get_Namespace(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
def get_RepoInstanceId(self): # String
return self.get_query_params().get('RepoInstanceId')
def set_RepoInstanceId(self, RepoInstanceId): # String
self.add_query_param('RepoInstanceId', RepoInstanceId)
def get_ImageLayer(self): # String
return self.get_query_params().get('ImageLayer')
def set_ImageLayer(self, ImageLayer): # String
self.add_query_param('ImageLayer', ImageLayer)
    def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Levels(self): # String
return self.get_query_params().get('Levels')
def set_Levels(self, Levels): # String
self.add_query_param('Levels', Levels)
def get_RepoRegionId(self): # String
return self.get_query_params().get('RepoRegionId')
def set_RepoRegionId(self, RepoRegionId): # String
        self.add_query_param('RepoRegionId', RepoRegionId)
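
# Minimal usage sketch (not part of the generated SDK file above): requests of this
# kind are executed through an AcsClient; the credentials and region here are
# placeholders, not working values.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = DescribeAffectedMaliciousFileImagesRequest()
    request.set_PageSize("20")  # paging params are string-typed in this request
    print(client.do_action_with_exception(request))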
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'CreateCluster','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterSpecification(self): # String
return self.get_query_params().get('ClusterSpecification')
def set_ClusterSpecification(self, ClusterSpecification): # String
self.add_query_param('ClusterSpecification', ClusterSpecification)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
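        # each tag dict is flattened into Tag.N.Key / Tag.N.Value query params (1-based)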
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_ClusterType(self): # String
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # String
self.add_query_param('ClusterType', ClusterType)
def get_InstanceName(self): # String
return self.get_query_params().get('InstanceName')
def set_InstanceName(self, InstanceName): # String
self.add_query_param('InstanceName', InstanceName)
def get_NetType(self): # String
return self.get_query_params().get('NetType')
def set_NetType(self, NetType): # String
self.add_query_param('NetType', NetType)
def get_MseVersion(self): # String
return self.get_query_params().get('MseVersion')
def set_MseVersion(self, MseVersion): # String
self.add_query_param('MseVersion', MseVersion)
def get_Region(self): # String
return self.get_query_params().get('Region')
def set_Region(self, Region): # String
self.add_query_param('Region', Region)
def get_PubSlbSpecification(self): # String
return self.get_query_params().get('PubSlbSpecification')
def set_PubSlbSpecification(self, PubSlbSpecification): # String
self.add_query_param('PubSlbSpecification', PubSlbSpecification)
    def get_PrivateSlbSpecification(self): # String
return self.get_query_params().get('PrivateSlbSpecification')
def set_PrivateSlbSpecification(self, PrivateSlbSpecification): # String
self.add_query_param('PrivateSlbSpecification', PrivateSlbSpecification)
def get_InstanceCount(self): # Integer
return self.get_query_params().get('InstanceCount')
def set_InstanceCount(self, InstanceCount): # Integer
self.add_query_param('InstanceCount', InstanceCount)
def get_ConnectionType(self): # String
return self.get_query_params().get('ConnectionType')
def set_ConnectionType(self, ConnectionType): # String
self.add_query_param('ConnectionType', ConnectionType)
def get_ClusterVersion(self): # String
return self.get_query_params().get('ClusterVersion')
def set_ClusterVersion(self, ClusterVersion): # String
self.add_query_param('ClusterVersion', ClusterVersion)
def get_DiskType(self): # String
return self.get_query_params().get('DiskType')
def set_DiskType(self, DiskType): # String
self.add_query_param('DiskType', DiskType)
def get_PubNetworkFlow(self): # String
return self.get_query_params().get('PubNetworkFlow')
def set_PubNetworkFlow(self, PubNetworkFlow): # String
self.add_query_param('PubNetworkFlow', PubNetworkFlow)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_AcceptLanguage(self): # String
return self.get_query_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_query_param('AcceptLanguage', AcceptLanguage)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
        self.add_query_param('ChargeType', ChargeType)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data
class CreateAggregateConfigRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Config', '2020-09-07', 'CreateAggregateConfigRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TagKeyScope(self): # String
return self.get_body_params().get('TagKeyScope')
def set_TagKeyScope(self, TagKeyScope): # String
self.add_body_params('TagKeyScope', TagKeyScope)
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_ResourceTypesScope(self): # Array
return self.get_body_params().get('ResourceTypesScope')
def set_ResourceTypesScope(self, ResourceTypesScope): # Array
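        # array values are flattened into ResourceTypesScope.1, .2, ... body params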
for index1, value1 in enumerate(ResourceTypesScope):
self.add_body_params('ResourceTypesScope.' + str(index1 + 1), value1)
def get_Description(self): # String
return self.get_body_params().get('Description')
def set_Description(self, Description): # String
self.add_body_params('Description', Description)
def get_AggregatorId(self): # String
return self.get_body_params().get('AggregatorId')
def set_AggregatorId(self, AggregatorId): # String
self.add_body_params('AggregatorId', AggregatorId)
def get_ConfigRuleTriggerTypes(self): # String
return self.get_body_params().get('ConfigRuleTriggerTypes')
def set_ConfigRuleTriggerTypes(self, ConfigRuleTriggerTypes): # String
self.add_body_params('ConfigRuleTriggerTypes', ConfigRuleTriggerTypes)
def get_SourceIdentifier(self): # String
return self.get_body_params().get('SourceIdentifier')
def set_SourceIdentifier(self, SourceIdentifier): # String
self.add_body_params('SourceIdentifier', SourceIdentifier)
def get_TagValueScope(self): # String
return self.get_body_params().get('TagValueScope')
def set_TagValueScope(self, TagValueScope): # String
self.add_body_params('TagValueScope', TagValueScope)
def get_ExcludeAccountIdsScope(self): # String
return self.get_body_params().get('ExcludeAccountIdsScope')
def set_ExcludeAccountIdsScope(self, ExcludeAccountIdsScope): # String
self.add_body_params('ExcludeAccountIdsScope', ExcludeAccountIdsScope)
def get_RegionIdsScope(self): # String
return self.get_body_params().get('RegionIdsScope')
def set_RegionIdsScope(self, RegionIdsScope): # String
self.add_body_params('RegionIdsScope', RegionIdsScope)
def get_ExcludeFolderIdsScope(self): # String
return self.get_body_params().get('ExcludeFolderIdsScope')
def set_ExcludeFolderIdsScope(self, ExcludeFolderIdsScope): # String
self.add_body_params('ExcludeFolderIdsScope', ExcludeFolderIdsScope)
    def get_RiskLevel(self): # Integer
return self.get_body_params().get('RiskLevel')
def set_RiskLevel(self, RiskLevel): # Integer
self.add_body_params('RiskLevel', RiskLevel)
def get_SourceOwner(self): # String
return self.get_body_params().get('SourceOwner')
def set_SourceOwner(self, SourceOwner): # String
self.add_body_params('SourceOwner', SourceOwner)
def get_ResourceGroupIdsScope(self): # String
return self.get_body_params().get('ResourceGroupIdsScope')
def set_ResourceGroupIdsScope(self, ResourceGroupIdsScope): # String
self.add_body_params('ResourceGroupIdsScope', ResourceGroupIdsScope)
def get_InputParameters(self): # String
return self.get_body_params().get('InputParameters')
def set_InputParameters(self, InputParameters): # String
self.add_body_params('InputParameters', InputParameters)
def get_ConfigRuleName(self): # String
return self.get_body_params().get('ConfigRuleName')
def set_ConfigRuleName(self, ConfigRuleName): # String
self.add_body_params('ConfigRuleName', ConfigRuleName)
def get_TagKeyLogicScope(self): # String
return self.get_body_params().get('TagKeyLogicScope')
def set_TagKeyLogicScope(self, TagKeyLogicScope): # String
self.add_body_params('TagKeyLogicScope', TagKeyLogicScope)
def get_MaximumExecutionFrequency(self): # String
return self.get_body_params().get('MaximumExecutionFrequency')
def set_MaximumExecutionFrequency(self, MaximumExecutionFrequency): # String
self.add_body_params('MaximumExecutionFrequency', MaximumExecutionFrequency)
def get_FolderIdsScope(self): # String
return self.get_body_params().get('FolderIdsScope')
def set_FolderIdsScope(self, FolderIdsScope): # String
self.add_body_params('FolderIdsScope', FolderIdsScope)
def get_ExcludeResourceIdsScope(self): # String
return self.get_body_params().get('ExcludeResourceIdsScope')
def set_ExcludeResourceIdsScope(self, ExcludeResourceIdsScope): # String
        self.add_body_params('ExcludeResourceIdsScope', ExcludeResourceIdsScope)
import imdb
from colorama import Fore, Style
from plugin import plugin, require
from functools import lru_cache
app = imdb.IMDb()
def main(jarvis, movie):
movie_id = search_movie(jarvis, movie)
if movie_id is None:
return None
return get_movie_by_id(movie_id)
@lru_cache(maxsize=50, typed=False)
def search_movie(jarvis, movie, all_results=False):
if movie == '':
jarvis.say("Please add movie name!", Fore.RED)
return None
results = app.search_movie(movie, results=10)
if not results:
jarvis.say("Error: Did not find movie!", Fore.RED)
return None
if not all_results:
first = results[0]
return first.movieID
return results
@lru_cache(maxsize=20, typed=False)
def get_movie_by_id(movie_id):
return app.get_movie(movie_id)
@require(network=True)
@plugin('movie cast')
def movie_cast(jarvis, movie):
    """Print the movie's cast."""
data = main(jarvis, movie)
if data is not None:
for d in data['cast']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie director')
def movie_director(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['director']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie plot')
def movie_plot(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'plot outline' in data:
jarvis.say('Plot outline:', Fore.GREEN)
jarvis.say(data['plot outline'])
jarvis.say('')
if 'plot' in data:
jarvis.say('Plot:', Fore.GREEN)
for d in data['plot']:
jarvis.say(d)
@require(network=True)
@plugin('movie producer')
def movie_producer(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['producers']:
jarvis.say(d['name'])
@require(network=True)
@plugin('movie rating')
def movie_rating(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['rating']))
@require(network=True)
@plugin('movie year')
def movie_year(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
jarvis.say(str(data['year']))
@require(network=True)
@plugin('movie runtime')
def movie_runtime(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
if 'runtimes' in data:
jarvis.say(str(data['runtimes'][0]) + ' minutes')
else:
jarvis.say("No runtime data present")
@require(network=True)
@plugin('movie countries')
def movie_countries(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['countries']:
jarvis.say(str(d))
@require(network=True)
@plugin('movie genres')
def movie_genres(jarvis, movie):
""""""
data = main(jarvis, movie)
if data is not None:
for d in data['genres']:
jarvis.say(d)
@require(network=True)
@plugin('movie info')
def movie_info(jarvis, movie):
"""
Display table with various information
"""
data = main(jarvis, movie)
if data is not None:
get_movie_info(jarvis, data)
@require(network=True)
@plugin('movie search')
def movie_search(jarvis, movie):
""" search for a movie on IMDB"""
results = search_movie(jarvis, movie, all_results=True)
# if results is None or empty
if not results:
return None
# get only movies from the results, filtering out TV series, etc
movie_results = []
for item in results:
if item['kind'] == 'movie':
movie_results.append(item)
if len(movie_results) > 5:
count = 5
else:
count = len(movie_results)
jarvis.say('')
space = ' '
text = 'ID'
text += space * 3 + 'Movie title'
jarvis.say(text, Fore.GREEN)
for i in range(count):
item = movie_results[i]
text = Fore.GREEN + str(i + 1) + space * 4
text += Fore.RESET + item['smart long imdb canonical title']
jarvis.say(text)
jarvis.say('')
jarvis.say('Please enter ID to know more(q - quit):')
input_id = jarvis.input()
# If nothing is entered, just return
if input_id == '':
return None
    if input_id == 'q':
        return None
    if len(input_id) != 1 or input_id not in '123456789':
        return jarvis.say(Fore.RED + 'Please enter a valid value')
    input_id = int(input_id)
    # if the entered id is outside the listed range
    if (input_id > count) or (input_id < 1):
        return jarvis.say(Fore.RED + 'Please enter an id from the given list')
movie_id = movie_results[input_id - 1].movieID
data = get_movie_by_id(movie_id)
get_movie_info(jarvis, data)
def colorized_output(key, value):
"""
pretty print key value pair
"""
green_text = Fore.GREEN + "{:<14}".format(key)
normal_text = Style.RESET_ALL + ": " + str(value)
return green_text + normal_text
def get_movie_info(jarvis, data):
"""
Takes a movie attributes as input and prints them accordingly
"""
jarvis.say('')
jarvis.say(
'What type of information do you want: cast, producers, genres, etc.?')
jarvis.say('Write one after another separated by space, please:')
movie_attributes = jarvis.input()
movie_attributes = movie_attributes.split()
jarvis.say('')
for attribute in movie_attributes:
if attribute in data:
value = data[attribute]
if attribute == 'genres':
value = ', '.join(value)
if attribute == 'cast':
lst = [person['name'] for person in value]
value = ', '.join(lst[0:3])
if isinstance(value, list):
value = value[0]
jarvis.say(colorized_output(attribute.capitalize(), str(value)))
else:
jarvis.say(
colorized_output(
attribute.capitalize(),
'no information retrieved'))
# print IMDB url of the movie
movie_url = app.urls['movie_base'] + 'tt' + data.movieID
jarvis.say(colorized_output('IMDB url', movie_url))
    jarvis.say('')
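
# Minimal standalone sketch (assumption: run directly, outside Jarvis, with the
# `imdb` package installed); the movie title below is only an example.
if __name__ == '__main__':
    hits = app.search_movie('The Matrix', results=1)
    if hits:
        movie = app.get_movie(hits[0].movieID)
        print(movie.get('title'), movie.get('year'), movie.get('rating'))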
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class ModifyInstanceSpecRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'ModifyInstanceSpec','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def set_InstanceClass(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_EffectiveTime(self): # String
return self.get_query_params().get('EffectiveTime')
def set_EffectiveTime(self, EffectiveTime): # String
self.add_query_param('EffectiveTime', EffectiveTime)
def get_SourceBiz(self): # String
return self.get_query_params().get('SourceBiz')
def set_SourceBiz(self, SourceBiz): # String
self.add_query_param('SourceBiz', SourceBiz)
def get_BusinessInfo(self): # String
return self.get_query_params().get('BusinessInfo')
def set_BusinessInfo(self, BusinessInfo): # String
self.add_query_param('BusinessInfo', BusinessInfo)
    def get_ShardCount(self): # Integer
return self.get_query_params().get('ShardCount')
def set_ShardCount(self, ShardCount): # Integer
self.add_query_param('ShardCount', ShardCount)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_MajorVersion(self): # String
return self.get_query_params().get('MajorVersion')
def set_MajorVersion(self, MajorVersion): # String
self.add_query_param('MajorVersion', MajorVersion)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_ReadOnlyCount(self): # Integer
return self.get_query_params().get('ReadOnlyCount')
def set_ReadOnlyCount(self, ReadOnlyCount): # Integer
self.add_query_param('ReadOnlyCount', ReadOnlyCount)
def get_ForceUpgrade(self): # Boolean
return self.get_query_params().get('ForceUpgrade')
def set_ForceUpgrade(self, ForceUpgrade): # Boolean
self.add_query_param('ForceUpgrade', ForceUpgrade)
def get_OrderType(self): # String
return self.get_query_params().get('OrderType')
def set_OrderType(self, OrderType): # String
        self.add_query_param('OrderType', OrderType)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkgdb.endpoint import endpoint_data
class CreateDBInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'gdb', '2019-09-03', 'CreateDBInstance','gds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_DBInstanceCategory(self):
return self.get_query_params().get('DBInstanceCategory')
    def set_DBInstanceCategory(self,DBInstanceCategory):
self.add_query_param('DBInstanceCategory',DBInstanceCategory)
def get_DBNodeStorageType(self):
return self.get_query_params().get('DBNodeStorageType')
def set_DBNodeStorageType(self,DBNodeStorageType):
self.add_query_param('DBNodeStorageType',DBNodeStorageType)
def get_DBInstanceDescription(self):
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self,DBInstanceDescription):
self.add_query_param('DBInstanceDescription',DBInstanceDescription)
def get_AutoRenewPeriod(self):
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self,AutoRenewPeriod):
self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_UsedTime(self):
return self.get_query_params().get('UsedTime')
def set_UsedTime(self,UsedTime):
self.add_query_param('UsedTime',UsedTime)
def get_DBInstanceClass(self):
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self,DBInstanceClass):
self.add_query_param('DBInstanceClass',DBInstanceClass)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_DBNodeStorage(self):
return self.get_query_params().get('DBNodeStorage')
def set_DBNodeStorage(self,DBNodeStorage):
self.add_query_param('DBNodeStorage',DBNodeStorage)
def get_DBInstanceNetworkType(self):
return self.get_query_params().get('DBInstanceNetworkType')
def set_DBInstanceNetworkType(self,DBInstanceNetworkType):
self.add_query_param('DBInstanceNetworkType',DBInstanceNetworkType)
def get_AutoRenew(self):
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self,AutoRenew):
self.add_query_param('AutoRenew',AutoRenew)
def get_DBInstanceVersion(self):
return self.get_query_params().get('DBInstanceVersion')
def set_DBInstanceVersion(self,DBInstanceVersion):
self.add_query_param('DBInstanceVersion',DBInstanceVersion)
def get_VPCId(self):
return self.get_query_params().get('VPCId')
def set_VPCId(self,VPCId):
self.add_query_param('VPCId',VPCId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
        self.add_query_param('PayType',PayType)
import asyncio
import logging
from logging import Handler, LogRecord
from types import UnionType
from typing import Callable, List, Protocol
from async_timeout import timeout
from hummingbot.logger import HummingbotLogger
class LogLevel:
NOTSET = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
_IntOrStr: UnionType = int | str
class LoggerMixinProtocol(Protocol):
level: _IntOrStr
log_records: List[LogRecord]
class _LoggerProtocol(LoggerMixinProtocol, Protocol):
def setLevel(self, level: _IntOrStr):
...
def addHandler(self, handler: Handler):
...
class LoggerMixinForTest(LoggerMixinProtocol):
"""
Test logger mixin class that can be used to capture log records during testing.
This mixin provides methods to handle log records and check if specific messages at certain levels are logged.
Example usage:
```python
class MyTestCase(unittest.TestCase, LoggerMixinForTest):
def test_something(self):
self.logger.info("Testing...")
self.assertTrue(self.is_logged("Testing...", logging.INFO))
```
Attributes:
- `level`: The default log level for the logger.
"""
level: _IntOrStr = LogLevel.NOTSET
    def _initialize(self: _LoggerProtocol):
"""
Initialize the test logger mixin by setting the default log level and initializing the log records list.
"""
self.level: _IntOrStr = 1
self.log_records: List[LogRecord] = []
@staticmethod
def _to_loglevel(log_level: _IntOrStr) -> str:
"""
Convert a log level to a string.
:params int | str log_level: The log level to convert.
"""
if isinstance(log_level, int):
log_level = logging.getLevelName(log_level)
return log_level
def set_loggers(self, loggers: List[HummingbotLogger] | HummingbotLogger):
"""
Set up the test logger mixin by adding the test logger to the provided loggers list.
:params List[HummingbotLogger] | HummingbotLogger loggers: The loggers to add to the LoggerMixinForTest.
"""
# __init__() may not be called if the class is used as a mixin
if not hasattr(self, "log_records"):
            self._initialize()
if isinstance(loggers, HummingbotLogger):
loggers = [loggers]
for logger in loggers:
if logger is not None:
logger.setLevel(self.level)
logger.addHandler(self)
def handle(self, record: LogRecord):
"""
Handle a log record by appending it to the log records list.
:params LogRecord record: The log record to handle.
"""
self.log_records.append(record)
def is_logged(self, log_level: _IntOrStr, message: str) -> bool:
"""
Check if a certain message has been logged at a certain level.
:params int | str log_level: The log level to check.
:params str message: The message to check.
"""
log_level = self._to_loglevel(log_level)
return any(
record.getMessage() == message and record.levelname == log_level
for record in self.log_records
)
def is_partially_logged(self, log_level: _IntOrStr, message: str) -> bool:
"""
Check if a certain message has been 'partially' logged at a certain level.
This is useful for checking if a message has been logged with a dynamic value.
:params int | str log_level: The log level to check.
:params str message: The message to check.
"""
log_level = self._to_loglevel(log_level)
return any(
message in record.getMessage() and record.levelname == log_level
for record in self.log_records
)
async def wait_for_logged(self,
log_level: _IntOrStr,
message: str,
partial: bool = False,
wait_s: float = 3) -> None:
"""
Wait for a certain message to be logged at a certain level.
:params int | str log_level: The log level to check.
:params str message: The message to check.
:params bool partial: Whether to check if the message is partially logged.
:params float wait_s: The number of seconds to wait before timing out.
"""
log_level = self._to_loglevel(log_level)
log_method: Callable[[str | int, str], bool] = self.is_partially_logged if partial else self.is_logged
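        # poll every 100 ms until the expected record arrives or the timeout elapses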
try:
async with timeout(wait_s):
while not log_method(log_level, message):
await asyncio.sleep(0.1)
except asyncio.TimeoutError as e:
# Used within a class derived from unittest.TestCase
if callable(getattr(self, "fail", None)):
getattr(self, "fail")(f"Message: {message} was not logged.\n"
f"Received Logs: {[record.getMessage() for record in self.log_records]}")
else:
print(f"Message: {message} was not logged.")
print(f"Received Logs: {[record.getMessage() for record in self.log_records]}")
                raise e
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import sys
import uuid
from guild import remote as remotelib
from guild import remote_util
from guild import util
from guild import var
from . import meta_sync
log = logging.getLogger("guild.remotes.azure_blob")
RUNS_PATH = ["runs"]
DELETED_RUNS_PATH = ["trash", "runs"]
class AzureBlobStorageRemoteType(remotelib.RemoteType):
def __init__(self, _ep):
pass
def remote_for_config(self, name, config):
return AzureBlobStorageRemote(name, config)
def remote_for_spec(self, spec):
raise NotImplementedError()
class AzureBlobStorageRemote(meta_sync.MetaSyncRemote):
def __init__(self, name, config):
self.name = name
self.container = config["container"]
self.root = config.get("root", "/")
self.local_env = remote_util.init_env(config.get("local-env"))
self.local_sync_dir = meta_sync.local_meta_dir(name, self._container_path())
runs_dir = os.path.join(self.local_sync_dir, *RUNS_PATH)
super().__init__(runs_dir, None)
def _container_path(self, *path_parts):
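        # joins root and path parts into a blob path, e.g. with container="guild-runs"
        # and root="prod": _container_path("runs", "abc") -> "guild-runs/prod/runs/abc"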
all_parts = path_parts if not self.root else (self.root,) + path_parts
if not all_parts:
return self.container
return self.container + "/" + "/".join(all_parts)
def _sync_runs_meta(self, force=False):
remote_util.remote_activity(f"Refreshing run info for {self.name}")
if not force and meta_sync.meta_current(
self.local_sync_dir, self._remote_meta_id
):
return
_ensure_azure_local_dir(self.local_sync_dir)
meta_sync.clear_local_meta_id(self.local_sync_dir)
        # TODO: This is a terribly inefficient approach as we're
# copying everything just to get metadata for the runs. The
# azcopy sync command has limited include/exclude pattern
# support which makes it hard to use for this
# application. Copying metadata for each remote run would
# likely be quite inefficient as well, though certainly less
# impacting on local storage.
sync_args = [
self._container_path(),
self.local_sync_dir,
"--delete-destination",
"true",
]
self._azcopy("sync", sync_args, quiet=True)
def _remote_meta_id(self):
with util.TempFile("guild-azure-blob-") as tmp:
args = [self._container_path("meta-id"), tmp.path]
self._azcopy("copy", args, quiet=True)
return open(tmp.path, "r").read().strip()
def _azcopy(self, cmd_name, args, quiet=False):
cmd = [_azcopy_cmd(), cmd_name] + args
log.debug("azcopy: %r", cmd)
try:
remote_util.subprocess_call(cmd, extra_env=self.local_env, quiet=quiet)
except subprocess.CalledProcessError as e:
raise remotelib.RemoteProcessError.for_called_process_error(e)
def _delete_runs(self, runs, permanent):
for run in runs:
run_path = self._container_path(*(RUNS_PATH + [run.id]))
if permanent:
self._azcopy("remove", [run_path, "--recursive"])
else:
deleted_path = self._container_path(*(DELETED_RUNS_PATH + [run.id]))
# TODO: We want a simple move/rename here but it looks
# like azcopy requires copy+delete. The copy from a
# blob for some reason requires a SAS token. Stubbing
# this out for now.
self._azcopy("copy", [run_path, deleted_path, "--recursive"])
self._azcopy("remove", [run_path, "--recursive"])
self._new_meta_id()
def _restore_runs(self, runs):
for run in runs:
deleted_path = self._container_path(*(DELETED_RUNS_PATH + [run.id]))
restored_path = self._container_path(*(RUNS_PATH + [run.id]))
# TODO: See _delete_runs above. Same problem applies here.
self._azcopy("copy", [deleted_path, restored_path, "--recursive"])
self._azcopy("remove", [deleted_path, "--recursive"])
self._new_meta_id()
def _purge_runs(self, runs):
for run in runs:
path = self._container_path(*(DELETED_RUNS_PATH + [run.id]))
self._azsync("remove", [path, "--recursive"])
self._new_meta_id()
def status(self, verbose=False):
path = self._container_path()
try:
self._azcopy("ls", [path], quiet=True)
except remotelib.RemoteProcessError as e:
self._handle_status_error(e)
else:
sys.stdout.write(f"{self.name} ({path}) is available\n")
def _handle_status_error(self, e):
output = e.output.decode()
if "NoSuchBucket" in output:
raise remotelib.OperationError(
f"{self.name} is not available - {self.container} does not exist"
)
raise remotelib.OperationError(f"{self.name} is not available: output")
def push(self, runs, delete=False):
for run in runs:
self._push_run(run, delete)
self._new_meta_id()
self._sync_runs_meta(force=True)
def _push_run(self, run, delete):
local_run_src = os.path.join(run.path, "")
remote_run_dest = self._container_path(*RUNS_PATH + [run.id])
args = [local_run_src, remote_run_dest]
if delete:
args[:0] = ["--delete-destination", "true"]
log.info("Copying %s to %s", run.id, self.name)
self._azcopy("sync", args)
def _new_meta_id(self):
meta_id = uuid.uuid4().hex
with util.TempFile("guild-azure-blob-") as tmp:
with open(tmp.path, "w") as f:
f.write(meta_id)
args = [tmp.path, self._container_path("meta-id")]
self._azcopy("copy", args)
    def pull(self, runs, delete=False):
for run in runs:
self._pull_run(run, delete)
def _pull_run(self, run, delete):
remote_run_src = self._container_path(*RUNS_PATH + [run.id])
local_run_dest = os.path.join(var.runs_dir(), run.id)
args = [remote_run_src, local_run_dest]
if delete:
args[:0] = ["--delete-destination", "true"]
log.info("Copying %s from %s", run.id, self.name)
util.ensure_dir(local_run_dest)
self._azcopy("sync", args)
def _azcopy_cmd():
cmd = util.which("azcopy")
if not cmd:
raise remotelib.OperationError(
"AzCopy is not available\n"
"Refer to https://docs.microsoft.com/en-us/azure/storage/"
"common/storage-use-azcopy-v10 for help installing it."
)
return cmd
def _ensure_azure_local_dir(dir):
"""Creates dir if it doesn't exist.
azcopy doesn't know it's a local directory if it doesn't exist.
"""
    util.ensure_dir(dir)
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""grain tests."""
from unittest import mock
import pytest
import tensorflow_datasets as tfds
from tensorflow_datasets.core import dataset_info as dataset_info_lib
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core.data_sources import array_record
from tensorflow_datasets.core.utils import shard_utils
from tensorflow_datasets.core.utils.lazy_imports_utils import array_record_data_source
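# Each FileInstruction selects a contiguous slice of one shard file:
# skip the first `skip` examples, then read `take` of them.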
_FILE_INSTRUCTIONS = [
shard_utils.FileInstruction(
'my_file-000-of-003', skip=0, take=12, examples_in_shard=12
),
shard_utils.FileInstruction(
'my_file-001-of-003', skip=2, take=9, examples_in_shard=11
),
shard_utils.FileInstruction(
'my_file-002-of-003', skip=0, take=4, examples_in_shard=4
),
]
def _create_dataset_info():
with mock.patch.object(splits_lib, 'SplitInfo') as split_mock:
split_mock.return_value.name = 'train'
split_mock.return_value.file_instructions = _FILE_INSTRUCTIONS
dataset_info = mock.create_autospec(dataset_info_lib.DatasetInfo)
dataset_info.file_format = file_adapters.FileFormat.ARRAY_RECORD
dataset_info.splits = {'train': split_mock()}
dataset_info.name = 'dataset_name'
return dataset_info
@pytest.mark.parametrize(
['file_format'],
[
(file_adapters.FileFormat.RIEGELI,),
(file_adapters.FileFormat.TFRECORD,),
],
)
def test_unsupported_file_formats_raise_error(file_format):
  dataset_info = _create_dataset_info()
dataset_info.file_format = file_format
with pytest.raises(
NotImplementedError,
match='No random access data source for file format',
):
array_record.ArrayRecordDataSource(dataset_info, split='train')
def test_missing_split_raises_error():
  dataset_info = _create_dataset_info()
with pytest.raises(
ValueError,
match="Unknown split 'doesnotexist'.",
):
array_record.ArrayRecordDataSource(dataset_info, split='doesnotexist')
def test_array_record_file_format_delegates_to_array_record_data_source():
  dataset_info = _create_dataset_info()
with mock.patch.object(
array_record_data_source, 'ArrayRecordDataSource'
) as array_record_data_source_mock:
array_record.ArrayRecordDataSource(dataset_info, split='train')
array_record_data_source_mock.assert_called_once_with(_FILE_INSTRUCTIONS)
def test_data_source_calls_array_record_data_source():
  dataset_info = _create_dataset_info()
mock_data_source = mock.create_autospec(
array_record_data_source.ArrayRecordDataSource,
spec_set=True,
)
mock_data_source.__getitem__.side_effect = [
'serialized example',
]
mock_data_source.__len__.return_value = 42
with mock.patch.object(
array_record_data_source,
'ArrayRecordDataSource',
return_value=mock_data_source,
), mock.patch.object(
dataset_info.features,
'deserialize_example_np',
return_value='deserialized example',
) as deserialize_example_mock:
data_source = array_record.ArrayRecordDataSource(
dataset_info, split='train'
)
assert data_source[12] == 'deserialized example'
deserialize_example_mock.assert_called_with(
'serialized example', decoders=None
)
assert deserialize_example_mock.call_count == 1
def test_repr_returns_meaningful_string_without_decoders():
  dataset_info = _create_dataset_info()
with mock.patch.object(array_record_data_source, 'ArrayRecordDataSource'):
source = array_record.ArrayRecordDataSource(dataset_info, split='train')
assert (
repr(source)
== "DataSource(name=dataset_name, split='train', decoders=None)"
)
def test_repr_returns_meaningful_string_with_decoders():
  dataset_info = _create_dataset_info()
with mock.patch.object(array_record_data_source, 'ArrayRecordDataSource'):
source = array_record.ArrayRecordDataSource(
dataset_info,
split='train',
decoders={'my_feature': decode.SkipDecoding()},
)
assert (
repr(source)
== 'DataSource(name=dataset_name,'
" split='train', decoders={'my_feature': <class"
" 'tensorflow_datasets.core.decode.base.SkipDecoding'>})"
)
def test_data_source_is_iterable():
  dataset_info = _create_dataset_info()
mock_data_source = mock.create_autospec(
array_record_data_source.ArrayRecordDataSource,
spec_set=True,
)
mock_data_source.__getitem__.return_value = [1, 2, 3]
mock_data_source.__len__.return_value = 3
with mock.patch.object(
array_record_data_source,
'ArrayRecordDataSource',
return_value=mock_data_source,
), mock.patch.object(
dataset_info.features,
'deserialize_example_np',
return_value='deserialized example',
) as deserialize_example_mock:
data_source = array_record.ArrayRecordDataSource(
dataset_info, split='train'
)
assert len(data_source) == 3
for _ in data_source:
continue
assert mock_data_source.__getitem__.call_count == 3
assert deserialize_example_mock.call_count == 3
assert mock_data_source.__getitem__.call_args_list[0].args[0] == 0
assert mock_data_source.__getitem__.call_args_list[1].args[0] == 1
assert mock_data_source.__getitem__.call_args_list[2].args[0] == 2
def test_data_source_is_sliceable():
mock_array_record_data_source = tfds.testing.PickableDataSourceMock()
with tfds.testing.mock_data(
mock_array_record_data_source=mock_array_record_data_source
):
tfds.data_source('mnist', split='train')
assert len(mock_array_record_data_source.call_args_list) == 1
file_instructions = mock_array_record_data_source.call_args_list[0].args[0]
assert file_instructions[0].skip == 0
assert file_instructions[0].take == 60000
tfds.data_source('mnist', split='train[:50%]')
assert len(mock_array_record_data_source.call_args_list) == 2
file_instructions = mock_array_record_data_source.call_args_list[1].args[0]
assert file_instructions[0].skip == 0
    assert file_instructions[0].take == 30000
"""
This type stub file was generated by pyright.
"""
"""Build factory instances."""
DeclarationWithContext = ...
class DeclarationSet:
"""A set of declarations, including the recursive parameters.
Attributes:
declarations (dict(name => declaration)): the top-level declarations
contexts (dict(name => dict(subfield => value))): the nested parameters related
to a given top-level declaration
This object behaves similarly to a dict mapping a top-level declaration name to a
DeclarationWithContext, containing field name, declaration object and extra context.
"""
def __init__(self, initial=...) -> None: ...
@classmethod
def split(cls, entry): # -> tuple[Unknown, None]:
"""Split a declaration name into a (declaration, subpath) tuple.
Examples:
>>> DeclarationSet.split('foo__bar')
('foo', 'bar')
>>> DeclarationSet.split('foo')
('foo', None)
>>> DeclarationSet.split('foo__bar__baz')
('foo', 'bar__baz')
"""
...
@classmethod
def join(cls, root, subkey): # -> LiteralString:
"""Rebuild a full declaration name from its components.
for every string x, we have `join(split(x)) == x`.
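        Example:
            >>> DeclarationSet.join('foo', 'bar')
            'foo__bar'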
"""
...
    def copy(self): ...
def update(self, values): # -> None:
"""Add new declarations to this set/
Args:
values (dict(name, declaration)): the declarations to ingest.
"""
...
def filter(self, entries): # -> list[Unknown]:
"""Filter a set of declarations: keep only those related to this object.
This will keep:
- Declarations that 'override' the current ones
- Declarations that are parameters to current ones
"""
...
def sorted(self): ...
def __contains__(self, key): ...
def __getitem__(self, key): ...
def __iter__(self): ...
def values(self): # -> Generator[DeclarationWithContext, None, None]:
"""Retrieve the list of declarations, with their context."""
...
def as_dict(self): # -> dict[Unknown | LiteralString, Unknown]:
"""Return a dict() suitable for our __init__."""
...
def __repr__(self): ...
def parse_declarations(decls, base_pre=..., base_post=...): ...
class BuildStep:
def __init__(self, builder, sequence, parent_step=...) -> None: ...
def resolve(self, declarations): ...
@property
def chain(self): ...
def recurse(self, factory, declarations, force_sequence=...): ...
def __repr__(self): ...
class StepBuilder:
"""A factory instantiation step.
Attributes:
- parent: the parent StepBuilder, or None for the root step
- extras: the passed-in kwargs for this branch
- factory: the factory class being built
- strategy: the strategy to use
"""
def __init__(self, factory_meta, extras, strategy) -> None: ...
def build(self, parent_step=..., force_sequence=...):
"""Build a factory instance."""
...
def recurse(self, factory_meta, extras): # -> Self@StepBuilder:
"""Recurse into a sub-factory call."""
...
def __repr__(self): ...
class Resolver:
"""Resolve a set of declarations.
Attributes are set at instantiation time, values are computed lazily.
Attributes:
        __initialized (bool): whether this object's __init__ has run. If set,
setting any attribute will be prevented.
__declarations (dict): maps attribute name to their declaration
__values (dict): maps attribute name to computed value
__pending (str list): names of the attributes whose value is being
computed. This allows to detect cyclic lazy attribute definition.
__step (BuildStep): the BuildStep related to this resolver.
This allows to have the value of a field depend on the value of
another field
"""
__initialized = ...
def __init__(self, declarations, step, sequence) -> None: ...
@property
def factory_parent(self): ...
def __repr__(self): ...
def __getattr__(self, name):
"""Retrieve an attribute's value.
This will compute it if needed, unless it is already on the list of
attributes being computed.
"""
...
def __setattr__(self, name, value): # -> None:
"""Prevent setting attributes once __init__ is done."""
... | null |
661 | from datetime import timedelta
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.timezone import now
from django_ses.signals import bounce_received, complaint_received
from rest_framework.authtoken.models import Token
from cl.api.models import Webhook
from cl.lib.crypto import sha1_activation_key
from cl.users.email_handlers import (
handle_complaint,
handle_hard_bounce,
handle_soft_bounce,
)
from cl.users.models import UserProfile, generate_recap_email
from cl.users.tasks import notify_new_or_updated_webhook
def get_message_id(mail_obj: dict) -> str:
"""Returns the unique message_id from the SES notification header.
    :param mail_obj: The notification mail object to extract the message_id from
:return message_id: The unique message_id
"""
headers = mail_obj["headers"]
for header in headers:
if header["name"] == "X-CL-ID":
message_id = header["value"]
return message_id
return ""
@receiver(bounce_received, dispatch_uid="bounce_handler")
def METHOD_NAME(sender, mail_obj, bounce_obj, raw_message, *args, **kwargs):
"""Receiver function to handle bounce notifications sent by Amazon SES via
handle_event_webhook
"""
message_id = get_message_id(mail_obj)
if bounce_obj:
bounce_type = bounce_obj["bounceType"]
bounce_sub_type = bounce_obj["bounceSubType"]
bounced_recipients = bounce_obj["bouncedRecipients"]
# If bounce_type is Permanent, handle a hard bounce
# If bounce_type is Transient, handle a soft bounce
# If bounce_type is Undetermined, handle a soft bounce
if bounce_type == "Permanent":
hard_recipient_emails = [
email["emailAddress"] for email in bounced_recipients
]
handle_hard_bounce(bounce_sub_type, hard_recipient_emails)
elif bounce_type == "Transient" or "Undetermined":
# Only consider a soft bounce those that contains a "failed" action
# in its bounce recipient, avoiding other bounces that might not
# be related to failed deliveries, like auto-responders.
soft_recipient_emails = [
email["emailAddress"]
for email in bounced_recipients
if email.get("action", None) == "failed"
]
if soft_recipient_emails:
handle_soft_bounce(
message_id, bounce_sub_type, soft_recipient_emails
)
@receiver(complaint_received, dispatch_uid="complaint_handler")
def complaint_handler(
sender, mail_obj, complaint_obj, raw_message, *args, **kwargs
):
"""Receiver function to handle complaint notifications sent by
Amazon SES via handle_event_webhook
"""
if complaint_obj:
complained_recipients = complaint_obj["complainedRecipients"]
recipient_emails = [
email["emailAddress"] for email in complained_recipients
]
handle_complaint(recipient_emails)
@receiver(
post_save,
sender=settings.AUTH_USER_MODEL,
dispatch_uid="create_auth_token",
)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
@receiver(
post_save,
sender=settings.AUTH_USER_MODEL,
dispatch_uid="create_superuser_profile_object",
)
def superuser_creation(sender, instance, created, **kwargs):
# Create a profile whenever createsuperuser is run
if created and instance.is_superuser:
UserProfile.objects.create(
user=instance,
activation_key=sha1_activation_key(instance.username),
key_expires=now() + timedelta(days=5),
email_confirmed=True,
)
@receiver(post_save, sender=UserProfile, dispatch_uid="assign_recap_email")
def assign_recap_email(sender, instance=None, created=False, **kwargs) -> None:
if created:
instance.recap_email = generate_recap_email(instance)
instance.save()
@receiver(post_save, sender=Webhook, dispatch_uid="webhook_created_or_updated")
def webhook_created_or_updated(
sender, instance=None, created=False, update_fields=None, **kwargs
) -> None:
"""Notify admins when a new webhook is created or updated. Avoid sending
the notification when the webhook failure_count is updated.
"""
if created:
notify_new_or_updated_webhook.delay(instance.pk, created=True)
else:
if update_fields:
            if any(
                field in update_fields
                for field in ("failure_count", "enabled", "date_modified")
            ):
return
notify_new_or_updated_webhook.delay(instance.pk, created=False) | null |
662 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import uuid
from time import sleep
from urlparse import urlparse
import pytest
from common_utils.pipe_cli import pipe_storage_cp
from pipeline_api_provider import CloudPipelineApiProvider
def create_test_folder(path):
if not os.path.exists(path):
os.makedirs(path)
def create_test_file(path, content):
create_test_folder(os.path.dirname(path))
with open(path, 'w') as f:
f.write(content)
def clean_test_data(path):
if not os.path.exists(path):
return
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def get_relative_path(url):
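    # Illustrative example: 's3://bucket/case/file.txt' -> 'case/file.txt'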
parsed = urlparse(url)
return parsed.geturl().replace("%s://" % parsed.scheme, '', 1).replace(parsed.hostname, '', 1).strip("/")
class TestSearch(object):
pipeline_api = CloudPipelineApiProvider()
common_prefix = str(uuid.uuid4()).replace("-", "")
folder = None
pipeline = None
s3_storage = None
issue = None
s3_storage_file_local_path = None
s3_storage_file_path = None
@classmethod
def setup_class(cls):
logging.basicConfig(filename='tests.log', level=logging.INFO,
format='%(levelname)s %(asctime)s %(module)s:%(message)s')
logging.info(cls.common_prefix)
cls.folder = cls.pipeline_api.create_folder(cls.common_prefix)
logging.info("Folder with name %s " % cls.common_prefix)
cls.pipeline = cls.pipeline_api.create_pipeline(cls.common_prefix, cls.common_prefix)
logging.info("Pipeline with name % s" % cls.common_prefix)
cls.s3_storage = cls.pipeline_api.create_s3_data_storage(cls.common_prefix, cls.common_prefix)
logging.info("S3 data storage with name % s" % cls.common_prefix)
cls.issue = cls.pipeline_api.create_issue(cls.common_prefix, cls.common_prefix, cls.folder['id'], 'FOLDER')
logging.info("Issue with name % s" % cls.common_prefix)
cls.pipeline_api.create_comment(cls.issue['id'], cls.common_prefix)
logging.info("Issue comment with text %s" % cls.common_prefix)
cls.s3_storage_file_local_path = os.path.abspath(cls.common_prefix + ".txt")
cls.s3_storage_file_path = 's3://%s/case/' % cls.common_prefix
create_test_file(cls.s3_storage_file_local_path, cls.common_prefix)
pipe_storage_cp(cls.s3_storage_file_local_path, cls.s3_storage_file_path)
logging.info("S3 file with path %s" % cls.s3_storage_file_path)
sleep(600)
@classmethod
def teardown_class(cls):
cls.pipeline_api.delete_folder(cls.folder['id'])
logging.info("Folder %d has been deleted" % cls.folder['id'])
cls.pipeline_api.delete_pipeline(cls.pipeline['id'])
logging.info("Pipeline %d has been deleted" % cls.pipeline['id'])
cls.pipeline_api.delete_data_storage(cls.s3_storage['id'])
logging.info("Data storage %d has been deleted" % cls.s3_storage['id'])
clean_test_data(cls.s3_storage_file_local_path)
@pytest.mark.run(order=1)
def test_search_folder(self):
result = self.pipeline_api.search(self.common_prefix, "FOLDER")
self.verify_search_result(result)
assert len(result['documents']) == 1
for doc in result['documents']:
assert int(doc['id']) == int(self.folder['id'])
assert int(doc['elasticId']) == int(self.folder['id'])
assert doc['name'] == self.folder['name']
assert doc['type'] == 'FOLDER'
self.METHOD_NAME(['name'], doc['highlights'])
@pytest.mark.run(order=1)
def test_search_pipeline(self):
result = self.pipeline_api.search(self.common_prefix, "PIPELINE")
self.verify_search_result(result)
assert len(result['documents']) == 1
for doc in result['documents']:
assert int(doc['id']) == int(self.pipeline['id'])
assert int(doc['elasticId']) == int(self.pipeline['id'])
assert doc['name'] == self.pipeline['name']
assert doc['type'] == 'PIPELINE'
self.METHOD_NAME(['name', 'description'], doc['highlights'])
@pytest.mark.run(order=1)
def test_search_s3_data_storage(self):
result = self.pipeline_api.search(self.common_prefix, "S3_STORAGE")
self.verify_search_result(result)
assert len(result['documents']) == 1
for doc in result['documents']:
assert int(doc['id']) == int(self.s3_storage['id'])
assert int(doc['elasticId']) == int(self.s3_storage['id'])
assert doc['name'] == self.s3_storage['name']
assert doc['type'] == 'S3_STORAGE'
self.METHOD_NAME(['name', 'description', 'path'], doc['highlights'])
@pytest.mark.run(order=2)
def test_search_issue(self):
result = self.pipeline_api.search(self.common_prefix, "ISSUE")
self.verify_search_result(result)
assert len(result['documents']) == 1
for doc in result['documents']:
assert int(doc['id']) == int(self.issue['id'])
assert int(doc['elasticId']) == int(self.issue['id'])
assert doc['name'] == self.issue['name']
assert doc['type'] == 'ISSUE'
self.METHOD_NAME(['name', 'text', 'comments'], doc['highlights'])
@pytest.mark.run(order=2)
def test_search_s3_file(self):
result = self.pipeline_api.search(self.common_prefix, "S3_FILE")
self.verify_search_result(result)
assert len(result['documents']) == 1
for doc in result['documents']:
name = get_relative_path(self.s3_storage_file_path)
assert doc['id'] == name
assert doc['name'] == name
assert doc['type'] == 'S3_FILE'
self.METHOD_NAME(['name', 'storage_name'], doc['highlights'])
@staticmethod
def verify_search_result(result):
assert 'totalHits' in result and int(result['totalHits']) > 0
assert 'searchSucceeded' in result and bool(result['searchSucceeded'])
assert 'documents' in result
for doc in result['documents']:
assert 'elasticId' in doc
assert 'id' in doc
assert 'name' in doc
assert 'type' in doc
assert 'highlights' in doc
@staticmethod
def METHOD_NAME(expected_fields, actual_highlights):
highlights = []
assert len(expected_fields) == len(actual_highlights)
for highlight in actual_highlights:
assert 'fieldName' in highlight
assert 'matches' in highlight and len(highlight['matches']) == 1
highlights.append(highlight['fieldName'])
assert set(expected_fields) == set(highlights) | null |
663 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkros.endpoint import endpoint_data
class UpdateStackRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ROS', '2019-09-10', 'UpdateStack','ros')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_StackPolicyDuringUpdateBody(self):
return self.get_query_params().get('StackPolicyDuringUpdateBody')
def set_StackPolicyDuringUpdateBody(self,StackPolicyDuringUpdateBody):
self.add_query_param('StackPolicyDuringUpdateBody',StackPolicyDuringUpdateBody)
def get_TemplateVersion(self):
return self.get_query_params().get('TemplateVersion')
def set_TemplateVersion(self,TemplateVersion):
self.add_query_param('TemplateVersion',TemplateVersion)
def get_DisableRollback(self):
return self.get_query_params().get('DisableRollback')
def set_DisableRollback(self,DisableRollback):
self.add_query_param('DisableRollback',DisableRollback)
def get_TemplateId(self):
return self.get_query_params().get('TemplateId')
def set_TemplateId(self,TemplateId):
self.add_query_param('TemplateId',TemplateId)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tagss):
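		# Flattens the list into numbered query params, e.g. (illustrative):
		# [{'Key': 'env', 'Value': 'prod'}] -> Tags.1.Key=env, Tags.1.Value=prod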
for depth1 in range(len(Tagss)):
if Tagss[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tagss[depth1].get('Value'))
if Tagss[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tagss[depth1].get('Key'))
def get_Parameters(self):
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameterss):
for depth1 in range(len(Parameterss)):
if Parameterss[depth1].get('ParameterValue') is not None:
self.add_query_param('Parameters.' + str(depth1 + 1) + '.ParameterValue', Parameterss[depth1].get('ParameterValue'))
if Parameterss[depth1].get('ParameterKey') is not None:
self.add_query_param('Parameters.' + str(depth1 + 1) + '.ParameterKey', Parameterss[depth1].get('ParameterKey'))
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_TemplateBody(self):
return self.get_query_params().get('TemplateBody')
def set_TemplateBody(self,TemplateBody):
self.add_query_param('TemplateBody',TemplateBody)
def get_StackId(self):
return self.get_query_params().get('StackId')
def set_StackId(self,StackId):
self.add_query_param('StackId',StackId)
def get_TemplateURL(self):
return self.get_query_params().get('TemplateURL')
def set_TemplateURL(self,TemplateURL):
self.add_query_param('TemplateURL',TemplateURL)
def get_StackPolicyBody(self):
return self.get_query_params().get('StackPolicyBody')
def set_StackPolicyBody(self,StackPolicyBody):
self.add_query_param('StackPolicyBody',StackPolicyBody)
def get_StackPolicyDuringUpdateURL(self):
return self.get_query_params().get('StackPolicyDuringUpdateURL')
def set_StackPolicyDuringUpdateURL(self,StackPolicyDuringUpdateURL):
self.add_query_param('StackPolicyDuringUpdateURL',StackPolicyDuringUpdateURL)
def get_RamRoleName(self):
return self.get_query_params().get('RamRoleName')
def set_RamRoleName(self,RamRoleName):
self.add_query_param('RamRoleName',RamRoleName)
def get_UsePreviousParameters(self):
return self.get_query_params().get('UsePreviousParameters')
def METHOD_NAME(self,UsePreviousParameters):
self.add_query_param('UsePreviousParameters',UsePreviousParameters)
def get_ReplacementOption(self):
return self.get_query_params().get('ReplacementOption')
def set_ReplacementOption(self,ReplacementOption):
self.add_query_param('ReplacementOption',ReplacementOption)
def get_StackPolicyURL(self):
return self.get_query_params().get('StackPolicyURL')
def set_StackPolicyURL(self,StackPolicyURL):
		self.add_query_param('StackPolicyURL',StackPolicyURL)
664 | ##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import imath
import IECore
class TestDataCastOp( unittest.TestCase ) :
def testTypeConvertion( self ) :
self.assertEqual( IECore.DataCastOp()( object = IECore.FloatData( 2 ), targetType = int(IECore.TypeId.DoubleData) ), IECore.DoubleData( 2 ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.DoubleData( 2 ), targetType = int(IECore.TypeId.FloatData) ), IECore.FloatData( 2 ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.IntData( 2 ), targetType = int(IECore.TypeId.UIntData) ), IECore.UIntData( 2 ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.V3fData( imath.V3f( 2 ) ), targetType = int(IECore.TypeId.V3dData) ), IECore.V3dData( imath.V3d( 2 ) ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.QuatfData( imath.Quatf( 1,2,3,4 ) ), targetType = int(IECore.TypeId.QuatdData) ), IECore.QuatdData( imath.Quatd( 1,2,3,4 ) ) )
def METHOD_NAME( self ) :
self.assertEqual( IECore.DataCastOp()( object = IECore.V3fData( imath.V3f( 1, 2, 3 ) ), targetType = int(IECore.TypeId.FloatVectorData) ), IECore.FloatVectorData( [ 1, 2, 3 ] ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.V3fData( imath.V3f( 1, 2, 3 ) ), targetType = int(IECore.TypeId.Color3fData) ), IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3) ] ), targetType = int(IECore.TypeId.FloatVectorData) ), IECore.FloatVectorData( [ 1, 1, 1, 2, 2, 2, 3, 3, 3 ] ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.FloatVectorData( [ 1, 2, 3 ] ), targetType = int(IECore.TypeId.V3fData) ), IECore.V3fData( imath.V3f( 1, 2, 3 ) ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.Color3fData( imath.Color3f( 1, 2, 3 ) ), targetType = int(IECore.TypeId.V3fData) ), IECore.V3fData( imath.V3f( 1, 2, 3 ) ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.FloatVectorData( [ 1, 1, 1, 2, 2, 2, 3, 3, 3 ] ), targetType = int(IECore.TypeId.V3fVectorData) ), IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3) ] ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3) ] ), targetType = int(IECore.TypeId.Color3fVectorData) ), IECore.Color3fVectorData( [ imath.Color3f(1), imath.Color3f(2), imath.Color3f(3) ] ) )
self.assertEqual( IECore.DataCastOp()( object = IECore.V3dVectorData( [ imath.V3d(1), imath.V3d(2), imath.V3d(3) ] ), targetType = int(IECore.TypeId.Color3fVectorData) ), IECore.Color3fVectorData( [ imath.Color3f(1), imath.Color3f(2), imath.Color3f(3) ] ) )
def testInvalidConversions( self ) :
tests = [
( IECore.FloatVectorData( [ 1, 2, 3 ] ), int(IECore.TypeId.V2fData) ),
( imath.M33f(), int( IECore.TypeId.M44fData ) ),
( IECore.FloatVectorData( [ 1, 2, 3, 4 ] ), int(IECore.TypeId.V3fData) ),
( IECore.FloatVectorData( [ 1, 2 ] ), int(IECore.TypeId.V3fData) ),
( IECore.FloatVectorData( [ 1, 2, 3, 4 ] ), int(IECore.TypeId.V3fVectorData) ),
( IECore.FloatVectorData( [ 1, 2, 3, 4, 5 ] ), int(IECore.TypeId.V3fVectorData) ),
]
i = 0
for ( obj, tt ) in tests:
try:
IECore.DataCastOp()( object = obj, targetType = tt )
except:
i += 1
else:
				raise Exception( "Should fail on this test " + str(i) )
if __name__ == "__main__":
unittest.main() | null |
665 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvcs.endpoint import endpoint_data
class GetProfileListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vcs', '2020-05-15', 'GetProfileList','vcs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProfileIdList(self):
return self.get_body_params().get('ProfileIdList')
def set_ProfileIdList(self,ProfileIdList):
self.add_body_params('ProfileIdList', ProfileIdList)
def get_CorpId(self):
return self.get_body_params().get('CorpId')
def set_CorpId(self,CorpId):
self.add_body_params('CorpId', CorpId)
def get_Gender(self):
return self.get_body_params().get('Gender')
def set_Gender(self,Gender):
self.add_body_params('Gender', Gender)
def get_PlateNo(self):
return self.get_body_params().get('PlateNo')
def METHOD_NAME(self,PlateNo):
self.add_body_params('PlateNo', PlateNo)
def get_IdNumber(self):
return self.get_body_params().get('IdNumber')
def set_IdNumber(self,IdNumber):
self.add_body_params('IdNumber', IdNumber)
def get_PageNumber(self):
return self.get_body_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_body_params('PageNumber', PageNumber)
def get_FaceImageId(self):
return self.get_body_params().get('FaceImageId')
def set_FaceImageId(self,FaceImageId):
self.add_body_params('FaceImageId', FaceImageId)
def get_FaceUrl(self):
return self.get_body_params().get('FaceUrl')
def set_FaceUrl(self,FaceUrl):
self.add_body_params('FaceUrl', FaceUrl)
def get_PageSize(self):
return self.get_body_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_body_params('PageSize', PageSize)
def get_PersonIdList(self):
return self.get_body_params().get('PersonIdList')
def set_PersonIdList(self,PersonIdList):
self.add_body_params('PersonIdList', PersonIdList)
def get_LiveAddress(self):
return self.get_body_params().get('LiveAddress')
def set_LiveAddress(self,LiveAddress):
self.add_body_params('LiveAddress', LiveAddress)
def get_IsvSubId(self):
return self.get_body_params().get('IsvSubId')
def set_IsvSubId(self,IsvSubId):
self.add_body_params('IsvSubId', IsvSubId)
def get_SceneType(self):
return self.get_body_params().get('SceneType')
def set_SceneType(self,SceneType):
self.add_body_params('SceneType', SceneType)
def get_PhoneNo(self):
return self.get_body_params().get('PhoneNo')
def set_PhoneNo(self,PhoneNo):
self.add_body_params('PhoneNo', PhoneNo)
def get_CatalogId(self):
return self.get_body_params().get('CatalogId')
def set_CatalogId(self,CatalogId):
self.add_body_params('CatalogId', CatalogId)
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name)
def get_BizId(self):
return self.get_body_params().get('BizId')
def set_BizId(self,BizId):
self.add_body_params('BizId', BizId)
def get_MatchingRateThreshold(self):
return self.get_body_params().get('MatchingRateThreshold')
def set_MatchingRateThreshold(self,MatchingRateThreshold):
		self.add_body_params('MatchingRateThreshold', MatchingRateThreshold)
666 | import asyncio
from typing import Any, Dict, List, Optional
from hummingbot.connector.exchange.injective_v2.injective_query_executor import BaseInjectiveQueryExecutor
class ProgrammableQueryExecutor(BaseInjectiveQueryExecutor):
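    """Test double whose query methods return pre-programmed responses.
    Each method awaits the next item from the matching asyncio.Queue, so a test
    can enqueue responses and stream events ahead of time. Illustrative usage:
        executor = ProgrammableQueryExecutor()
        executor._ping_responses.put_nowait({})
        response = await executor.ping()  # -> {}
    """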
def __init__(self):
self._ping_responses = asyncio.Queue()
self._spot_markets_responses = asyncio.Queue()
self._derivative_market_responses = asyncio.Queue()
self._derivative_markets_responses = asyncio.Queue()
self._spot_order_book_responses = asyncio.Queue()
self._derivative_order_book_responses = asyncio.Queue()
self._transaction_by_hash_responses = asyncio.Queue()
self._account_portfolio_responses = asyncio.Queue()
self._simulate_transaction_responses = asyncio.Queue()
self._send_transaction_responses = asyncio.Queue()
self._spot_trades_responses = asyncio.Queue()
self._derivative_trades_responses = asyncio.Queue()
self._historical_spot_orders_responses = asyncio.Queue()
self._historical_derivative_orders_responses = asyncio.Queue()
self._transaction_block_height_responses = asyncio.Queue()
self._funding_rates_responses = asyncio.Queue()
self._oracle_prices_responses = asyncio.Queue()
self._funding_payments_responses = asyncio.Queue()
self._derivative_positions_responses = asyncio.Queue()
self._spot_order_book_updates = asyncio.Queue()
self._public_spot_trade_updates = asyncio.Queue()
self._derivative_order_book_updates = asyncio.Queue()
self._public_derivative_trade_updates = asyncio.Queue()
self._oracle_prices_updates = asyncio.Queue()
self._subaccount_positions_events = asyncio.Queue()
self._subaccount_balance_events = asyncio.Queue()
self._historical_spot_order_events = asyncio.Queue()
self._historical_derivative_order_events = asyncio.Queue()
self._transaction_events = asyncio.Queue()
async def ping(self):
response = await self._ping_responses.get()
return response
async def spot_markets(self, status: str) -> Dict[str, Any]:
response = await self._spot_markets_responses.get()
return response
async def derivative_markets(self, status: str) -> Dict[str, Any]:
response = await self._derivative_markets_responses.get()
return response
async def derivative_market(self, market_id: str) -> Dict[str, Any]:
response = await self._derivative_market_responses.get()
return response
async def get_spot_orderbook(self, market_id: str) -> Dict[str, Any]:
response = await self._spot_order_book_responses.get()
return response
async def get_derivative_orderbook(self, market_id: str) -> Dict[str, Any]:
response = await self._derivative_order_book_responses.get()
return response
async def METHOD_NAME(self, tx_hash: str) -> Dict[str, Any]:
response = await self._transaction_by_hash_responses.get()
return response
async def get_tx_block_height(self, tx_hash: str) -> int:
response = await self._transaction_block_height_responses.get()
return response
async def account_portfolio(self, account_address: str) -> Dict[str, Any]:
response = await self._account_portfolio_responses.get()
return response
async def simulate_tx(self, tx_byte: bytes) -> Dict[str, Any]:
response = await self._simulate_transaction_responses.get()
return response
async def send_tx_sync_mode(self, tx_byte: bytes) -> Dict[str, Any]:
response = await self._send_transaction_responses.get()
return response
async def get_spot_trades(
self,
market_ids: List[str],
subaccount_id: Optional[str] = None,
start_time: Optional[int] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
) -> Dict[str, Any]:
response = await self._spot_trades_responses.get()
return response
async def get_derivative_trades(
self,
market_ids: List[str],
subaccount_id: Optional[str] = None,
start_time: Optional[int] = None,
skip: Optional[int] = None,
limit: Optional[int] = None,
) -> Dict[str, Any]:
response = await self._derivative_trades_responses.get()
return response
async def get_historical_spot_orders(
self,
market_ids: List[str],
subaccount_id: str,
start_time: int,
skip: int,
) -> Dict[str, Any]:
response = await self._historical_spot_orders_responses.get()
return response
async def get_historical_derivative_orders(
self,
market_ids: List[str],
subaccount_id: str,
start_time: int,
skip: int,
) -> Dict[str, Any]:
response = await self._historical_derivative_orders_responses.get()
return response
async def get_funding_rates(self, market_id: str, limit: int) -> Dict[str, Any]:
response = await self._funding_rates_responses.get()
return response
async def get_funding_payments(self, subaccount_id: str, market_id: str, limit: int) -> Dict[str, Any]:
response = await self._funding_payments_responses.get()
return response
async def get_derivative_positions(self, subaccount_id: str, skip: int) -> Dict[str, Any]:
response = await self._derivative_positions_responses.get()
return response
async def get_oracle_prices(
self,
base_symbol: str,
quote_symbol: str,
oracle_type: str,
oracle_scale_factor: int,
) -> Dict[str, Any]:
response = await self._oracle_prices_responses.get()
return response
async def spot_order_book_updates_stream(self, market_ids: List[str]):
while True:
next_ob_update = await self._spot_order_book_updates.get()
yield next_ob_update
async def public_spot_trades_stream(self, market_ids: List[str]):
while True:
next_trade = await self._public_spot_trade_updates.get()
yield next_trade
async def derivative_order_book_updates_stream(self, market_ids: List[str]):
while True:
next_ob_update = await self._derivative_order_book_updates.get()
yield next_ob_update
async def public_derivative_trades_stream(self, market_ids: List[str]):
while True:
next_trade = await self._public_derivative_trade_updates.get()
yield next_trade
async def oracle_prices_stream(self, oracle_base: str, oracle_quote: str, oracle_type: str):
while True:
next_update = await self._oracle_prices_updates.get()
yield next_update
async def subaccount_positions_stream(self, subaccount_id: str):
while True:
next_event = await self._subaccount_positions_events.get()
yield next_event
async def subaccount_balance_stream(self, subaccount_id: str):
while True:
next_event = await self._subaccount_balance_events.get()
yield next_event
async def subaccount_historical_spot_orders_stream(
self, market_id: str, subaccount_id: str
):
while True:
next_event = await self._historical_spot_order_events.get()
yield next_event
async def subaccount_historical_derivative_orders_stream(
self, market_id: str, subaccount_id: str
):
while True:
next_event = await self._historical_derivative_order_events.get()
yield next_event
    async def transactions_stream(self):
while True:
next_event = await self._transaction_events.get()
yield next_event | null |
667 | from __future__ import print_function
import logging
import sys
import time
import serial
import six
class DataError(Exception):
"""Raised when a serial communication is corrupt.
*data* attribute contains the corrupt packet.
*extra* attribute contains any data left in the serial buffer
past the end of the packet.
"""
def __init__(self, msg, data, extra):
self.data = data
self.extra = extra
Exception.__init__(self, msg)
class SerialDevice(object):
"""
Class used for standardizing access to serial devices.
Provides some commonly used functions for reading and writing
serial packets.
"""
def __init__(self, **kwds):
"""
All keyword arguments define the default arguments to use when
opening the serial port (see pyserial Serial.__init__).
If both 'port' and 'baudrate' are provided here, then
self.open() is called automatically.
"""
self.serial = None
self.__serialOpts = {
'bytesize': serial.EIGHTBITS,
'timeout': 0, # no timeout. See SerialDevice._readWithTimeout()
}
self.__serialOpts.update(kwds)
if 'port' in kwds and 'baudrate' in self.__serialOpts:
self.open()
@classmethod
def normalizePortName(cls, port):
"""
Return a 'normalized' port name that is always the same for a particular serial port.
        On Windows, this means 'com1', 'COM1', and 0 will all normalize to 'com1'. On unix,
the port name is unchanged.
"""
if sys.platform.startswith('win'):
if isinstance(port, int):
port = 'com%d' % (port + 1)
elif isinstance(port, six.string_types) and port.lower()[:3] == 'com':
port = port.lower()
return port
def open(self, port=None, baudrate=None, **kwds):
""" Open a serial port. If this port was previously closed, then calling
open() with no arguments will re-open the original port with the same settings.
All keyword arguments are sent to the pyserial Serial.__init__() method.
"""
if port is None:
port = self.__serialOpts['port']
if baudrate is None:
baudrate = self.__serialOpts['baudrate']
port = SerialDevice.normalizePortName(port)
self.__serialOpts.update({
'port': port,
'baudrate': baudrate,
})
self.__serialOpts.update(kwds)
self.serial = serial.Serial(**self.__serialOpts)
logging.info('Opened serial port: %s', self.__serialOpts)
def close(self):
"""Close the serial port."""
self.serial.close()
self.serial = None
logging.info('Closed serial port: %s', self.__serialOpts['port'])
def readAll(self):
"""Read all bytes waiting in buffer; non-blocking."""
n = self.serial.inWaiting()
if n > 0:
d = self.serial.read(n)
logging.info('Serial port %s readAll: %r', self.__serialOpts['port'], d)
return d
        return b''
def write(self, data):
"""Write *data* to the serial port"""
        if sys.version_info[0] >= 3 and isinstance(data, str):
data = data.encode()
logging.info('Serial port %s write: %r', self.__serialOpts['port'], data)
self.serial.write(data)
def read(self, length, timeout=5.0, term=None):
"""
Read *length* bytes or raise TimeoutError after *timeout* has elapsed.
If *term* is given, check that the packet is terminated with *term* and
return the packet excluding *term*. If the packet is not terminated
with *term*, then DataError is raised.
"""
# self.serial.setTimeout(timeout) # broken!
packet = self._readWithTimeout(length, timeout)
if len(packet) < length:
err = TimeoutError("Timed out waiting for serial data (received so far: %s)" % repr(packet))
err.data = packet
raise err
if term is not None:
if packet[-len(term):] != term:
time.sleep(0.01)
extra = self.readAll()
err = DataError("Packet corrupt: %s (len=%d)" % (repr(packet), len(packet)), packet, extra)
raise err
logging.info('Serial port %s read: %r', self.__serialOpts['port'], packet)
return packet[:-len(term)]
logging.info('Serial port %s read: %r', self.__serialOpts['port'], packet)
return packet
def _readWithTimeout(self, nBytes, timeout):
# Note: pyserial's timeout mechanism is broken (specifically, calling setTimeout can cause
# serial data to be lost) so we implement our own in readWithTimeout().
start = time.time()
packet = b''
# Interval between serial port checks is adaptive:
# * start with very short interval for low-latency reads
# * iteratively increase interval duration to reduce CPU usage on long reads
sleep = 100e-6 # initial sleep is 100 us
while time.time() - start < timeout:
waiting = self.serial.inWaiting()
if waiting > 0:
readBytes = min(waiting, nBytes - len(packet))
packet += self.serial.read(readBytes)
sleep = 100e-6 # every time we read data, reset sleep time
if len(packet) >= nBytes:
break
time.sleep(sleep)
sleep = min(0.05, 2 * sleep) # wait a bit longer next time
return packet
def readUntil(self, term, minBytes=0, timeout=5):
"""Read from the serial port until *term* is received, or *timeout* has elapsed.
If *minBytes* is given, then this number of bytes will be read without checking for *term*.
Returns the entire packet including *term*.
"""
if isinstance(term, str):
term = term.encode()
start = time.time()
if minBytes > 0:
packet = self.read(minBytes, timeout=timeout)
else:
packet = b''
while True:
elapsed = time.time() - start
if elapsed >= timeout:
err = TimeoutError("Timed out while reading serial packet. Data so far: '%r'" % packet)
err.data = packet
raise err
try:
packet += self.read(1, timeout=timeout - elapsed)
except TimeoutError:
err = TimeoutError("Timed out while reading serial packet. Data so far: '%r'" % packet)
err.data = packet
raise err
if len(packet) > minBytes and packet[-len(term):] == term:
return packet
def readline(self, **kwargs):
return self.readUntil("\n", **kwargs)
def hasDataToRead(self):
return self.serial.inWaiting() > 0
def METHOD_NAME(self):
## not recommended..
d = self.readAll()
time.sleep(0.1)
d += self.readAll()
if len(d) > 0:
print(self, "Warning: discarded serial data ", repr(d))
return d
def getPort(self):
"""Return the serial port that was last connected.
"""
return self.__serialOpts['port']
def getBaudrate(self):
"""Return the configured baud rate.
"""
return self.__serialOpts['baudrate']
if __name__ == '__main__':
try:
port, baud = sys.argv[1:3]
except ValueError:
print("Missing arguments! Usage:\n\tpython -i SerialDevice PORT BAUDRATE")
exit(1)
else:
sd = SerialDevice(port=port, baudrate=baud)
print("")
print("Serial port opened and available as 'sd'.")
print("Try using sd.write(...), sd.readAll(), and sd.read(length, term, timeout)") | null |
668 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class DescribeConnectionStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'DescribeConnectionStatus','dts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # String
return self.get_query_params().get('SourceEndpointRegion')
def set_SourceEndpointRegion(self, SourceEndpointRegion): # String
self.add_query_param('SourceEndpointRegion', SourceEndpointRegion)
def get_SourceEndpointArchitecture(self): # String
return self.get_query_params().get('SourceEndpointArchitecture')
def set_SourceEndpointArchitecture(self, SourceEndpointArchitecture): # String
self.add_query_param('SourceEndpointArchitecture', SourceEndpointArchitecture)
def get_DestinationEndpointInstanceType(self): # String
return self.get_query_params().get('DestinationEndpointInstanceType')
def set_DestinationEndpointInstanceType(self, DestinationEndpointInstanceType): # String
self.add_query_param('DestinationEndpointInstanceType', DestinationEndpointInstanceType)
def get_SourceEndpointInstanceID(self): # String
return self.get_query_params().get('SourceEndpointInstanceID')
def set_SourceEndpointInstanceID(self, SourceEndpointInstanceID): # String
self.add_query_param('SourceEndpointInstanceID', SourceEndpointInstanceID)
def get_SourceEndpointUserName(self): # String
return self.get_query_params().get('SourceEndpointUserName')
def set_SourceEndpointUserName(self, SourceEndpointUserName): # String
self.add_query_param('SourceEndpointUserName', SourceEndpointUserName)
def get_SourceEndpointDatabaseName(self): # String
return self.get_query_params().get('SourceEndpointDatabaseName')
def set_SourceEndpointDatabaseName(self, SourceEndpointDatabaseName): # String
self.add_query_param('SourceEndpointDatabaseName', SourceEndpointDatabaseName)
def get_DestinationEndpointRegion(self): # String
return self.get_query_params().get('DestinationEndpointRegion')
def set_DestinationEndpointRegion(self, DestinationEndpointRegion): # String
self.add_query_param('DestinationEndpointRegion', DestinationEndpointRegion)
def get_SourceEndpointIP(self): # String
return self.get_query_params().get('SourceEndpointIP')
def set_SourceEndpointIP(self, SourceEndpointIP): # String
self.add_query_param('SourceEndpointIP', SourceEndpointIP)
def get_DestinationEndpointUserName(self): # String
return self.get_query_params().get('DestinationEndpointUserName')
def set_DestinationEndpointUserName(self, DestinationEndpointUserName): # String
self.add_query_param('DestinationEndpointUserName', DestinationEndpointUserName)
def get_DestinationEndpointArchitecture(self): # String
return self.get_query_params().get('DestinationEndpointArchitecture')
def set_DestinationEndpointArchitecture(self, DestinationEndpointArchitecture): # String
self.add_query_param('DestinationEndpointArchitecture', DestinationEndpointArchitecture)
def get_DestinationEndpointOracleSID(self): # String
return self.get_query_params().get('DestinationEndpointOracleSID')
def set_DestinationEndpointOracleSID(self, DestinationEndpointOracleSID): # String
self.add_query_param('DestinationEndpointOracleSID', DestinationEndpointOracleSID)
def get_DestinationEndpointEngineName(self): # String
return self.get_query_params().get('DestinationEndpointEngineName')
def set_DestinationEndpointEngineName(self, DestinationEndpointEngineName): # String
self.add_query_param('DestinationEndpointEngineName', DestinationEndpointEngineName)
def get_DestinationEndpointInstanceID(self): # String
return self.get_query_params().get('DestinationEndpointInstanceID')
def set_DestinationEndpointInstanceID(self, DestinationEndpointInstanceID): # String
self.add_query_param('DestinationEndpointInstanceID', DestinationEndpointInstanceID)
def get_DestinationEndpointPort(self): # String
return self.get_query_params().get('DestinationEndpointPort')
def set_DestinationEndpointPort(self, DestinationEndpointPort): # String
self.add_query_param('DestinationEndpointPort', DestinationEndpointPort)
def get_SourceEndpointPassword(self): # String
return self.get_query_params().get('SourceEndpointPassword')
def set_SourceEndpointPassword(self, SourceEndpointPassword): # String
self.add_query_param('SourceEndpointPassword', SourceEndpointPassword)
def get_SourceEndpointPort(self): # String
return self.get_query_params().get('SourceEndpointPort')
def set_SourceEndpointPort(self, SourceEndpointPort): # String
self.add_query_param('SourceEndpointPort', SourceEndpointPort)
def get_DestinationEndpointIP(self): # String
return self.get_query_params().get('DestinationEndpointIP')
def set_DestinationEndpointIP(self, DestinationEndpointIP): # String
self.add_query_param('DestinationEndpointIP', DestinationEndpointIP)
def get_SourceEndpointInstanceType(self): # String
return self.get_query_params().get('SourceEndpointInstanceType')
def set_SourceEndpointInstanceType(self, SourceEndpointInstanceType): # String
self.add_query_param('SourceEndpointInstanceType', SourceEndpointInstanceType)
def get_SourceEndpointOracleSID(self): # String
return self.get_query_params().get('SourceEndpointOracleSID')
def set_SourceEndpointOracleSID(self, SourceEndpointOracleSID): # String
self.add_query_param('SourceEndpointOracleSID', SourceEndpointOracleSID)
def get_DestinationEndpointDatabaseName(self): # String
return self.get_query_params().get('DestinationEndpointDatabaseName')
def set_DestinationEndpointDatabaseName(self, DestinationEndpointDatabaseName): # String
self.add_query_param('DestinationEndpointDatabaseName', DestinationEndpointDatabaseName)
def get_DestinationEndpointPassword(self): # String
return self.get_query_params().get('DestinationEndpointPassword')
def set_DestinationEndpointPassword(self, DestinationEndpointPassword): # String
self.add_query_param('DestinationEndpointPassword', DestinationEndpointPassword)
def get_SourceEndpointEngineName(self): # String
return self.get_query_params().get('SourceEndpointEngineName')
def set_SourceEndpointEngineName(self, SourceEndpointEngineName): # String
self.add_query_param('SourceEndpointEngineName', SourceEndpointEngineName) | null |
669 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class UpdateHealthCheckTemplateAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'UpdateHealthCheckTemplateAttribute','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckProtocol(self): # String
return self.get_query_params().get('HealthCheckProtocol')
def set_HealthCheckProtocol(self, HealthCheckProtocol): # String
self.add_query_param('HealthCheckProtocol', HealthCheckProtocol)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_HealthCheckPath(self): # String
return self.get_query_params().get('HealthCheckPath')
def set_HealthCheckPath(self, HealthCheckPath): # String
self.add_query_param('HealthCheckPath', HealthCheckPath)
def get_HealthCheckCodes(self): # Array
return self.get_query_params().get('HealthCheckCodes')
def set_HealthCheckCodes(self, HealthCheckCodes): # Array
for index1, value1 in enumerate(HealthCheckCodes):
self.add_query_param('HealthCheckCodes.' + str(index1 + 1), value1)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckHost(self): # String
return self.get_query_params().get('HealthCheckHost')
def set_HealthCheckHost(self, HealthCheckHost): # String
self.add_query_param('HealthCheckHost', HealthCheckHost)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_HealthCheckTemplateName(self): # String
return self.get_query_params().get('HealthCheckTemplateName')
def set_HealthCheckTemplateName(self, HealthCheckTemplateName): # String
self.add_query_param('HealthCheckTemplateName', HealthCheckTemplateName)
def get_HealthCheckTemplateId(self): # String
return self.get_query_params().get('HealthCheckTemplateId')
def set_HealthCheckTemplateId(self, HealthCheckTemplateId): # String
self.add_query_param('HealthCheckTemplateId', HealthCheckTemplateId)
def get_HealthCheckHttpVersion(self): # String
return self.get_query_params().get('HealthCheckHttpVersion')
def set_HealthCheckHttpVersion(self, HealthCheckHttpVersion): # String
self.add_query_param('HealthCheckHttpVersion', HealthCheckHttpVersion)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort) | null |
670 | """
SoftLayer.tests.CLI.modules.tag_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the user cli command
"""
from unittest import mock as mock
from SoftLayer.exceptions import SoftLayerAPIError
from SoftLayer import testing
class TagCLITests(testing.TestCase):
def test_list(self):
result = self.run_command(['tags', 'list'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'getUnattachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'getAttachedTagsForCurrentUser')
self.assertIn('coreos', result.output)
def test_list_detail(self):
result = self.run_command(['tags', 'list', '-d'])
self.assert_no_fail(result)
self.assertIn('"vs-test1.test.sftlyr.ws', result.output) # From fixtures/virutal_guest.getObject
# self.assert_called_with('SoftLayer_Tag', 'getUnattachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'getAttachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'getReferences', identifier=1286571)
self.assert_called_with('SoftLayer_Virtual_Guest', 'getObject', identifier=33488921)
def test_list_detail_ungettable(self):
mock = self.set_mock('SoftLayer_Virtual_Guest', 'getObject')
mock.side_effect = SoftLayerAPIError(404, "TEST ERROR")
result = self.run_command(['tags', 'list', '-d'])
self.assert_no_fail(result)
self.assertIn("TEST ERROR", result.output) # From fixtures/virutal_guest.getObject
# self.assert_called_with('SoftLayer_Tag', 'getUnattachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'getAttachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'getReferences', identifier=1286571)
self.assert_called_with('SoftLayer_Virtual_Guest', 'getObject', identifier=33488921)
@mock.patch('SoftLayer.CLI.tags.set.click')
def test_set_tags(self, click):
result = self.run_command(['tags', 'set', '--tags=tag1,tag2', '--key-name=GUEST', '--resource-id=100'])
click.secho.assert_called_with('Set tags successfully', fg='green')
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'setTags', args=("tag1,tag2", "GUEST", 100), )
@mock.patch('SoftLayer.CLI.tags.set.click')
def test_set_tags_failure(self, click):
mock = self.set_mock('SoftLayer_Tag', 'setTags')
mock.return_value = False
result = self.run_command(['tags', 'set', '--tags=tag1,tag2', '--key-name=GUEST', '--resource-id=100'])
click.secho.assert_called_with('Failed to set tags', fg='red')
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'setTags', args=("tag1,tag2", "GUEST", 100), )
def test_details_by_name(self):
tag_name = 'bs_test_instance'
result = self.run_command(['tags', 'details', tag_name])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'getTagByTagName', args=(tag_name,))
def METHOD_NAME(self):
tag_id = '1286571'
result = self.run_command(['tags', 'details', tag_id])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'getObject', identifier=tag_id)
def test_deleteTags_by_name(self):
result = self.run_command(['tags', 'delete', 'test'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'deleteTag', args=('test',))
def test_deleteTags_by_id(self):
result = self.run_command(['tags', 'delete', '123456'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'getObject', identifier='123456')
self.assert_called_with('SoftLayer_Tag', 'deleteTag', args=('bs_test_instance',))
def test_deleteTags_by_number_name(self):
result = self.run_command(['tags', 'delete', '123456', '--name'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'deleteTag', args=('123456',))
@mock.patch('SoftLayer.CLI.tags.delete.click')
def test_deleteTags_fail(self, click):
mock = self.set_mock('SoftLayer_Tag', 'deleteTag')
mock.return_value = False
result = self.run_command(['tags', 'delete', '123456', '--name'])
click.secho.assert_called_with('Failed to remove tag 123456', fg='red')
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'deleteTag', args=('123456',))
def test_taggable(self):
result = self.run_command(['tags', 'taggable'])
self.assert_no_fail(result)
self.assertIn('"host14.vmware.test.com', result.output)
self.assert_called_with('SoftLayer_Tag', 'getAllTagTypes')
self.assert_called_with('SoftLayer_Search', 'advancedSearch', args=('_objectType:SoftLayer_Hardware',))
def test_cleanup(self):
result = self.run_command(['tags', 'cleanup'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Tag', 'getUnattachedTagsForCurrentUser')
self.assert_called_with('SoftLayer_Tag', 'deleteTag', args=('coreos',))
def test_cleanup_dry(self):
result = self.run_command(['tags', 'cleanup', '-d'])
self.assert_no_fail(result)
self.assertIn('(Dry Run)', result.output)
self.assert_called_with('SoftLayer_Tag', 'getUnattachedTagsForCurrentUser')
self.assertEqual([], self.calls(service='SoftLayer_Tag', method='deleteTag')) | null |
671 | from __future__ import annotations
import numpy as np
import pytest
pytestmark = pytest.mark.gpu
import dask.array as da
from dask.array.utils import assert_eq
cupy = pytest.importorskip("cupy")
@pytest.mark.parametrize("idx_chunks", [None, 3, 2, 1])
@pytest.mark.parametrize("x_chunks", [(3, 5), (2, 3), (1, 2), (1, 1)])
def test_index_with_int_dask_array(x_chunks, idx_chunks):
# test data is crafted to stress use cases:
# - pick from different chunks of x out of order
# - a chunk of x contains no matches
# - only one chunk of x
x = cupy.array(
[[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]
)
idx = cupy.array([3, 0, 1])
expect = cupy.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])
x = da.from_array(x, chunks=x_chunks)
if idx_chunks is not None:
idx = da.from_array(idx, chunks=idx_chunks)
assert_eq(x[:, idx], expect)
assert_eq(x.T[idx, :], expect.T)
@pytest.mark.parametrize("idx_chunks", [None, 3, 2, 1])
@pytest.mark.parametrize("x_chunks", [(3, 5), (2, 3), (1, 2), (1, 1)])
def METHOD_NAME(x_chunks, idx_chunks):
# test data is crafted to stress use cases:
# - pick from different chunks of x out of order
# - a chunk of x contains no matches
# - only one chunk of x
x = cupy.array(
[[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]
)
orig_idx = np.array([3, 0, 1])
expect = cupy.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])
if x_chunks is not None:
x = da.from_array(x, chunks=x_chunks)
if idx_chunks is not None:
idx = da.from_array(orig_idx, chunks=idx_chunks)
else:
idx = orig_idx
assert_eq(x[:, idx], expect)
assert_eq(x.T[idx, :], expect.T)
# CuPy index
orig_idx = cupy.array(orig_idx)
if idx_chunks is not None:
idx = da.from_array(orig_idx, chunks=idx_chunks)
else:
idx = orig_idx
assert_eq(x[:, idx], expect)
assert_eq(x.T[idx, :], expect.T)
@pytest.mark.parametrize("chunks", [1, 2, 3])
def test_index_with_int_dask_array_0d(chunks):
# Slice by 0-dimensional array
x = da.from_array(cupy.array([[10, 20, 30], [40, 50, 60]]), chunks=chunks)
idx0 = da.from_array(1, chunks=1)
assert_eq(x[idx0, :], x[1, :])
assert_eq(x[:, idx0], x[:, 1])
# CuPy index
idx0 = da.from_array(cupy.array(1), chunks=1)
assert_eq(x[idx0, :], x[1, :])
assert_eq(x[:, idx0], x[:, 1])
@pytest.mark.skip("dask.Array.nonzero() doesn't support non-NumPy arrays yet")
@pytest.mark.parametrize("chunks", [1, 2, 3, 4, 5])
def test_index_with_int_dask_array_nanchunks(chunks):
# Slice by array with nan-sized chunks
a = da.from_array(cupy.arange(-2, 3), chunks=chunks)
assert_eq(a[a.nonzero()], cupy.array([-2, -1, 1, 2]))
# Edge case: the nan-sized chunks resolve to size 0
a = da.zeros_like(cupy.array(()), shape=5, chunks=chunks)
assert_eq(a[a.nonzero()], cupy.array([]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_negindex(chunks):
a = da.arange(4, chunks=chunks, like=cupy.array(()))
idx = da.from_array([-1, -4], chunks=1)
assert_eq(a[idx], cupy.array([3, 0]))
# CuPy index
idx = da.from_array(cupy.array([-1, -4]), chunks=1)
assert_eq(a[idx], cupy.array([3, 0]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_indexerror(chunks):
a = da.arange(4, chunks=chunks, like=cupy.array(()))
idx = da.from_array([4], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
idx = da.from_array([-5], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
# CuPy indices
idx = da.from_array(cupy.array([4]), chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
idx = da.from_array(cupy.array([-5]), chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
)
def test_index_with_int_dask_array_dtypes(dtype):
a = da.from_array(cupy.array([10, 20, 30, 40]), chunks=-1)
idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)
assert_eq(a[idx], cupy.array([20, 30]))
# CuPy index
idx = da.from_array(cupy.array([1, 2]).astype(dtype), chunks=1)
assert_eq(a[idx], cupy.array([20, 30]))
def test_index_with_int_dask_array_nocompute():
"""Test that when the indices are a dask array
they are not accidentally computed
"""
def crash():
raise NotImplementedError()
x = da.arange(5, chunks=-1, like=cupy.array(()))
idx = da.Array({("x", 0): (crash,)}, name="x", chunks=((2,),), dtype=np.int64)
result = x[idx]
with pytest.raises(NotImplementedError):
result.compute() | null |
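# A hedged aside on the crash-trap pattern used above (illustrative helper,
# not part of the original test file): building a dask Array directly from a
# graph whose only task raises lets a test prove that an operation stayed
# lazy. The name `_make_crash_trap` is hypothetical.
def _make_crash_trap(name="trap", nelem=2):
    def _crash():
        raise NotImplementedError("this chunk must never be materialized")

    # Mirrors the hand-built graph in test_index_with_int_dask_array_nocompute
    return da.Array({(name, 0): (_crash,)}, name=name, chunks=((nelem,),), dtype=np.int64)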
672 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import os
import subprocess
""" crashme_methods
utility / standard functions used when executing crashme
these methods are used by native mode / unittest cases
"""
def prepare_config(bot):
""" Create the config file crash-me needs to execute """
output_filename= "%s/crashme.cfg" % (bot.system_manager.workdir)
# remove the existing configuration file to start fresh
if os.path.exists(output_filename):
logging.info("Removing %s" % output_filename)
os.remove(output_filename)
output_file= open(output_filename,"w")
# don't support '+' for concatenation
output_file.writelines("func_extra_concat_as_+=no\n")
# new boost libraries are causing us to put these limits in, needs investigation
output_file.writelines("max_text_size=1048576\n")
output_file.writelines("where_string_size=1048576\n")
output_file.writelines("select_string_size=1048576\n")
output_file.flush()
output_file.close()
def execute_crashme(test_cmd, test_executor, servers):
""" Execute the commandline and return the result.
We use subprocess as we can pass os.environ dicts and whatnot
"""
# prepare our config file
bot = test_executor
prepare_config(bot)
output_filename= "%s/crashme.cfg" % (bot.system_manager.workdir)
testcase_name = bot.current_testcase.fullname
crashme_outfile = os.path.join(bot.logdir,'crashme.out')
crashme_output = open(crashme_outfile,'w')
crashme_cmd = test_cmd + " --config-file=%s" %(output_filename)
bot.logging.info("Executing crash-me: %s" %(crashme_cmd))
bot.logging.info("This may take some time. Please be patient...")
crashme_subproc = subprocess.Popen( crashme_cmd
, shell=True
, cwd=os.path.join(bot.system_manager.testdir, 'sql-bench')
, env=bot.working_environment
, stdout = crashme_output
, stderr = subprocess.STDOUT
)
crashme_subproc.wait()
retcode = crashme_subproc.returncode
crashme_output.close()
crashme_file = open(crashme_outfile,'r')
output = ''.join(crashme_file.readlines())
bot.logging.debug(output)
crashme_file.close()
bot.logging.debug("crashme_retcode: %d" %(retcode))
bot.current_test_retcode = retcode
bot.current_test_output = output
test_status = process_crashme_output(bot)
return test_status, retcode, bot.current_test_output
def process_crashme_output(bot):
if bot.current_test_retcode == 0:
output_data = bot.current_test_output.split('\n')
if output_data[0].startswith('Using an array as a reference is deprecated'):
file_name_idx = 6
else:
file_name_idx = 3
infile_name = output_data[file_name_idx].split(':')[1].strip()
output_data = None
inf= open(infile_name, "r")
inlines= inf.readlines()
error_flag= False
in_error_section = False
# crash-me is quite chatty and we don't normally want to sift
# through ALL of that stuff. We do allow seeing it via --verbose
if not bot.verbose:
bot.current_test_output = ''
for inline in inlines:
if in_error_section and not inline.strip().startswith('#'):
in_error_section = False
if '=error' in inline:
error_flag= True
in_error_section= True
if in_error_section:
bot.current_test_output += inline
inf.close()
if not error_flag:
if not bot.verbose:
bot.current_test_output = None
return 'pass'
return 'fail'
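# A hedged usage sketch (not part of the original module): a test runner
# would typically chain these helpers roughly as follows, where `bot` is a
# test executor with the system_manager / logdir attributes assumed above.
#
#   status, retcode, output = execute_crashme(
#       "perl ./crash-me --server=mysql", bot, servers=[]
#   )
#   if status != 'pass':
#       bot.logging.error("crash-me reported failures:\n%s" % output)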
| null |
673 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateLoadBalancerHTTPSListenerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'CreateLoadBalancerHTTPSListener','ens')
self.set_method('POST')
def get_ListenerForward(self): # String
return self.get_query_params().get('ListenerForward')
def set_ListenerForward(self, ListenerForward): # String
self.add_query_param('ListenerForward', ListenerForward)
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_HealthCheckURI(self): # String
return self.get_query_params().get('HealthCheckURI')
def set_HealthCheckURI(self, HealthCheckURI): # String
self.add_query_param('HealthCheckURI', HealthCheckURI)
def get_HealthCheck(self): # String
return self.get_query_params().get('HealthCheck')
def set_HealthCheck(self, HealthCheck): # String
self.add_query_param('HealthCheck', HealthCheck)
def get_Cookie(self): # String
return self.get_query_params().get('Cookie')
def set_Cookie(self, Cookie): # String
self.add_query_param('Cookie', Cookie)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckDomain(self): # String
return self.get_query_params().get('HealthCheckDomain')
def set_HealthCheckDomain(self, HealthCheckDomain): # String
self.add_query_param('HealthCheckDomain', HealthCheckDomain)
def get_RequestTimeout(self): # Integer
return self.get_query_params().get('RequestTimeout')
def set_RequestTimeout(self, RequestTimeout): # Integer
self.add_query_param('RequestTimeout', RequestTimeout)
def get_LoadBalancerId(self): # String
return self.get_query_params().get('LoadBalancerId')
def set_LoadBalancerId(self, LoadBalancerId): # String
self.add_query_param('LoadBalancerId', LoadBalancerId)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_Scheduler(self): # String
return self.get_query_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_query_param('Scheduler', Scheduler)
def get_ForwardPort(self): # Integer
return self.get_query_params().get('ForwardPort')
def set_ForwardPort(self, ForwardPort): # Integer
self.add_query_param('ForwardPort', ForwardPort)
def get_CookieTimeout(self): # Integer
return self.get_query_params().get('CookieTimeout')
def set_CookieTimeout(self, CookieTimeout): # Integer
self.add_query_param('CookieTimeout', CookieTimeout)
def get_StickySessionType(self): # String
return self.get_query_params().get('StickySessionType')
def set_StickySessionType(self, StickySessionType): # String
self.add_query_param('StickySessionType', StickySessionType)
def get_ListenerPort(self): # Integer
return self.get_query_params().get('ListenerPort')
	def set_ListenerPort(self, ListenerPort): # Integer
self.add_query_param('ListenerPort', ListenerPort)
def get_ServerCertificateId(self): # String
return self.get_query_params().get('ServerCertificateId')
def set_ServerCertificateId(self, ServerCertificateId): # String
self.add_query_param('ServerCertificateId', ServerCertificateId)
def get_IdleTimeout(self): # Integer
return self.get_query_params().get('IdleTimeout')
def set_IdleTimeout(self, IdleTimeout): # Integer
self.add_query_param('IdleTimeout', IdleTimeout)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort)
def get_HealthCheckHttpCode(self): # String
return self.get_query_params().get('HealthCheckHttpCode')
def set_HealthCheckHttpCode(self, HealthCheckHttpCode): # String
self.add_query_param('HealthCheckHttpCode', HealthCheckHttpCode) | null |
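# A hedged usage sketch (not part of the generated file): sending this request
# follows the usual aliyun-python-sdk pattern. The credentials, region, and
# IDs below are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = CreateLoadBalancerHTTPSListenerRequest()
#   request.set_LoadBalancerId('lb-xxxxxxxx')
#   request.set_ListenerPort(443)
#   request.set_ServerCertificateId('cert-xxxxxxxx')
#   response = client.do_action_with_exception(request)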
674 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from collections import Counter
from typing import Dict, List, Optional
import torch
import torch.nn.functional as F
from reagent.core.tracker import observable
from reagent.evaluation.cpe import CpeDetails, CpeEstimateSet
from reagent.evaluation.doubly_robust_estimator import DoublyRobustEstimator
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.sequential_doubly_robust_estimator import (
SequentialDoublyRobustEstimator,
)
from reagent.evaluation.weighted_sequential_doubly_robust_estimator import (
WeightedSequentialDoublyRobustEstimator,
)
logger = logging.getLogger(__name__)
def get_tensor(x, dtype=None):
"""
Input:
- x: list or a sequence
- dtype: target data type of the elements in tensor [optional]
It will be inferred automatically if not provided.
Output:
Tensor given a list or a sequence.
If the input is None, it returns None
If the input is a tensor it returns the tensor.
If type is provides the output Tensor will have that type
"""
if x is None:
return None
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
if dtype is not None:
x = x.type(dtype)
return x
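# Illustrative (non-source) examples of get_tensor's contract:
#   get_tensor(None)                      -> None
#   get_tensor([1, 2, 3])                 -> tensor([1, 2, 3])
#   get_tensor([1, 2, 3], torch.float32)  -> tensor([1., 2., 3.])
#   get_tensor(torch.ones(2))             -> the same tensor, unchanged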
def get_metrics_to_score(metric_reward_values: Optional[Dict[str, float]]) -> List[str]:
if metric_reward_values is None:
return []
return sorted([*metric_reward_values.keys()])
@observable(cpe_details=CpeDetails)
class Evaluator:
NUM_J_STEPS_FOR_MAGIC_ESTIMATOR = 25
def __init__(self, action_names, gamma, model, metrics_to_score=None) -> None:
self.action_names = action_names
self.metrics_to_score = metrics_to_score
self.gamma = gamma
self.model = model
self.doubly_robust_estimator = DoublyRobustEstimator()
self.sequential_doubly_robust_estimator = SequentialDoublyRobustEstimator(gamma)
self.weighted_sequential_doubly_robust_estimator = (
WeightedSequentialDoublyRobustEstimator(gamma)
)
    def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails:
cpe_details = CpeDetails()
cpe_details.reward_estimates = self.score_cpe("Reward", edp)
if (
self.metrics_to_score is not None
and edp.logged_metrics is not None
and self.action_names is not None
):
for i, metric in enumerate(self.metrics_to_score):
logger.info(
"--------- Running CPE on metric: {} ---------".format(metric)
)
metric_reward_edp = edp.set_metric_as_reward(i, len(self.action_names))
cpe_details.metric_estimates[metric] = self.score_cpe(
metric, metric_reward_edp
)
if self.action_names is not None:
if edp.optimal_q_values is not None:
value_means = edp.optimal_q_values.mean(dim=0)
cpe_details.q_value_means = {
action: float(value_means[i])
for i, action in enumerate(self.action_names)
}
# pyre-ignore [16]: `Optional` has no attribute `std`
value_stds = edp.optimal_q_values.std(dim=0)
cpe_details.q_value_stds = {
action: float(value_stds[i])
for i, action in enumerate(self.action_names)
}
if edp.eval_action_idxs is not None:
cpe_details.action_distribution = {
# pyre-ignore [16]: `bool` has no attribute `sum`
action: float((edp.eval_action_idxs == i).sum())
# pyre-ignore [16]: `Optional` has no attribute `shape`
/ edp.eval_action_idxs.shape[0]
for i, action in enumerate(self.action_names)
}
# pyre-fixme[16]: `Evaluator` has no attribute `notify_observers`.
self.notify_observers(cpe_details=cpe_details)
return cpe_details
def score_cpe(self, metric_name, edp: EvaluationDataPage):
(
direct_method,
inverse_propensity,
doubly_robust,
) = self.doubly_robust_estimator.estimate(edp)
sequential_doubly_robust = self.sequential_doubly_robust_estimator.estimate(edp)
weighted_doubly_robust = (
self.weighted_sequential_doubly_robust_estimator.estimate(
edp, num_j_steps=1, whether_self_normalize_importance_weights=True
)
)
magic = self.weighted_sequential_doubly_robust_estimator.estimate(
edp,
num_j_steps=Evaluator.NUM_J_STEPS_FOR_MAGIC_ESTIMATOR,
whether_self_normalize_importance_weights=True,
)
return CpeEstimateSet(
direct_method=direct_method,
inverse_propensity=inverse_propensity,
doubly_robust=doubly_robust,
sequential_doubly_robust=sequential_doubly_robust,
weighted_doubly_robust=weighted_doubly_robust,
magic=magic,
)
def _get_batch_logged_actions(self, arr):
action_counter = Counter()
for actions in arr:
# torch.max() returns the element and the index.
# The latter is the argmax equivalent
_, argmax = torch.max(actions, dim=1)
# Counter object does not work well with Tensors, hence casting back to numpy
action_counter.update(Counter(argmax.numpy()))
total_actions = 1.0 * sum(action_counter.values())
return (
{
action_name: (action_counter[i] / total_actions)
for i, action_name in enumerate(self.action_names)
},
{
action_name: action_counter[i]
for i, action_name in enumerate(self.action_names)
},
)
def get_target_distribution_error(
self, actions, target_distribution, actual_distribution
):
"""Calculate MSE between actual and target action distribution."""
if not target_distribution:
return None
error = 0
for i, action in enumerate(actions):
error += (target_distribution[i] - actual_distribution[action]) ** 2
return error / len(actions)
@staticmethod
def huberLoss(label, output):
if abs(label - output) > 1:
return abs(label - output) - 0.5
else:
return 0.5 * (label - output) * (label - output) | null |
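# A hedged aside (illustrative, not used by this class): for delta == 1 the
# scalar helper above matches the standard Huber loss; a vectorized torch
# equivalent could look like:
#
#   diff = (label - output).abs()
#   loss = torch.where(diff > 1, diff - 0.5, 0.5 * diff * diff)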
675 | from typing import List
import pytest
from pytest_mock import MockerFixture
from lightly.active_learning.config.selection_config import SelectionConfig
from lightly.api import ApiWorkflowClient, api_workflow_selection
from lightly.openapi_generated.swagger_client.models import (
JobResultType,
JobState,
JobStatusData,
JobStatusDataResult,
SamplingCreateRequest,
SamplingMethod,
TagData,
)
from tests.api_workflow import utils
def _get_tags(dataset_id: str, tag_name: str = "just-a-tag") -> List[TagData]:
return [
TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=None,
bit_mask_data="0x1",
name=tag_name,
tot_size=4,
created_at=1577836800,
changes=[],
)
]
def METHOD_NAME(tag_name: str = "new-tag") -> SamplingCreateRequest:
return SamplingCreateRequest(
new_tag_name=tag_name,
method=SamplingMethod.RANDOM,
config={},
)
def test_selection__tag_exists(mocker: MockerFixture) -> None:
tag_name = "some-tag"
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=_get_tags(dataset_id=utils.generate_id(), tag_name=tag_name),
)
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name=tag_name))
assert (
str(exception.value) == "There already exists a tag with tag_name some-tag"
)
def test_selection__no_tags(mocker: MockerFixture) -> None:
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])
client = ApiWorkflowClient()
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "There exists no initial-tag for this dataset."
def test_selection(mocker: MockerFixture) -> None:
tag_name = "some-tag"
dataset_id = utils.generate_id()
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
        return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = utils.generate_id()
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FINISHED,
result=JobStatusDataResult(type=JobResultType.SAMPLING, data="new-tag-id"),
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
mocked_tags_api = mocker.MagicMock()
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._tags_api = mocked_tags_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
client.selection(selection_config=SelectionConfig(name=tag_name))
mocked_get_job_status.assert_called_once()
mocked_tags_api.get_tag_by_tag_id.assert_called_once_with(
dataset_id=dataset_id, tag_id="new-tag-id"
)
def test_selection__job_failed(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
        return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
return_value=JobStatusData(
id=utils.generate_id(),
wait_time_till_next_poll=1,
created_at=0,
status=JobState.FAILED,
error="bad job",
)
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(RuntimeError) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == (
"Selection job with job_id some-job-id failed with error bad job"
)
def test_selection__too_many_errors(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
job_id = "some-job-id"
mocker.patch("time.sleep")
mocked_print = mocker.patch("builtins.print")
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
)
mocker.patch.object(
ApiWorkflowClient,
"_create_selection_create_request",
        return_value=_get_sampling_create_request(),
)
mocked_selection_api = mocker.MagicMock()
mocked_sampling_response = mocker.MagicMock()
mocked_sampling_response.job_id = job_id
mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
mocked_jobs_api = mocker.MagicMock()
mocked_get_job_status = mocker.MagicMock(
side_effect=[Exception("surprise!") for _ in range(20)]
)
mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
client = ApiWorkflowClient()
client._selection_api = mocked_selection_api
client._jobs_api = mocked_jobs_api
client._dataset_id = dataset_id
client.embedding_id = "embedding-id"
with pytest.raises(Exception) as exception:
client.selection(selection_config=SelectionConfig(name="some-tag"))
assert str(exception.value) == "surprise!"
mocked_print.assert_called_once_with(
"Selection job with job_id some-job-id could not be started "
"because of error: surprise!"
)
def test_upload_scores(mocker: MockerFixture) -> None:
dataset_id = utils.generate_id()
tags = _get_tags(dataset_id=dataset_id, tag_name="initial-tag")
tag_id = tags[0].id
mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
mocker.patch.object(
ApiWorkflowClient,
"get_all_tags",
return_value=tags,
)
mocker.patch.object(
api_workflow_selection, "_parse_active_learning_scores", return_value=[1]
)
mocked_api = mocker.MagicMock()
mocked_create_score = mocked_api.create_or_update_active_learning_score_by_tag_id
client = ApiWorkflowClient()
client._scores_api = mocked_api
client._dataset_id = dataset_id
mocked_create_score.reset_mock()
client.upload_scores(al_scores={"score_type": [1, 2, 3]}, query_tag_id=tag_id)
mocked_create_score.assert_called_once()
kwargs = mocked_create_score.call_args[1]
assert kwargs.get("tag_id") == tag_id | null |
676 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Models representing stack component flavors."""
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
from uuid import UUID
from pydantic import BaseModel, Field
from zenml.enums import StackComponentType
from zenml.models.base_models import (
BaseRequestModel,
BaseResponseModel,
update_model,
)
from zenml.models.constants import STR_FIELD_MAX_LENGTH
from zenml.models.filter_models import WorkspaceScopedFilterModel
from zenml.models.service_connector_models import (
ServiceConnectorRequirements,
)
if TYPE_CHECKING:
from zenml.models import UserResponseModel, WorkspaceResponseModel
# ---- #
# BASE #
# ---- #
class FlavorBaseModel(BaseModel):
"""Base model for stack component flavors."""
name: str = Field(
title="The name of the Flavor.",
max_length=STR_FIELD_MAX_LENGTH,
)
type: StackComponentType = Field(title="The type of the Flavor.")
config_schema: Dict[str, Any] = Field(
title="The JSON schema of this flavor's corresponding configuration.",
)
connector_type: Optional[str] = Field(
default=None,
title="The type of the connector that this flavor uses.",
max_length=STR_FIELD_MAX_LENGTH,
)
connector_resource_type: Optional[str] = Field(
default=None,
title="The resource type of the connector that this flavor uses.",
max_length=STR_FIELD_MAX_LENGTH,
)
connector_resource_id_attr: Optional[str] = Field(
default=None,
title="The name of an attribute in the stack component configuration "
"that plays the role of resource ID when linked to a service connector.",
max_length=STR_FIELD_MAX_LENGTH,
)
source: str = Field(
title="The path to the module which contains this Flavor.",
max_length=STR_FIELD_MAX_LENGTH,
)
integration: Optional[str] = Field(
title="The name of the integration that the Flavor belongs to.",
max_length=STR_FIELD_MAX_LENGTH,
)
logo_url: Optional[str] = Field(
default=None,
title="Optionally, a url pointing to a png,"
"svg or jpg can be attached.",
)
docs_url: Optional[str] = Field(
default=None,
title="Optionally, a url pointing to docs, within docs.zenml.io.",
)
sdk_docs_url: Optional[str] = Field(
default=None,
title="Optionally, a url pointing to SDK docs,"
"within sdkdocs.zenml.io.",
)
is_custom: bool = Field(
title="Whether or not this flavor is a custom, user created flavor.",
default=True,
)
@property
    def connector_requirements(self) -> Optional[ServiceConnectorRequirements]:
"""Returns the connector requirements for the flavor.
Returns:
The connector requirements for the flavor.
"""
if not self.connector_resource_type:
return None
return ServiceConnectorRequirements(
connector_type=self.connector_type,
resource_type=self.connector_resource_type,
resource_id_attr=self.connector_resource_id_attr,
)
# -------- #
# RESPONSE #
# -------- #
class FlavorResponseModel(FlavorBaseModel, BaseResponseModel):
"""Response model for stack component flavors."""
ANALYTICS_FIELDS: ClassVar[List[str]] = [
"id",
"type",
"integration",
]
user: Union["UserResponseModel", None] = Field(
title="The user that created this resource.", nullable=True
)
workspace: Optional["WorkspaceResponseModel"] = Field(
title="The project of this resource."
)
# ------ #
# FILTER #
# ------ #
class FlavorFilterModel(WorkspaceScopedFilterModel):
"""Model to enable advanced filtering of all Flavors."""
name: Optional[str] = Field(
default=None,
description="Name of the flavor",
)
type: Optional[str] = Field(
default=None,
description="Stack Component Type of the stack flavor",
)
integration: Optional[str] = Field(
default=None,
description="Integration associated with the flavor",
)
workspace_id: Optional[Union[UUID, str]] = Field(
default=None, description="Workspace of the stack"
)
user_id: Optional[Union[UUID, str]] = Field(
default=None, description="User of the stack"
)
# ------- #
# REQUEST #
# ------- #
class FlavorRequestModel(FlavorBaseModel, BaseRequestModel):
"""Request model for stack component flavors."""
ANALYTICS_FIELDS: ClassVar[List[str]] = [
"type",
"integration",
]
user: Optional[UUID] = Field(
default=None, title="The id of the user that created this resource."
)
workspace: Optional[UUID] = Field(
default=None, title="The workspace to which this resource belongs."
)
# ------- #
# Update #
# ------- #
@update_model
class FlavorUpdateModel(FlavorRequestModel):
"""Update model for flavors.""" | null |
677 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class ModifyHealthCheckRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'ModifyHealthCheck','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProbeInterval(self): # Integer
return self.get_query_params().get('ProbeInterval')
def set_ProbeInterval(self, ProbeInterval): # Integer
self.add_query_param('ProbeInterval', ProbeInterval)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DstPort(self): # Integer
return self.get_query_params().get('DstPort')
def set_DstPort(self, DstPort): # Integer
self.add_query_param('DstPort', DstPort)
def get_Description(self): # String
return self.get_query_params().get('Description')
	def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_HcInstanceId(self): # String
return self.get_query_params().get('HcInstanceId')
def set_HcInstanceId(self, HcInstanceId): # String
self.add_query_param('HcInstanceId', HcInstanceId)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_FailCountThreshold(self): # Integer
return self.get_query_params().get('FailCountThreshold')
def set_FailCountThreshold(self, FailCountThreshold): # Integer
self.add_query_param('FailCountThreshold', FailCountThreshold)
def get_ProbeTimeout(self): # Integer
return self.get_query_params().get('ProbeTimeout')
def set_ProbeTimeout(self, ProbeTimeout): # Integer
self.add_query_param('ProbeTimeout', ProbeTimeout)
def get_RttFailThreshold(self): # Integer
return self.get_query_params().get('RttFailThreshold')
def set_RttFailThreshold(self, RttFailThreshold): # Integer
self.add_query_param('RttFailThreshold', RttFailThreshold)
def get_RttThreshold(self): # Integer
return self.get_query_params().get('RttThreshold')
def set_RttThreshold(self, RttThreshold): # Integer
self.add_query_param('RttThreshold', RttThreshold)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DstIpAddr(self): # String
return self.get_query_params().get('DstIpAddr')
def set_DstIpAddr(self, DstIpAddr): # String
self.add_query_param('DstIpAddr', DstIpAddr)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_SrcIpAddr(self): # String
return self.get_query_params().get('SrcIpAddr')
def set_SrcIpAddr(self, SrcIpAddr): # String
self.add_query_param('SrcIpAddr', SrcIpAddr)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_SmartAGId(self): # String
return self.get_query_params().get('SmartAGId')
def set_SmartAGId(self, SmartAGId): # String
self.add_query_param('SmartAGId', SmartAGId)
def get_SrcPort(self): # Integer
return self.get_query_params().get('SrcPort')
def set_SrcPort(self, SrcPort): # Integer
self.add_query_param('SrcPort', SrcPort)
def get_ProbeCount(self): # Integer
return self.get_query_params().get('ProbeCount')
def set_ProbeCount(self, ProbeCount): # Integer
self.add_query_param('ProbeCount', ProbeCount) | null |
678 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
class ShapeTypeEnums:
@schemas.classproperty
def QUADRILATERAL(cls) -> typing.Literal["Quadrilateral"]:
return ShapeType.validate("Quadrilateral")
@dataclasses.dataclass(frozen=True)
class ShapeType(
schemas.Schema
):
types: typing.FrozenSet[typing.Type] = frozenset({
str,
})
enum_value_to_name: typing.Mapping[typing.Union[int, float, str, schemas.Bool, None], str] = dataclasses.field(
default_factory=lambda: {
"Quadrilateral": "QUADRILATERAL",
}
)
enums = ShapeTypeEnums
@typing.overload
@classmethod
def validate(
cls,
arg: typing.Literal["Quadrilateral"],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["Quadrilateral"]: ...
@typing.overload
@classmethod
def validate(
cls,
arg: str,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["Quadrilateral",]: ...
@classmethod
def validate(
cls,
arg,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal[
"Quadrilateral",
]:
validated_arg = super().validate_base(
arg,
configuration=configuration,
)
return typing.cast(typing.Literal[
"Quadrilateral",
],
validated_arg
)
QuadrilateralType: typing_extensions.TypeAlias = schemas.StrSchema
Properties = typing.TypedDict(
'Properties',
{
"shapeType": typing.Type[ShapeType],
"quadrilateralType": typing.Type[QuadrilateralType],
}
)
class QuadrilateralInterfaceDict(schemas.immutabledict[str, str]):
__required_keys__: typing.FrozenSet[str] = frozenset({
"quadrilateralType",
"shapeType",
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
})
def __new__(
cls,
*,
quadrilateralType: str,
shapeType: typing.Literal[
"Quadrilateral"
],
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {
"quadrilateralType": quadrilateralType,
"shapeType": shapeType,
}
arg_.update(kwargs)
used_arg_ = typing.cast(QuadrilateralInterfaceDictInput, arg_)
return QuadrilateralInterface.validate(used_arg_, configuration=configuration_)
@staticmethod
    def validate(
arg: typing.Union[
QuadrilateralInterfaceDictInput,
QuadrilateralInterfaceDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> QuadrilateralInterfaceDict:
return QuadrilateralInterface.validate(arg, configuration=configuration)
@property
def quadrilateralType(self) -> str:
return typing.cast(
str,
self.__getitem__("quadrilateralType")
)
@property
def shapeType(self) -> typing.Literal["Quadrilateral"]:
return typing.cast(
typing.Literal["Quadrilateral"],
self.__getitem__("shapeType")
)
def get_additional_property_(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)
return self.get(name, schemas.unset)
QuadrilateralInterfaceDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class QuadrilateralInterface(
schemas.AnyTypeSchema[QuadrilateralInterfaceDict, typing.Tuple[schemas.OUTPUT_BASE_TYPES, ...]],
):
"""NOTE: This class is auto generated by OpenAPI JSON Schema Generator.
Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
Do not edit the class manually.
"""
# any type
required: typing.FrozenSet[str] = frozenset({
"quadrilateralType",
"shapeType",
})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: QuadrilateralInterfaceDict,
}
)
| null |
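# A hedged usage sketch (not part of the generated module): validating a
# payload against this schema might look like the following; key values are
# illustrative.
#
#   quad = QuadrilateralInterface.validate({
#       "shapeType": "Quadrilateral",
#       "quadrilateralType": "Square",
#   })
#   assert quad.shapeType == "Quadrilateral"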
679 | from rest_framework import serializers as ser
from addons.osfstorage.models import OsfStorageFile
from api.base.serializers import (
IDField,
JSONAPISerializer,
LinksField,
RelationshipField,
TypeField,
VersionedDateTimeField,
)
from api.base.utils import absolute_reverse
from api.files.serializers import get_file_download_link
from api.nodes.serializers import NodeSerializer
class MeetingSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'name',
'location',
])
id = IDField(source='endpoint', read_only=True)
type = TypeField()
name = ser.CharField(read_only=True)
location = ser.CharField(read_only=True)
start_date = VersionedDateTimeField(read_only=True)
end_date = VersionedDateTimeField(read_only=True)
info_url = ser.URLField(read_only=True)
logo_url = ser.URLField(read_only=True)
field_names = ser.DictField(read_only=True)
submissions_count = ser.SerializerMethodField()
active = ser.BooleanField(read_only=True)
type_one_submission_email = ser.SerializerMethodField()
type_two_submission_email = ser.SerializerMethodField()
is_accepting_type_one = ser.BooleanField(source='poster', read_only=True)
is_accepting_type_two = ser.BooleanField(source='talk', read_only=True)
submissions = RelationshipField(
related_view='meetings:meeting-submissions',
related_view_kwargs={'meeting_id': '<endpoint>'},
related_meta={'count': 'get_submissions_count'},
)
links = LinksField({
'self': 'get_absolute_url',
'html': 'get_absolute_html_url',
})
def format_submission_email(self, obj, submission_field):
if obj.active:
return '{}-{}@osf.io'.format(obj.endpoint, obj.field_names.get(submission_field))
return ''
def get_type_one_submission_email(self, obj):
return self.format_submission_email(obj, 'submission1')
def get_type_two_submission_email(self, obj):
return self.format_submission_email(obj, 'submission2')
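    # Illustrative example (values hypothetical): for an active meeting with
    # endpoint 'aps2024' and field_names {'submission1': 'poster'}, the
    # generated address is 'aps2024-poster@osf.io'; inactive meetings yield ''.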
def get_absolute_url(self, obj):
return absolute_reverse('meetings:meeting-detail', kwargs={'meeting_id': obj.endpoint})
def get_submissions_count(self, obj):
if getattr(obj, 'submissions_count', None):
return obj.submissions_count
else:
return obj.valid_submissions.count()
class Meta:
type_ = 'meetings'
class MeetingSubmissionSerializer(NodeSerializer):
filterable_fields = frozenset([
'title',
'meeting_category',
'author_name',
])
author_name = ser.SerializerMethodField()
download_count = ser.SerializerMethodField()
meeting_category = ser.SerializerMethodField()
author = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': 'get_author_id'},
read_only=True,
)
links = LinksField({
'self': 'get_absolute_url',
'html': 'get_absolute_html_url',
'download': 'get_download_link',
})
def get_author(self, obj):
contrib_queryset = obj.contributor_set.filter(visible=True).order_by('_order')
if contrib_queryset:
return contrib_queryset.first().user
return None
def get_author_id(self, obj):
# Author guid is annotated on queryset in ListView
if getattr(obj, 'author_id', None):
return obj.author_id
else:
author = self.get_author(obj)
return author._id if author else None
    def get_author_name(self, obj):
        """
        Return the first bibliographic contributor's family_name if it exists;
        otherwise, return their fullname.
        """
if getattr(obj, 'author_name', None):
# Field is annotated on queryset in ListView for filtering purposes
return obj.author_name
else:
author = self.get_author(obj)
if author:
return author.family_name if author.family_name else author.fullname
return None
def get_meeting_category(self, obj):
"""
Returns the existance of a certain tag on the node. If the first submission type tag exists,
return that. Otherwise, return the second submission type tag as a default.
"""
if getattr(obj, 'meeting_category', None):
# Field is annotated on queryset in ListView for filtering purposes
return obj.meeting_category
else:
meeting = self.context['meeting']
submission1_name = meeting.field_names.get('submission1')
submission2_name = meeting.field_names.get('submission2')
submission_tags = obj.tags.values_list('name', flat=True)
return submission1_name if submission1_name in submission_tags else submission2_name
def get_download_count(self, obj):
"""
Return the download counts of the first osfstorage file
"""
if getattr(obj, 'download_count', None):
return obj.download_count or 0
else:
submission_file = self.get_submission_file(obj)
return submission_file.get_download_count() if submission_file else None
def get_download_link(self, obj):
"""
First osfstoragefile on a node - if the node was created for a meeting,
assuming its first file is the meeting submission.
"""
if getattr(obj, 'file_id', None):
submission_file = OsfStorageFile.objects.get(id=obj.file_id)
else:
submission_file = self.get_submission_file(obj)
if submission_file:
return get_file_download_link(submission_file)
return None
def get_submission_file(self, obj):
return obj.files.order_by('created').first()
def get_absolute_url(self, obj):
meeting_endpoint = self.context['meeting'].endpoint
return absolute_reverse(
'meetings:meeting-submission-detail',
kwargs={
'meeting_id': meeting_endpoint,
'submission_id': obj._id,
},
)
# Overrides BaseAPISerializer
def parse_sparse_fields(self):
"""
Since meeting submissions are actually nodes, we are subclassing the NodeSerializer,
but we only want to return a subset of fields specific to meetings
"""
fieldset = [
'date_created',
'title',
'author',
'author_name',
'meeting_category',
'download_count',
'submission_file',
]
for field_name in list(self.fields.keys()):
if field_name in ('id', 'links', 'type'):
# MUST return these fields
continue
if field_name not in fieldset:
self.fields.pop(field_name)
return super().parse_sparse_fields()
class Meta:
type_ = 'meeting-submissions' | null |
680 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmc.endpoint import endpoint_data
class ModifyReplicationJobAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'smc', '2019-06-01', 'ModifyReplicationJobAttribute','smc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TargetType(self):
return self.get_query_params().get('TargetType')
def set_TargetType(self,TargetType):
self.add_query_param('TargetType',TargetType)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Frequency(self):
return self.get_query_params().get('Frequency')
def set_Frequency(self,Frequency):
self.add_query_param('Frequency',Frequency)
def get_JobId(self):
return self.get_query_params().get('JobId')
def set_JobId(self,JobId):
self.add_query_param('JobId',JobId)
def get_ImageName(self):
return self.get_query_params().get('ImageName')
def set_ImageName(self,ImageName):
self.add_query_param('ImageName',ImageName)
def get_SystemDiskSize(self):
return self.get_query_params().get('SystemDiskSize')
def set_SystemDiskSize(self,SystemDiskSize):
self.add_query_param('SystemDiskSize',SystemDiskSize)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_ContainerRepository(self):
return self.get_query_params().get('ContainerRepository')
def set_ContainerRepository(self,ContainerRepository):
self.add_query_param('ContainerRepository',ContainerRepository)
def get_ContainerTag(self):
return self.get_query_params().get('ContainerTag')
def set_ContainerTag(self,ContainerTag):
self.add_query_param('ContainerTag',ContainerTag)
def get_ContainerNamespace(self):
return self.get_query_params().get('ContainerNamespace')
def set_ContainerNamespace(self,ContainerNamespace):
self.add_query_param('ContainerNamespace',ContainerNamespace)
def get_LaunchTemplateId(self):
return self.get_query_params().get('LaunchTemplateId')
def set_LaunchTemplateId(self,LaunchTemplateId):
self.add_query_param('LaunchTemplateId',LaunchTemplateId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_SystemDiskParts(self):
return self.get_query_params().get('SystemDiskPart')
	def set_SystemDiskParts(self, SystemDiskParts):
for depth1 in range(len(SystemDiskParts)):
if SystemDiskParts[depth1].get('SizeBytes') is not None:
self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.SizeBytes', SystemDiskParts[depth1].get('SizeBytes'))
if SystemDiskParts[depth1].get('Block') is not None:
self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.Block', SystemDiskParts[depth1].get('Block'))
if SystemDiskParts[depth1].get('Device') is not None:
self.add_query_param('SystemDiskPart.' + str(depth1 + 1) + '.Device', SystemDiskParts[depth1].get('Device'))
def get_ValidTime(self):
return self.get_query_params().get('ValidTime')
def set_ValidTime(self,ValidTime):
self.add_query_param('ValidTime',ValidTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DataDisks(self):
return self.get_query_params().get('DataDisk')
def set_DataDisks(self, DataDisks):
for depth1 in range(len(DataDisks)):
if DataDisks[depth1].get('Size') is not None:
self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Size', DataDisks[depth1].get('Size'))
if DataDisks[depth1].get('Part') is not None:
for depth2 in range(len(DataDisks[depth1].get('Part'))):
if DataDisks[depth1].get('Part')[depth2].get('SizeBytes') is not None:
self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.SizeBytes', DataDisks[depth1].get('Part')[depth2].get('SizeBytes'))
if DataDisks[depth1].get('Part')[depth2].get('Block') is not None:
self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.Block', DataDisks[depth1].get('Part')[depth2].get('Block'))
if DataDisks[depth1].get('Part')[depth2].get('Device') is not None:
self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Part.' + str(depth2 + 1) + '.Device', DataDisks[depth1].get('Part')[depth2].get('Device'))
if DataDisks[depth1].get('Index') is not None:
self.add_query_param('DataDisk.' + str(depth1 + 1) + '.Index', DataDisks[depth1].get('Index'))
def get_LaunchTemplateVersion(self):
return self.get_query_params().get('LaunchTemplateVersion')
def set_LaunchTemplateVersion(self,LaunchTemplateVersion):
self.add_query_param('LaunchTemplateVersion',LaunchTemplateVersion)
def get_ScheduledStartTime(self):
return self.get_query_params().get('ScheduledStartTime')
def set_ScheduledStartTime(self,ScheduledStartTime):
self.add_query_param('ScheduledStartTime',ScheduledStartTime)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_InstanceRamRole(self):
return self.get_query_params().get('InstanceRamRole')
def set_InstanceRamRole(self,InstanceRamRole):
self.add_query_param('InstanceRamRole',InstanceRamRole)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_MaxNumberOfImageToKeep(self):
return self.get_query_params().get('MaxNumberOfImageToKeep')
def set_MaxNumberOfImageToKeep(self,MaxNumberOfImageToKeep):
		self.add_query_param('MaxNumberOfImageToKeep',MaxNumberOfImageToKeep)
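# A hedged illustration (not part of the generated file) of how the repeated
# nested parameters above flatten into query keys; values are placeholders:
#
#   req = ModifyReplicationJobAttributeRequest()
#   req.set_DataDisks([{'Index': 1, 'Size': 100,
#                       'Part': [{'Device': '/dev/xvdb1', 'SizeBytes': 1024}]}])
#   # adds query params:
#   #   DataDisk.1.Size = 100
#   #   DataDisk.1.Part.1.SizeBytes = 1024
#   #   DataDisk.1.Part.1.Device = /dev/xvdb1
#   #   DataDisk.1.Index = 1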
681 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CopySnapshotRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CopySnapshot','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
	def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SnapshotId(self): # String
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self, SnapshotId): # String
self.add_query_param('SnapshotId', SnapshotId)
def get_DestinationRegionId(self): # String
return self.get_query_params().get('DestinationRegionId')
def set_DestinationRegionId(self, DestinationRegionId): # String
self.add_query_param('DestinationRegionId', DestinationRegionId)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_Arns(self): # RepeatList
return self.get_query_params().get('Arn')
def set_Arns(self, Arn): # RepeatList
for depth1 in range(len(Arn)):
if Arn[depth1].get('RoleType') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.RoleType', Arn[depth1].get('RoleType'))
if Arn[depth1].get('Rolearn') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.Rolearn', Arn[depth1].get('Rolearn'))
if Arn[depth1].get('AssumeRoleFor') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.AssumeRoleFor', Arn[depth1].get('AssumeRoleFor'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DestinationSnapshotName(self): # String
return self.get_query_params().get('DestinationSnapshotName')
def set_DestinationSnapshotName(self, DestinationSnapshotName): # String
self.add_query_param('DestinationSnapshotName', DestinationSnapshotName)
def get_DestinationSnapshotDescription(self): # String
return self.get_query_params().get('DestinationSnapshotDescription')
def set_DestinationSnapshotDescription(self, DestinationSnapshotDescription): # String
self.add_query_param('DestinationSnapshotDescription', DestinationSnapshotDescription)
def get_Encrypted(self): # Boolean
return self.get_query_params().get('Encrypted')
def set_Encrypted(self, Encrypted): # Boolean
self.add_query_param('Encrypted', Encrypted)
def get_RetentionDays(self): # Integer
return self.get_query_params().get('RetentionDays')
def set_RetentionDays(self, RetentionDays): # Integer
self.add_query_param('RetentionDays', RetentionDays)
def get_KMSKeyId(self): # String
return self.get_query_params().get('KMSKeyId')
def set_KMSKeyId(self, KMSKeyId): # String
self.add_query_param('KMSKeyId', KMSKeyId)
def get_DestinationStorageLocationArn(self): # String
return self.get_query_params().get('DestinationStorageLocationArn')
def set_DestinationStorageLocationArn(self, DestinationStorageLocationArn): # String
self.add_query_param('DestinationStorageLocationArn', DestinationStorageLocationArn) | null |
682 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateImagePipelineRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateImagePipeline','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BaseImageType(self): # String
return self.get_query_params().get('BaseImageType')
def set_BaseImageType(self, BaseImageType): # String
self.add_query_param('BaseImageType', BaseImageType)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ToRegionIds(self): # RepeatList
return self.get_query_params().get('ToRegionId')
def set_ToRegionIds(self, ToRegionId): # RepeatList
for depth1 in range(len(ToRegionId)):
self.add_query_param('ToRegionId.' + str(depth1 + 1), ToRegionId[depth1])
def get_InternetMaxBandwidthOut(self): # Integer
return self.get_query_params().get('InternetMaxBandwidthOut')
def set_InternetMaxBandwidthOut(self, InternetMaxBandwidthOut): # Integer
self.add_query_param('InternetMaxBandwidthOut', InternetMaxBandwidthOut)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_ImageName(self): # String
return self.get_query_params().get('ImageName')
def set_ImageName(self, ImageName): # String
self.add_query_param('ImageName', ImageName)
def get_SystemDiskSize(self): # Integer
return self.get_query_params().get('SystemDiskSize')
    def set_SystemDiskSize(self, SystemDiskSize): # Integer
self.add_query_param('SystemDiskSize', SystemDiskSize)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_BaseImage(self): # String
return self.get_query_params().get('BaseImage')
def set_BaseImage(self, BaseImage): # String
self.add_query_param('BaseImage', BaseImage)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_AddAccounts(self): # RepeatList
return self.get_query_params().get('AddAccount')
def set_AddAccounts(self, AddAccount): # RepeatList
for depth1 in range(len(AddAccount)):
self.add_query_param('AddAccount.' + str(depth1 + 1), AddAccount[depth1])
def get_DeleteInstanceOnFailure(self): # Boolean
return self.get_query_params().get('DeleteInstanceOnFailure')
def set_DeleteInstanceOnFailure(self, DeleteInstanceOnFailure): # Boolean
self.add_query_param('DeleteInstanceOnFailure', DeleteInstanceOnFailure)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_BuildContent(self): # String
return self.get_query_params().get('BuildContent')
def set_BuildContent(self, BuildContent): # String
        self.add_query_param('BuildContent', BuildContent)
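
# --- Usage sketch (illustrative; not part of the generated SDK file) ---
# Request classes like CreateImagePipelineRequest are populated through their
# setters and dispatched via an AcsClient. The credentials, region, and
# parameter values below are placeholder assumptions; only the AcsClient API
# itself comes from aliyunsdkcore.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = CreateImagePipelineRequest()
    request.set_BaseImage("m-example")   # assumed base image id
    request.set_ImageName("demo-image")  # assumed target image name
    request.set_SystemDiskSize(40)       # system disk size in GiB
    # do_action_with_exception sends the signed RPC call and returns the
    # raw JSON response body as bytes.
    print(client.do_action_with_exception(request))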
from decimal import Decimal
from typing import (
Any,
Dict,
List,
Tuple,
)
import aiohttp
from hummingbot.core.data_type.common import TradeType
from hummingbot.core.utils.async_utils import safe_ensure_future
TOKEN_CONFIGURATIONS_URL = '/api/v3/exchange/tokens'
class LoopringAPITokenConfigurationDataSource():
""" Gets the token configuration on creation.
Use LoopringAPITokenConfigurationDataSource.create() to create.
"""
def __init__(self):
self._tokenid_lookup: Dict[str, int] = {}
self._symbol_lookup: Dict[int, str] = {}
self._token_configurations: Dict[int, Any] = {}
self._decimals: Dict[int, Decimal] = {}
@classmethod
def create(cls):
configuration_data_source = cls()
safe_ensure_future(configuration_data_source._configure())
return configuration_data_source
async def _configure(self):
async with aiohttp.ClientSession() as client:
response: aiohttp.ClientResponse = await client.get(
f"https://api3.loopring.io{TOKEN_CONFIGURATIONS_URL}"
)
if response.status >= 300:
raise IOError(f"Error fetching active loopring token configurations. HTTP status is {response.status}.")
response_dict: Dict[str, Any] = await response.json()
for config in response_dict:
self._token_configurations[config['tokenId']] = config
self._tokenid_lookup[config['symbol']] = config['tokenId']
self._symbol_lookup[config['tokenId']] = config['symbol']
self._decimals[config['tokenId']] = Decimal(f"10e{-(config['decimals'] + 1)}")
def get_bq(self, symbol: str) -> List[str]:
""" Returns the base and quote of a trading pair """
return symbol.split('-')
    def get_tokenid(self, symbol: str) -> int:
""" Returns the token id for the given token symbol """
return self._tokenid_lookup.get(symbol)
def get_symbol(self, tokenid: int) -> str:
"""Returns the symbol for the given tokenid """
return self._symbol_lookup.get(tokenid)
def unpad(self, volume: str, tokenid: int) -> Decimal:
"""Converts the padded volume/size string into the correct Decimal representation
based on the "decimals" setting from the token configuration for the referenced token
"""
return Decimal(volume) * self._decimals[tokenid]
def pad(self, volume: Decimal, tokenid: int) -> str:
"""Converts the volume/size Decimal into the padded string representation for the api
based on the "decimals" setting from the token configuration for the referenced token
"""
return str(Decimal(volume) // self._decimals[tokenid])
def get_config(self, tokenid: int) -> Dict[str, Any]:
""" Returns the token configuration for the referenced token id """
return self._token_configurations.get(tokenid)
def get_tokens(self) -> List[int]:
return list(self._token_configurations.keys())
    def sell_buy_amounts(self, baseid, quoteid, amount, price, side) -> Dict[str, Any]:
        """ Returns the buying and selling amounts for unidirectional orders, based on the
        order side, price and amount, with the token volumes padded for the api.
        """
quote_amount = amount * price
padded_amount = int(self.pad(amount, baseid))
padded_quote_amount = int(self.pad(quote_amount, quoteid))
if side is TradeType.SELL:
return {
"sellToken": {
"tokenId": str(baseid),
"volume": str(padded_amount)
},
"buyToken": {
"tokenId": str(quoteid),
"volume": str(padded_quote_amount)
},
"fillAmountBOrS": False
}
else:
return {
"sellToken": {
"tokenId": str(quoteid),
"volume": str(padded_quote_amount)
},
"buyToken": {
"tokenId": str(baseid),
"volume": str(padded_amount)
},
"fillAmountBOrS": True
            }
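
# --- Worked example of the pad/unpad scaling above (illustrative) ---
# For a token whose config reports decimals == 6, _configure() stores
# Decimal("10e-7"), which equals 10**-6. unpad() multiplies a padded
# on-chain integer amount by that factor; pad() floor-divides by it.
if __name__ == "__main__":
    factor = Decimal("10e-7")                               # 1E-6, as stored for decimals == 6
    assert factor == Decimal("1e-6")
    assert Decimal("1500000") * factor == Decimal("1.5")    # unpad: integer units -> tokens
    assert Decimal("1.5") // factor == Decimal("1500000")   # pad: tokens -> integer units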
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTransitRouterVpcAttachmentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouterVpcAttachment')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_VpcOwnerId(self): # Long
return self.get_query_params().get('VpcOwnerId')
def set_VpcOwnerId(self, VpcOwnerId): # Long
self.add_query_param('VpcOwnerId', VpcOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_TransitRouterAttachmentName(self): # String
return self.get_query_params().get('TransitRouterAttachmentName')
def set_TransitRouterAttachmentName(self, TransitRouterAttachmentName): # String
self.add_query_param('TransitRouterAttachmentName', TransitRouterAttachmentName)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_AutoPublishRouteEnabled(self): # Boolean
return self.get_query_params().get('AutoPublishRouteEnabled')
def set_AutoPublishRouteEnabled(self, AutoPublishRouteEnabled): # Boolean
self.add_query_param('AutoPublishRouteEnabled', AutoPublishRouteEnabled)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_ZoneMappingss(self): # RepeatList
return self.get_query_params().get('ZoneMappings')
def set_ZoneMappingss(self, ZoneMappings): # RepeatList
for depth1 in range(len(ZoneMappings)):
if ZoneMappings[depth1].get('VSwitchId') is not None:
self.add_query_param('ZoneMappings.' + str(depth1 + 1) + '.VSwitchId', ZoneMappings[depth1].get('VSwitchId'))
if ZoneMappings[depth1].get('ZoneId') is not None:
self.add_query_param('ZoneMappings.' + str(depth1 + 1) + '.ZoneId', ZoneMappings[depth1].get('ZoneId'))
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransitRouterId(self): # String
return self.get_query_params().get('TransitRouterId')
def set_TransitRouterId(self, TransitRouterId): # String
self.add_query_param('TransitRouterId', TransitRouterId)
    def get_TransitRouterAttachmentDescription(self): # String
return self.get_query_params().get('TransitRouterAttachmentDescription')
def set_TransitRouterAttachmentDescription(self, TransitRouterAttachmentDescription): # String
self.add_query_param('TransitRouterAttachmentDescription', TransitRouterAttachmentDescription)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
        self.add_query_param('ChargeType', ChargeType)
from __future__ import annotations
import os
import subprocess
import sys
import time
import fsspec
import pytest
from fsspec.core import open_files
from packaging.version import parse as parse_version
import dask.bag as db
from dask.utils import tmpdir
files = ["a", "b"]
requests = pytest.importorskip("requests")
errs: tuple[type[Exception], ...] = (
requests.exceptions.RequestException,
FileNotFoundError,
)
if parse_version(fsspec.__version__) > parse_version("0.7.4"):
aiohttp = pytest.importorskip("aiohttp")
errs = errs + (aiohttp.client_exceptions.ClientResponseError,)
@pytest.fixture(scope="module")
def dir_server():
with tmpdir() as d:
for fn in files:
with open(os.path.join(d, fn), "wb") as f:
f.write(b"a" * 10000)
cmd = [sys.executable, "-m", "http.server", "8999"]
p = subprocess.Popen(cmd, cwd=d)
timeout = 10
while True:
try:
requests.get("http://localhost:8999")
break
except requests.exceptions.ConnectionError as e:
time.sleep(0.1)
timeout -= 0.1
if timeout < 0:
raise RuntimeError("Server did not appear") from e
yield d
p.terminate()
def test_simple(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with f as f:
data = f.read()
with open(os.path.join(dir_server, fn), "rb") as expected:
assert data == expected.read()
def test_loc(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
data = f.read(2)
assert data == expected[:2]
assert f.loc == 2
f.seek(0)
data = f.read(3)
assert data == expected[:3]
f.seek(1, 1)
assert f.loc == 4
def test_fetch_range_with_headers(dir_server):
# https://github.com/dask/dask/issues/4479
root = "http://localhost:8999/"
fn = files[0]
headers = {"Date": "Wed, 21 Oct 2015 07:28:00 GMT"}
f = open_files(root + fn, headers=headers)[0]
with f as f:
data = f.read(length=1) + f.read(length=-1)
with open(os.path.join(dir_server, fn), "rb") as expected:
assert data == expected.read()
@pytest.mark.parametrize("block_size", [None, 99999])
def test_ops(dir_server, block_size):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
        # these pass because the default block size is larger than the file
assert f.read(10) == expected[:10]
f.seek(0)
assert f.read(10) == expected[:10]
assert f.read(10) == expected[10:20]
f.seek(-10, 2)
assert f.read() == expected[-10:]
def test_ops_blocksize(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn, block_size=2)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
# it's OK to read the whole file
assert f.read() == expected
# and now the file magically has a size
assert f.size == len(expected)
# note that if we reuse f from above, because it is tokenized, we get
# the same open file - where is this cached?
fn = files[1]
f = open_files(root + fn, block_size=2)[0]
with f as f:
if parse_version(fsspec.__version__) < parse_version("2021.11.1"):
# fails because we want only 12 bytes
with pytest.raises(ValueError):
assert f.read(10) == expected[:10]
else:
# fixed in https://github.com/fsspec/filesystem_spec/pull/830
assert f.read(10) == expected[:10]
def test_errors(dir_server):
f = open_files("http://localhost:8999/doesnotexist")[0]
with pytest.raises(errs):
with f as f:
f.read()
f = open_files("http://nohost/")[0]
expected = FileNotFoundError
with pytest.raises(expected):
with f as f:
f.read()
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn, mode="wb")[0]
with pytest.raises(NotImplementedError):
with f:
pass
f = open_files(root + fn)[0]
with f as f:
with pytest.raises(ValueError):
f.seek(-1)
def test_files(dir_server):
root = "http://localhost:8999/"
fs = open_files([root + f for f in files])
for f, f2 in zip(fs, files):
with f as f:
with open(os.path.join(dir_server, f2), "rb") as expected:
assert f.read() == expected.read()
def test_open_glob(dir_server):
root = "http://localhost:8999/"
fs = open_files(root + "/*")
assert fs[0].path == "http://localhost:8999/a"
assert fs[1].path == "http://localhost:8999/b"
@pytest.mark.network
@pytest.mark.parametrize("engine", ["pyarrow", "fastparquet"])
def test_parquet(engine):
pytest.importorskip("requests", minversion="2.21.0")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip(engine)
df = dd.read_parquet(
[
"https://github.com/Parquet/parquet-compatibility/raw/"
"master/parquet-testdata/impala/1.1.1-NONE/"
"nation.impala.parquet"
],
engine=engine,
).compute()
assert df.n_nationkey.tolist() == list(range(25))
assert df.columns.tolist() == ["n_nationkey", "n_name", "n_regionkey", "n_comment"]
@pytest.mark.flaky(
reruns=10, reruns_delay=5, reason="https://github.com/dask/dask/issues/3696"
)
@pytest.mark.network
def test_bag():
# This test pulls from different hosts
urls = [
"https://raw.githubusercontent.com/weierophinney/pastebin/"
"master/public/js-src/dojox/data/tests/stores/patterns.csv",
"https://en.wikipedia.org",
]
b = db.read_text(urls)
assert b.npartitions == 2
b.compute()
@pytest.mark.network
def test_read_csv():
dd = pytest.importorskip("dask.dataframe")
url = (
"https://raw.githubusercontent.com/weierophinney/pastebin/"
"master/public/js-src/dojox/data/tests/stores/patterns.csv"
)
b = dd.read_csv(url)
    b.compute()
import os
import threading
import time
from typing import Optional
import psutil
from galaxy import (
job_metrics,
model,
)
from galaxy.app_unittest_utils.tools_support import UsesTools
from galaxy.jobs.runners import local
from galaxy.util import bunch
from galaxy.util.unittest import TestCase
class TestLocalJobRunner(TestCase, UsesTools):
def setUp(self):
self.setup_app()
self._init_tool()
self.app.job_metrics = job_metrics.JobMetrics()
self.job_wrapper = MockJobWrapper(self.app, self.test_directory, self.tool)
def tearDown(self):
self.tear_down_app()
def test_run(self):
self.job_wrapper.command_line = "echo HelloWorld"
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "HelloWorld"
def test_galaxy_lib_on_path(self):
self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 0
def test_default_slots(self):
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "1"
def test_slots_override(self):
# Set local_slots in job destination to specify slots for
# local job runner.
self.job_wrapper.job_destination.params["local_slots"] = 3
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "3"
def test_exit_code(self):
self.job_wrapper.command_line = '''sh -c "exit 4"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 4
def test_metadata_gets_set(self):
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_metadata_gets_set_if_embedded(self):
self.job_wrapper.job_destination.params["embed_metadata_in_job"] = "True"
# Kill off cruft for _handle_metadata_externally and make sure job still works...
self.job_wrapper.external_output_metadata = None
self.app.datatypes_registry.set_external_metadata_tool = None
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_stopping_job(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner(self.app, 1)
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.stop_job(self.job_wrapper)
t.join(1)
assert not psutil.pid_exists(external_id)
def test_shutdown_no_jobs(self):
self.app.config.monitor_thread_join_timeout = 5
runner = local.LocalJobRunner(self.app, 1)
runner.start()
runner.shutdown()
def test_stopping_job_at_shutdown(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
self.app.model.session = bunch.Bunch(add=lambda x: None, flush=lambda: None)
runner = local.LocalJobRunner(self.app, 1)
runner.start()
self.app.config.monitor_thread_join_timeout = 15
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.shutdown()
t.join(1)
assert not psutil.pid_exists(external_id)
assert "job terminated by Galaxy shutdown" in self.job_wrapper.fail_message
class MockJobWrapper:
def __init__(self, app, test_directory, tool):
working_directory = os.path.join(test_directory, "workdir")
tool_working_directory = os.path.join(working_directory, "working")
os.makedirs(tool_working_directory)
self.app = app
self.tool = tool
self.requires_containerization = False
self.state = model.Job.states.QUEUED
self.command_line = "echo HelloWorld"
self.environment_variables = []
self.commands_in_new_shell = False
self.prepare_called = False
self.dependency_shell_commands = None
self.working_directory = working_directory
self.tool_working_directory = tool_working_directory
self.requires_setting_metadata = True
self.job_destination = bunch.Bunch(id="default", params={})
self.galaxy_lib_dir = os.path.abspath("lib")
self.job = model.Job()
self.job_id = 1
self.job.id = 1
self.output_paths = ["/tmp/output1.dat"]
self.mock_metadata_path = os.path.abspath(os.path.join(test_directory, "METADATA_SET"))
self.metadata_command = f"touch {self.mock_metadata_path}"
self.galaxy_virtual_env = None
self.shell = "/bin/bash"
self.cleanup_job = "never"
self.tmp_dir_creation_statement = ""
self.use_metadata_binary = False
self.guest_ports = []
self.metadata_strategy = "directory"
self.remote_command_line = False
# Cruft for setting metadata externally, axe at some point.
self.external_output_metadata: Optional[bunch.Bunch] = bunch.Bunch()
self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(build_dependency_shell_commands=lambda: [])
def check_tool_output(*args, **kwds):
return "ok"
def wait_for_external_id(self):
"""Test method for waiting until an external id has been registered."""
external_id = None
for _ in range(50):
external_id = self.job.job_runner_external_id
if external_id:
break
time.sleep(0.1)
return external_id
def prepare(self):
self.prepare_called = True
def set_external_id(self, external_id, **kwd):
self.job.job_runner_external_id = external_id
def get_command_line(self):
return self.command_line
def container_monitor_command(self, *args, **kwds):
return None
def get_id_tag(self):
return "1"
def get_state(self):
return self.state
def change_state(self, state, job=None):
self.state = state
@property
def job_io(self):
return bunch.Bunch(
get_output_fnames=lambda: [], check_job_script_integrity=False, version_path="/tmp/version_path"
)
def get_job(self):
return self.job
def setup_external_metadata(self, **kwds):
return self.metadata_command
def get_env_setup_clause(self):
return ""
    def has_limits(self):
return False
def fail(
self, message, exception=False, tool_stdout="", tool_stderr="", exit_code=None, job_stdout=None, job_stderr=None
):
self.fail_message = message
self.fail_exception = exception
def finish(self, stdout, stderr, exit_code, **kwds):
self.stdout = stdout
self.stderr = stderr
self.exit_code = exit_code
def tmp_directory(self):
return None
def home_directory(self):
return None
def reclaim_ownership(self):
pass
@property
def is_cwl_job(self):
        return False
import logging
from datetime import date, timedelta
import waffle
from asgiref.sync import sync_to_async
from django.conf import settings
from django.http import HttpRequest, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.template import TemplateDoesNotExist
from requests import Session
from rest_framework import status
from rest_framework.status import HTTP_400_BAD_REQUEST
from cl.lib.elasticsearch_utils import build_es_base_query
from cl.lib.scorched_utils import ExtraSolrInterface
from cl.lib.search_utils import (
build_alert_estimation_query,
build_court_count_query,
build_coverage_query,
get_solr_interface,
)
from cl.search.documents import AudioDocument
from cl.search.forms import SearchForm
from cl.search.models import SEARCH_TYPES, Court
from cl.simple_pages.views import get_coverage_data_fds
logger = logging.getLogger(__name__)
def annotate_courts_with_counts(courts, court_count_tuples):
"""Solr gives us a response like:
court_count_tuples = [
('ca2', 200),
('ca1', 42),
...
]
Here we add an attribute to our court objects so they have these values.
"""
# Convert the tuple to a dict
court_count_dict = {}
for court_str, count in court_count_tuples:
court_count_dict[court_str] = count
for court in courts:
court.count = court_count_dict.get(court.pk, 0)
return courts
def make_court_variable():
courts = Court.objects.exclude(jurisdiction=Court.TESTING_COURT)
with Session() as session:
si = ExtraSolrInterface(
settings.SOLR_OPINION_URL, http_connection=session, mode="r"
)
response = si.query().add_extra(**build_court_count_query()).execute()
court_count_tuples = response.facet_counts.facet_fields["court_exact"]
courts = annotate_courts_with_counts(courts, court_count_tuples)
return courts
def court_index(request: HttpRequest) -> HttpResponse:
"""Shows the information we have available for the courts."""
courts = make_court_variable()
return render(
request, "jurisdictions.html", {"courts": courts, "private": False}
)
def rest_docs(request, version=None):
"""Show the correct version of the rest docs"""
courts = make_court_variable()
court_count = len(courts)
context = {"court_count": court_count, "courts": courts, "private": False}
try:
return render(request, f"rest-docs-{version}.html", context)
except TemplateDoesNotExist:
return render(request, "rest-docs-vlatest.html", context)
def api_index(request: HttpRequest) -> HttpResponse:
court_count = Court.objects.exclude(
jurisdiction=Court.TESTING_COURT
).count()
return render(
request, "docs.html", {"court_count": court_count, "private": False}
)
def replication_docs(request: HttpRequest) -> HttpResponse:
return render(request, "replication.html", {"private": False})
def bulk_data_index(request: HttpRequest) -> HttpResponse:
"""Shows an index page for the dumps."""
disclosure_coverage = get_coverage_data_fds()
return render(
request,
"bulk-data.html",
disclosure_coverage,
)
def strip_zero_years(data):
"""Removes zeroes from the ends of the court data
Some courts only have values through to a certain date, but we don't
check for that in our queries. Instead, we truncate any zero-values that
    occur at either end of their stats.
"""
start = 0
end = len(data)
# Slice off zeroes at the beginning
for i, data_pair in enumerate(data):
if data_pair[1] != 0:
start = i
break
# Slice off zeroes at the end
for i, data_pair in reversed(list(enumerate(data))):
if data_pair[1] != 0:
end = i
break
return data[start : end + 1]
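# Example (illustrative): strip_zero_years(
#     [("1990", 0), ("1991", 3), ("1992", 0), ("1993", 5), ("1994", 0)])
# returns the span from the first to the last non-zero pair:
# [("1991", 3), ("1992", 0), ("1993", 5)].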
def coverage_data(request, version, court):
"""Provides coverage data for a court.
Responds to either AJAX or regular requests.
"""
if court != "all":
court_str = get_object_or_404(Court, pk=court).pk
else:
court_str = "all"
q = request.GET.get("q")
with Session() as session:
si = ExtraSolrInterface(
settings.SOLR_OPINION_URL, http_connection=session, mode="r"
)
facet_field = "dateFiled"
response = (
si.query()
.add_extra(**build_coverage_query(court_str, q, facet_field))
.execute()
)
counts = response.facet_counts.facet_ranges[facet_field]["counts"]
counts = strip_zero_years(counts)
# Calculate the totals
annual_counts = {}
total_docs = 0
for date_string, count in counts:
annual_counts[date_string[:4]] = count
total_docs += count
return JsonResponse(
{"annual_counts": annual_counts, "total": total_docs}, safe=True
)
async def get_result_count(request, version, day_count):
"""Get the count of results for the past `day_count` number of days
GET parameters will be a complete search string
:param request: The Django request object
:param version: The API version number (ignored for now, but there for
later)
:param day_count: The number of days to average across. More is slower.
    :return: A JSON object with the number of hits during the last day_count
period.
"""
search_form = await sync_to_async(SearchForm)(request.GET.copy())
if not search_form.is_valid():
return JsonResponse(
{"error": "Invalid SearchForm"},
safe=True,
status=HTTP_400_BAD_REQUEST,
)
cd = search_form.cleaned_data
search_type = cd["type"]
es_flag_for_oa = await sync_to_async(waffle.flag_is_active)(
request, "oa-es-active"
)
if (
search_type == SEARCH_TYPES.ORAL_ARGUMENT and es_flag_for_oa
): # Elasticsearch version for OA
document_type = AudioDocument
cd["argued_after"] = date.today() - timedelta(days=int(day_count))
cd["argued_before"] = None
search_query = document_type.search()
s = build_es_base_query(search_query, cd)
total_query_results = s.count()
else:
with Session() as session:
try:
si = get_solr_interface(cd, http_connection=session)
except NotImplementedError:
logger.error(
"Tried getting solr connection for %s, but it's not "
"implemented yet",
cd["type"],
)
raise
response = (
si.query()
.add_extra(**build_alert_estimation_query(cd, int(day_count)))
.execute()
)
total_query_results = response.result.numFound
return JsonResponse({"count": total_query_results}, safe=True)
async def deprecated_api(request, v):
return JsonResponse(
{
"meta": {
"status": "This endpoint is deprecated. Please upgrade to the "
"newest version of the API.",
},
"objects": [],
},
safe=False,
status=status.HTTP_410_GONE,
)
def rest_change_log(request):
context = {"private": False}
return render(request, "rest-change-log.html", context)
def webhooks_getting_started(request):
context = {"private": False}
return render(request, "webhooks-getting-started.html", context)
def webhooks_docs(request, version=None):
"""Show the correct version of the webhooks docs"""
context = {"private": False}
try:
return render(request, f"webhooks-docs-{version}.html", context)
except TemplateDoesNotExist:
return render(request, "webhooks-docs-vlatest.html", context) | null |
import functools
import logging
import requests
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from galaxy.structured_app import BasicSharedApp
from galaxy.util import DEFAULT_SOCKET_TIMEOUT
log = logging.getLogger(__name__)
class CitationsManager:
def __init__(self, app: BasicSharedApp) -> None:
self.app = app
self.doi_cache = DoiCache(app.config)
def citations_for_tool(self, tool):
return tool.citations
def citations_for_tool_ids(self, tool_ids):
citation_collection = CitationCollection()
for tool_id in tool_ids:
tool = self._get_tool(tool_id)
for citation in self.citations_for_tool(tool):
citation_collection.add(citation)
return citation_collection.citations
def parse_citation(self, citation_elem):
return parse_citation(citation_elem, self)
def _get_tool(self, tool_id):
tool = self.app.toolbox.get_tool(tool_id)
return tool
class DoiCache:
def __init__(self, config):
cache_opts = {
"cache.type": config.citation_cache_type,
"cache.data_dir": config.citation_cache_data_dir,
"cache.lock_dir": config.citation_cache_lock_dir,
"cache.url": config.citation_cache_url,
"cache.table_name": config.citation_cache_table_name,
"cache.schema_name": config.citation_cache_schema_name,
}
self._cache = CacheManager(**parse_cache_config_options(cache_opts)).get_cache("doi")
def _raw_get_bibtex(self, doi):
doi_url = f"https://doi.org/{doi}"
headers = {"Accept": "application/x-bibtex"}
res = requests.get(doi_url, headers=headers, timeout=DEFAULT_SOCKET_TIMEOUT)
# To decode the response content, res.text tries to determine the
# content encoding from the Content-Type header (res.encoding), and if
# that fails, falls back to guessing from the content itself (res.apparent_encoding).
# The guessed encoding is sometimes wrong, better to default to utf-8.
if res.encoding is None:
res.encoding = "utf-8"
return res.text
    def get_bibtex(self, doi):
createfunc = functools.partial(self._raw_get_bibtex, doi)
return self._cache.get(key=doi, createfunc=createfunc)
def parse_citation(elem, citation_manager):
"""
Parse an abstract citation entry from the specified XML element.
"""
citation_type = elem.attrib.get("type", None)
citation_class = CITATION_CLASSES.get(citation_type, None)
if not citation_class:
log.warning(f"Unknown or unspecified citation type: {citation_type}")
return None
try:
citation = citation_class(elem, citation_manager)
except Exception as e:
raise Exception(f"Invalid citation of type '{citation_type}' with content '{elem.text}': {e}")
return citation
class CitationCollection:
def __init__(self):
self.citations = []
def __iter__(self):
return self.citations.__iter__()
def __len__(self):
return len(self.citations)
def add(self, new_citation):
for citation in self.citations:
if citation.equals(new_citation):
# TODO: We have two equivalent citations, pick the more
# informative/complete/correct.
return False
self.citations.append(new_citation)
return True
class BaseCitation:
def to_dict(self, citation_format):
if citation_format == "bibtex":
return dict(
format="bibtex",
content=self.to_bibtex(),
)
else:
raise Exception(f"Unknown citation format {citation_format}")
def equals(self, other_citation):
if self.has_doi() and other_citation.has_doi():
return self.doi() == other_citation.doi()
else:
# TODO: Do a better job figuring out if this is the same citation.
return self.to_bibtex() == other_citation.to_bibtex()
def has_doi(self):
return False
class BibtexCitation(BaseCitation):
def __init__(self, elem, citation_manager):
self.raw_bibtex = elem.text.strip()
def to_bibtex(self):
return self.raw_bibtex
class DoiCitation(BaseCitation):
BIBTEX_UNSET = object()
def __init__(self, elem, citation_manager):
self.__doi = elem.text.strip()
self.doi_cache = citation_manager.doi_cache
self.raw_bibtex = DoiCitation.BIBTEX_UNSET
def has_doi(self):
return True
def doi(self):
return self.__doi
def to_bibtex(self):
if self.raw_bibtex is DoiCitation.BIBTEX_UNSET:
try:
                self.raw_bibtex = self.doi_cache.get_bibtex(self.__doi)
except Exception:
log.exception("Failed to fetch bibtex for DOI %s", self.__doi)
if self.raw_bibtex is DoiCitation.BIBTEX_UNSET:
return f"""@MISC{{{self.__doi},
DOI = {{{self.__doi}}},
note = {{Failed to fetch BibTeX for DOI.}}
}}"""
else:
return self.raw_bibtex
CITATION_CLASSES = dict(
bibtex=BibtexCitation,
doi=DoiCitation,
)
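
# --- Usage sketch (illustrative) ---
# parse_citation dispatches on the <citation type="..."> attribute via
# CITATION_CLASSES. A raw-BibTeX element can be built directly; the DOI
# variant instead fetches BibTeX lazily through DoiCache. The element below
# is a made-up example.
if __name__ == "__main__":
    from xml.etree import ElementTree as ET

    elem = ET.fromstring('<citation type="bibtex">@article{x, title={T}}</citation>')
    # BibtexCitation ignores its citation_manager argument, so None suffices.
    citation = BibtexCitation(elem, citation_manager=None)
    print(citation.to_dict("bibtex"))  # {'format': 'bibtex', 'content': '@article{x, title={T}}'}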
# Copyright (C) Jan 2020 Mellanox Technologies Ltd. All rights reserved.
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --
#######################################################
#
# SegmentCreator.py
# Python implementation of the Class SegmentCreator
# Generated by Enterprise Architect
# Created on: 14-Aug-2019 10:12:03 AM
# Original author: talve
#
#######################################################
from segments.SegmentFactory import SegmentFactory
from utils import constants as cs
class SegmentCreator:
"""this class is responsible for splitting the raw data to segments and creating
segments objects.
"""
def METHOD_NAME(self, raw_data):
"""convert segments data into a segments objects by using SegmentFactory.
"""
try:
segments = []
raw_data_segments_lst = self._parse_segments(raw_data)
for raw_seg in raw_data_segments_lst:
                seg_type = '{:0b}'.format(raw_seg[cs.SEGMENT_TYPE_DWORD_LOCATION]).zfill(32)[
                    cs.SEGMENT_TYPE_START: cs.SEGMENT_TYPE_END]
seg_type = hex(int(seg_type, 2))
seg_type_for_create = SegmentCreator.get_seg_type_for_register_segments(seg_type)
                seg = SegmentFactory.create(seg_type_for_create, raw_seg)
seg.resource_type = seg_type
segments.append(seg)
except Exception as e:
raise Exception("Failed to create segments with error: {0}".format(e))
return segments
def _parse_segments(self, raw_data):
"""splitting the raw data into segments
raw data is represented as a list of dword's
"""
splitted_segments = []
try:
end_index = len(raw_data) - 1
current_index = 0
while current_index <= end_index:
# seg size specified in dwords
seg_size = '{:032b}'.format(raw_data[cs.SEGMENT_SIZE_DWORD_LOCATION + current_index])[
cs.SEGMENT_SIZE_START: cs.SEGMENT_SIZE_END]
seg_size = int(seg_size, 2)
if seg_size == 0:
raise Exception("Error in segments splitting. raw_data didn't get smaller - found segment_size = 0")
seg_data = raw_data[current_index:seg_size + current_index]
splitted_segments.append(seg_data)
current_index += seg_size
except Exception as e:
raise Exception("Failed to split segments with error: {0}".format(e))
return splitted_segments
@classmethod
def is_resource_segment(cls, seg_type):
"""This method check if the segment type is a inside the interval of a resource segment
"""
return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MAX >= seg_type >= cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MIN
@classmethod
def get_seg_type_for_register_segments(cls, seg_type):
"""This method check if the segment type is a reference segment
and return the right type of that segment.
"""
if cls.is_resource_segment(seg_type):
return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE
        return seg_type
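
# --- Worked example of the dword bit-slicing used above (illustrative) ---
# Segment headers pack type and size fields into 32-bit dwords; the code
# renders a dword as a zero-padded binary string and slices out a bit range.
# The offsets below are assumed for demonstration only; the real ones live
# in utils.constants.
if __name__ == "__main__":
    dword = 0x00AB0010                  # hypothetical header dword
    bits = '{:032b}'.format(dword)      # 32-character binary rendering
    seg_type = hex(int(bits[0:16], 2))  # high 16 bits -> '0xab'
    seg_size = int(bits[16:32], 2)      # low 16 bits  -> 16 (dwords)
    print(seg_type, seg_size)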
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import sys
from itertools import combinations
from skupper_router_internal.tools.command import (main,
UsageError,
parse_args_skstat,
_skmanage_parser,
_skstat_parser)
from system_test import unittest
def mock_error(self, message):
raise ValueError(message)
argparse.ArgumentParser.error = mock_error # type: ignore[assignment] # Cannot assign to a method
# The BusManager class is defined in tools/skmanage.in -> tools/skmanage;
# otherwise it could just be imported.
class FakeBusManager:
    def displayGeneral(self): pass
def displayConnections(self): pass
def displayRouterLinks(self): pass
def displayRouterNodes(self): pass
def displayEdges(self): pass
def displayAddresses(self): pass
def displayMemory(self): pass
def displayPolicy(self): pass
def displayVhosts(self): pass
def displayVhostgroups(self): pass
def displayVhoststats(self): pass
def displayAutolinks(self): pass
def displayLog(self): pass
def show_all(self): pass
FBM = FakeBusManager
class TestParseArgsSkstat(unittest.TestCase):
def setUp(self):
super().setUp()
self.parser = _skstat_parser(BusManager=FBM)
def test_parse_args_skstat_print_help(self):
self.parser.print_help()
def test_parse_args_skstat_mutually_exclusive(self):
options1 = ["-g", "-c",
"-l", "-n", "-e", "-a", "-m", "--autolinks", "--log",
"--all-entities"]
options2 = ["-r", "--all-routers"]
def _call_pairs(options):
for options_pair in combinations(options, 2):
with self.assertRaises(ValueError):
self.parser.parse_args(options_pair)
_call_pairs(options1)
_call_pairs(options2)
def test_parse_args_skstat_default(self):
args = parse_args_skstat(FBM, argv=[])
        self.assertEqual(FBM.displayGeneral.__name__, args.show)
def test_parse_args_skstat_method_show_matching(self):
matching = [("-g", FBM.METHOD_NAME.__name__),
("-c", FBM.displayConnections.__name__),
("-l", FBM.displayRouterLinks.__name__),
("-n", FBM.displayRouterNodes.__name__),
("-e", FBM.displayEdges.__name__),
("-a", FBM.displayAddresses.__name__),
("-m", FBM.displayMemory.__name__),
("--autolinks", FBM.displayAutolinks.__name__),
("--log", FBM.displayLog.__name__),
("--all-entities", FBM.show_all.__name__),
]
for option, expected in matching:
args = self.parser.parse_args([option])
self.assertEqual(expected, args.show)
def test_parse_args_skstat_limit(self):
args = self.parser.parse_args([])
self.assertEqual(None, args.limit)
args = self.parser.parse_args(["--limit", "1"])
self.assertEqual(1, args.limit)
class TestParseArgsSkmanage(unittest.TestCase):
def setUp(self):
super().setUp()
self.operations = ["HERE", "SOME", "OPERATIONS"]
self.parser = _skmanage_parser(operations=self.operations)
def test_parse_args_skmanage_print_help(self):
self.parser.print_help()
def test_parse_args_skmanage_operation_no_args(self):
argv = "-r r1 QUERY --type some --name the_name -b 127.0.0.1:5672"
opts, args = self.parser.parse_known_args(argv.split())
self.assertEqual("QUERY", args[0])
def test_parse_args_skmanage_operation_and_args(self):
argv = "-r r1 QUERY arg1=val1 --type some other=argument --name the_name -b 127.0.0.1:5672"
opts, args = self.parser.parse_known_args(argv.split())
self.assertEqual(["QUERY", "arg1=val1", "other=argument"], args)
class TestMain(unittest.TestCase):
def test_main(self):
def run_success(argv):
self.assertEqual(sys.argv, argv)
def run_raises(argv, _Exception):
run_success(argv)
raise _Exception("some")
def run_raises_UsageError(argv):
run_raises(argv, UsageError)
def run_raises_Exception(argv):
run_raises(argv, Exception)
def run_raises_KeyboardInterrupt(argv):
run_raises(argv, KeyboardInterrupt)
self.assertEqual(0, main(run_success))
failed_runs = [
            # run_raises_UsageError,  # uncommenting this exposes a bug
run_raises_Exception,
run_raises_KeyboardInterrupt,
]
for run in failed_runs:
self.assertEqual(1, main(run))
if __name__ == '__main__':
    unittest.main()
from builtins import str
__docformat__ = "restructuredtext en"
from mdp import numx, numx_linalg, utils, Node, NodeException, TrainingException
from mdp.utils import mult
# ??? For the future: add an optional second phase to compute
# residuals, significance of the slope.
class LinearRegressionNode(Node):
"""Compute least-square, multivariate linear regression on the input
data, i.e., learn coefficients ``b_j`` so that the linear combination
``y_i = b_0 + b_1 x_1 + ... b_N x_N`` , for ``i = 1 ... M``, minimizes
the sum of squared error given the training ``x``'s and ``y``'s.
This is a supervised learning node, and requires input data ``x`` and
target data ``y`` to be supplied during training (see ``train``
docstring).
:ivar beta: The coefficients of the linear regression.
"""
def __init__(self, with_bias=True, use_pinv=False,
input_dim=None, output_dim=None, dtype=None):
"""Initializes an object of type 'LinearRegressionNode'.
:param with_bias: If true, the linear model includes a constant term.
- True: y_i = b_0 + b_1 x_1 + ... b_N x_N
- False: y_i = b_1 x_1 + ... b_N x_N
If present, the constant term is stored in the first
column of ``self.beta``. Default: True.
:type with_bias: bool
:param use_pinv: If true, uses the pseudo-inverse function to compute
the linear regression coefficients, which is more robust
in some cases. Default: False.
:type use_pinv: bool
:param input_dim: Dimensionality of the input.
Default is None.
:type input_dim: int
:param output_dim: Dimensionality of the output.
Default is None.
:type output_dim: int
:param dtype: Datatype of the input.
Default is None.
:type dtype: numpy.dtype, str
"""
super(LinearRegressionNode, self).__init__(input_dim, output_dim, dtype)
self.with_bias = with_bias
self.use_pinv = use_pinv
# for the linear regression estimator we need two terms
# the first one is X^T X
self._xTx = None
# the second one is X^T Y
self._xTy = None
# keep track of how many data points have been sent
self._tlen = 0
# final regression coefficients
# if with_bias=True, beta includes the bias term in the first column
self.beta = None
@staticmethod
def is_invertible():
return False
def _check_train_args(self, x, y):
# set output_dim if necessary
if self._output_dim is None:
self._set_output_dim(y.shape[1])
# check output dimensionality
self._check_output(y)
if y.shape[0] != x.shape[0]:
msg = ("The number of output points should be equal to the "
"number of datapoints (%d != %d)" % (y.shape[0], x.shape[0]))
raise TrainingException(msg)
def _train(self, x, y):
"""
:param x: Array of different input observations.
:type x: numpy.ndarray
:param y: Array of size (x.shape[0], output_dim) that contains the
observed output to the input x's.
:type y: numpy.ndarray
"""
# initialize internal vars if necessary
if self._xTx is None:
if self.with_bias:
x_size = self._input_dim + 1
else:
x_size = self._input_dim
self._xTx = numx.zeros((x_size, x_size), self._dtype)
self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)
if self.with_bias:
x = self._add_constant(x)
# update internal variables
self._xTx += mult(x.T, x)
self._xTy += mult(x.T, y)
self._tlen += x.shape[0]
def _stop_training(self):
try:
if self.use_pinv:
invfun = utils.pinv
else:
invfun = utils.inv
inv_xTx = invfun(self._xTx)
except numx_linalg.LinAlgError as exception:
errstr = (str(exception) +
"\n Input data may be redundant (i.e., some of the " +
"variables may be linearly dependent).")
raise NodeException(errstr)
self.beta = mult(inv_xTx, self._xTy)
# remove junk
del self._xTx
del self._xTy
    def _execute(self, x):
if self.with_bias:
x = self._add_constant(x)
return mult(x, self.beta)
def _add_constant(self, x):
"""Add a constant term to the vector 'x'.
x -> [1 x]
:param x: The vector a constant term is appended to.
:type x: numpy.ndarray
:return: The altered vector.
:rtype: numpy.ndarray
"""
return numx.concatenate((numx.ones((x.shape[0], 1),
                                            dtype=self.dtype), x), axis=1)
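
# --- Minimal sketch of the normal-equation solve above (illustrative) ---
# The node accumulates X^T X and X^T y across training chunks and computes
# beta = (X^T X)^(-1) X^T y in _stop_training. The same estimate on a toy
# problem with plain NumPy, bias column prepended as in _add_constant:
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.standard_normal((100, 3))
    true_beta = np.array([[0.5], [2.0], [-1.0], [3.0]])  # bias + 3 slopes
    xb = np.concatenate((np.ones((100, 1)), x), axis=1)  # x -> [1 x]
    y = xb @ true_beta
    beta = np.linalg.inv(xb.T @ xb) @ (xb.T @ y)
    assert np.allclose(beta, true_beta)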
# Extended prompt utilities.
# Copyright (C) 2011-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwd()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return '<no %s>' % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return '<no attribute %s on current %s>' % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, 'frame', attr, 'name')
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, 'thread', attr, 'num')
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return '\033'
def _prompt_bs(attr):
"A backslash."
return '\\'
def _prompt_n(attr):
"A newline."
return '\n'
def _prompt_r(attr):
"A carriage return."
return '\r'
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return '\001'
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return '\002'
prompt_substitutions = {
'e': _prompt_esc,
'\\': _prompt_bs,
    'n': _prompt_n,
'r': _prompt_r,
'v': _prompt_version,
'w': _prompt_pwd,
'f': _prompt_frame,
't': _prompt_thread,
'p': _prompt_param,
'[': _prompt_noprint_begin,
']': _prompt_noprint_end
}
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ''
    keys = sorted(prompt_substitutions.keys())
for key in keys:
result += ' \\%s\t%s\n' % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ''
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == '\\':
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == '{':
j = i + 1
while j < plen and prompt[j] != '}':
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != '}':
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
    return result
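
# --- Usage sketch (illustrative) ---
# substitute_prompt expands the escapes registered above; it only runs
# inside a live GDB session, since the gdb module is unavailable elsewhere.
#
#     substitute_prompt("\\w \\v> ")           # "<cwd> <gdb-version>> "
#     substitute_prompt("\\f{name} ")          # selected frame's name
#     substitute_prompt("\\p{history size} ")  # value of the named parameter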
import logging
import sys
from copy import deepcopy
from galaxy.util import listify
log = logging.getLogger(__name__)
class FilterFactory:
"""
An instance of this class is responsible for filtering the list
of tools presented to a given user in a given context.
"""
def __init__(self, toolbox):
self.toolbox = toolbox
        # Prepopulate dict containing filters that are always checked;
        # other filters that get checked depending on context (e.g. coming
        # from trackster, or when no user is found) are added in build_filters.
self.default_filters = dict(tool=[_not_hidden, _handle_authorization], section=[], label=[])
# Add dynamic filters to these default filters.
config = toolbox.app.config
self.__base_modules = listify(
getattr(config, "toolbox_filter_base_modules", "galaxy.tool_util.toolbox.filters")
)
self.__init_filters("tool", getattr(config, "tool_filters", ""), self.default_filters)
self.__init_filters("section", getattr(config, "tool_section_filters", ""), self.default_filters)
self.__init_filters("label", getattr(config, "tool_label_filters", ""), self.default_filters)
def build_filters(self, trans, **kwds):
"""
Build list of filters to check tools against given current context.
"""
filters = deepcopy(self.default_filters)
if trans.user:
for name, value in trans.user.preferences.items():
if value and value.strip():
user_filters = listify(value, do_strip=True)
category = ""
if name == "toolbox_tool_filters":
category = "tool"
elif name == "toolbox_section_filters":
category = "section"
elif name == "toolbox_label_filters":
category = "label"
if category:
validate = getattr(trans.app.config, f"user_tool_{category}_filters", [])
self.__init_filters(category, user_filters, filters, validate=validate)
if kwds.get("trackster", False):
filters["tool"].append(_has_trackster_conf)
return filters
def __init_filters(self, key, filters, toolbox_filters, validate=None):
for filter in filters:
if validate is None or filter in validate or filter in self.default_filters:
filter_function = self.build_filter_function(filter)
if filter_function is not None:
toolbox_filters[key].append(filter_function)
else:
log.warning("Refusing to load %s filter '%s' which is not defined in config", key, filter)
return toolbox_filters
def build_filter_function(self, filter_name):
"""Obtain python function (importing a submodule if needed)
corresponding to filter_name.
"""
if ":" in filter_name:
# Should be a submodule of filters (e.g. examples:restrict_development_tools)
(module_name, function_name) = filter_name.rsplit(":", 1)
            function = self._import_filter(module_name, function_name)
else:
# No module found, just load a function from this file or
            # one that has been explicitly imported.
function = globals()[filter_name.strip()]
return function
    def _import_filter(self, module_name, function_name):
function_name = function_name.strip()
for base_module in self.__base_modules:
full_module_name = f"{base_module}.{module_name.strip()}"
try:
__import__(full_module_name)
except ImportError:
continue
module = sys.modules[full_module_name]
if hasattr(module, function_name):
return getattr(module, function_name)
log.warning("Failed to load module for '%s.%s'.", module_name, function_name, exc_info=True)
# Stock Filter Functions
def _not_hidden(context, tool):
return not tool.hidden
def _handle_authorization(context, tool):
user = context.trans.user
if tool.require_login and not user:
return False
if not tool.allow_user_access(user, attempting_access=False):
return False
return True
def _has_trackster_conf(context, tool):
    return tool.trackster_conf
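
# --- Example of a dynamic filter module (illustrative sketch) ---
# A filter referenced in config as "mymodule:require_admin_for_hidden" is
# resolved by _import_filter above to a function with the (context, tool)
# signature used by the stock filters. The module and function names here
# are hypothetical.
def require_admin_for_hidden(context, tool):
    # Return True to keep the tool in the toolbox, False to drop it:
    # non-admin users only see tools that are not hidden.
    user = context.trans.user
    is_admin = bool(user and getattr(user, "is_admin", False))
    return is_admin or not tool.hidden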
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstancesByPerformanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstancesByPerformance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Tag4value(self): # String
return self.get_query_params().get('Tag.4.value')
def set_Tag4value(self, Tag4value): # String
self.add_query_param('Tag.4.value', Tag4value)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Tag2key(self): # String
return self.get_query_params().get('Tag.2.key')
def set_Tag2key(self, Tag2key): # String
self.add_query_param('Tag.2.key', Tag2key)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Tag3key(self): # String
return self.get_query_params().get('Tag.3.key')
def set_Tag3key(self, Tag3key): # String
self.add_query_param('Tag.3.key', Tag3key)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Tag1value(self): # String
return self.get_query_params().get('Tag.1.value')
def set_Tag1value(self, Tag1value): # String
self.add_query_param('Tag.1.value', Tag1value)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def METHOD_NAME(self): # String
return self.get_query_params().get('SortKey')
def set_SortKey(self, SortKey): # String
self.add_query_param('SortKey', SortKey)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_Tag3value(self): # String
return self.get_query_params().get('Tag.3.value')
def set_Tag3value(self, Tag3value): # String
self.add_query_param('Tag.3.value', Tag3value)
def get_proxyId(self): # String
return self.get_query_params().get('proxyId')
def set_proxyId(self, proxyId): # String
self.add_query_param('proxyId', proxyId)
def get_Tag5key(self): # String
return self.get_query_params().get('Tag.5.key')
def set_Tag5key(self, Tag5key): # String
self.add_query_param('Tag.5.key', Tag5key)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Tag5value(self): # String
return self.get_query_params().get('Tag.5.value')
def set_Tag5value(self, Tag5value): # String
self.add_query_param('Tag.5.value', Tag5value)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def set_Tags(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_Tag1key(self): # String
return self.get_query_params().get('Tag.1.key')
def set_Tag1key(self, Tag1key): # String
self.add_query_param('Tag.1.key', Tag1key)
def get_SortMethod(self): # String
return self.get_query_params().get('SortMethod')
def set_SortMethod(self, SortMethod): # String
self.add_query_param('SortMethod', SortMethod)
def get_Tag2value(self): # String
return self.get_query_params().get('Tag.2.value')
def set_Tag2value(self, Tag2value): # String
self.add_query_param('Tag.2.value', Tag2value)
def get_Tag4key(self): # String
return self.get_query_params().get('Tag.4.key')
def set_Tag4key(self, Tag4key): # String
self.add_query_param('Tag.4.key', Tag4key) | null |
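if __name__ == "__main__":
    # Usage sketch under stated assumptions: AcsClient and
    # do_action_with_exception are the standard aliyun-python-sdk-core entry
    # points; the credentials, region, and sort values below are placeholders.
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = DescribeDBInstancesByPerformanceRequest()
    request.set_PageNumber(1)
    request.set_PageSize(30)
    request.set_SortKey("MemoryUsage")   # placeholder sort key
    request.set_SortMethod("Desc")
    print(client.do_action_with_exception(request))  # raw JSON bytes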
695 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeInstances','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def set_InstanceClass(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Expired(self): # String
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # String
self.add_query_param('Expired', Expired)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_EditionType(self): # String
return self.get_query_params().get('EditionType')
def set_EditionType(self, EditionType): # String
self.add_query_param('EditionType', EditionType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_InstanceStatus(self): # String
return self.get_query_params().get('InstanceStatus')
def set_InstanceStatus(self, InstanceStatus): # String
self.add_query_param('InstanceStatus', InstanceStatus)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_GlobalInstance(self): # Boolean
return self.get_query_params().get('GlobalInstance')
def set_GlobalInstance(self, GlobalInstance): # Boolean
self.add_query_param('GlobalInstance', GlobalInstance)
def get_PrivateIp(self): # String
return self.get_query_params().get('PrivateIp')
def set_PrivateIp(self, PrivateIp): # String
self.add_query_param('PrivateIp', PrivateIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceIds(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_ArchitectureType(self): # String
return self.get_query_params().get('ArchitectureType')
def set_ArchitectureType(self, ArchitectureType): # String
self.add_query_param('ArchitectureType', ArchitectureType)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def METHOD_NAME(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
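if __name__ == "__main__":
    # Sketch of the RepeatList convention (tag values are placeholders):
    # set_Tags flattens a list of dicts into the numbered Tag.N.Key /
    # Tag.N.Value query parameters the API expects.
    request = DescribeInstancesRequest()
    request.set_Tags([
        {"Key": "env", "Value": "prod"},
        {"Key": "team", "Value": "data"},
    ])
    # -> Tag.1.Key=env, Tag.1.Value=prod, Tag.2.Key=team, Tag.2.Value=data
    print(request.get_query_params())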
696 | # Copyright (c) 2017-2022 The Molecular Sciences Software Institute, Virginia Tech
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Handlers for command line subcommands
'''
from .. import curate, printing, fileio, api
from .common import format_columns
import os
def _bsecurate_cli_elements_in_files(args):
'''Handles the elements-in-files subcommand'''
data = curate.elements_in_files(args.files)
return '\n'.join(format_columns(data.items()))
def _bsecurate_cli_component_file_refs(args):
'''Handles the component-file-refs subcommand'''
data = curate.component_file_refs(args.files)
s = ''
for cfile, cdata in data.items():
s += cfile + '\n'
rows = []
for el, refs in cdata:
rows.append((' ' + el, ' '.join(refs)))
s += '\n'.join(format_columns(rows)) + '\n\n'
return s
def _bsecurate_cli_print_component_file(args):
'''Handles the print-component-file subcommand'''
data = fileio.read_json_basis(args.file)
return printing.component_basis_str(data, elements=args.elements)
def _bsecurate_cli_compare_basis_sets(args):
'''Handles compare-basis-sets subcommand'''
ret = curate.compare_basis_sets(args.basis1, args.basis2, args.version1, args.version2, args.uncontract_general,
args.data_dir, args.data_dir)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE"
def _bsecurate_cli_compare_basis_files(args):
'''Handles compare-basis-files subcommand'''
ret = curate.compare_basis_files(args.file1, args.file2, args.readfmt1, args.readfmt2, args.uncontract_general)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE"
def METHOD_NAME(args):
    '''Handles the compare-basis-to-file subcommand'''
ret = curate.compare_basis_against_file(args.basis, args.file, args.readfmt, args.version, args.uncontract_general)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE"
def _bsecurate_cli_make_diff(args):
    '''Handles the make-diff subcommand'''
curate.diff_json_files(args.left, args.right)
return ''
def _bsecurate_cli_view_graph(args):
'''Handles the view-graph subcommand'''
curate.view_graph(args.basis, args.version, args.data_dir)
return ''
def _bsecurate_cli_make_graph_file(args):
'''Handles the make-graph-file subcommand'''
curate.make_graph_file(args.basis, args.outfile, args.render, args.version, args.data_dir)
return ''
def _bsecurate_cli_update_metadata(args):
'''Handles the update-metadata subcommand'''
data_dir = api._default_data_dir if args.data_dir is None else args.data_dir
metadata_file = os.path.join(data_dir, 'METADATA.json')
curate.create_metadata_file(metadata_file, data_dir)
return ''
def bsecurate_cli_handle_subcmd(args):
handler_map = {
'elements-in-files': _bsecurate_cli_elements_in_files,
'component-file-refs': _bsecurate_cli_component_file_refs,
'print-component-file': _bsecurate_cli_print_component_file,
'compare-basis-sets': _bsecurate_cli_compare_basis_sets,
'compare-basis-files': _bsecurate_cli_compare_basis_files,
'compare-basis-to-file': METHOD_NAME,
'make-diff': _bsecurate_cli_make_diff,
'view-graph': _bsecurate_cli_view_graph,
'make-graph-file': _bsecurate_cli_make_graph_file,
'update-metadata': _bsecurate_cli_update_metadata
}
return handler_map[args.subcmd](args) | null |
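if __name__ == "__main__":
    # Sketch (not part of the original module): the dispatch table keys on
    # args.subcmd, so any namespace carrying the attributes a handler reads
    # will work; the file name below is a hypothetical example.
    import argparse

    args = argparse.Namespace(subcmd="elements-in-files", files=["cc-pvdz.1.json"])
    print(bsecurate_cli_handle_subcmd(args))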
697 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddCustomLiveStreamTranscodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCustomLiveStreamTranscode','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResWithSource(self): # String
return self.get_query_params().get('ResWithSource')
def set_ResWithSource(self, ResWithSource): # String
self.add_query_param('ResWithSource', ResWithSource)
def get_Gop(self): # String
return self.get_query_params().get('Gop')
def set_Gop(self, Gop): # String
self.add_query_param('Gop', Gop)
def get_AudioCodec(self): # String
return self.get_query_params().get('AudioCodec')
def set_AudioCodec(self, AudioCodec): # String
self.add_query_param('AudioCodec', AudioCodec)
def get_KmsUID(self): # String
return self.get_query_params().get('KmsUID')
def set_KmsUID(self, KmsUID): # String
self.add_query_param('KmsUID', KmsUID)
def get_Height(self): # Integer
return self.get_query_params().get('Height')
def set_Height(self, Height): # Integer
self.add_query_param('Height', Height)
def get_App(self): # String
return self.get_query_params().get('App')
def set_App(self, App): # String
self.add_query_param('App', App)
def get_Profile(self): # Integer
return self.get_query_params().get('Profile')
def set_Profile(self, Profile): # Integer
self.add_query_param('Profile', Profile)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ExtWithSource(self): # String
return self.get_query_params().get('ExtWithSource')
def set_ExtWithSource(self, ExtWithSource): # String
self.add_query_param('ExtWithSource', ExtWithSource)
def get_BitrateWithSource(self): # String
return self.get_query_params().get('BitrateWithSource')
def set_BitrateWithSource(self, BitrateWithSource): # String
self.add_query_param('BitrateWithSource', BitrateWithSource)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_Template(self): # String
return self.get_query_params().get('Template')
def set_Template(self, Template): # String
self.add_query_param('Template', Template)
def get_Lazy(self): # String
return self.get_query_params().get('Lazy')
def set_Lazy(self, Lazy): # String
self.add_query_param('Lazy', Lazy)
def get_KmsKeyExpireInterval(self): # String
return self.get_query_params().get('KmsKeyExpireInterval')
def set_KmsKeyExpireInterval(self, KmsKeyExpireInterval): # String
self.add_query_param('KmsKeyExpireInterval', KmsKeyExpireInterval)
def get_TemplateType(self): # String
return self.get_query_params().get('TemplateType')
def set_TemplateType(self, TemplateType): # String
self.add_query_param('TemplateType', TemplateType)
def get_AudioProfile(self): # String
return self.get_query_params().get('AudioProfile')
def set_AudioProfile(self, AudioProfile): # String
self.add_query_param('AudioProfile', AudioProfile)
def get_EncryptParameters(self): # String
return self.get_query_params().get('EncryptParameters')
def set_EncryptParameters(self, EncryptParameters): # String
self.add_query_param('EncryptParameters', EncryptParameters)
def get_AudioChannelNum(self): # Integer
return self.get_query_params().get('AudioChannelNum')
def set_AudioChannelNum(self, AudioChannelNum): # Integer
self.add_query_param('AudioChannelNum', AudioChannelNum)
def get_FPS(self): # Integer
return self.get_query_params().get('FPS')
def set_FPS(self, FPS): # Integer
self.add_query_param('FPS', FPS)
def get_AudioRate(self): # Integer
return self.get_query_params().get('AudioRate')
def set_AudioRate(self, AudioRate): # Integer
self.add_query_param('AudioRate', AudioRate)
def get_FpsWithSource(self): # String
return self.get_query_params().get('FpsWithSource')
def METHOD_NAME(self, FpsWithSource): # String
self.add_query_param('FpsWithSource', FpsWithSource)
def get_AudioBitrate(self): # Integer
return self.get_query_params().get('AudioBitrate')
def set_AudioBitrate(self, AudioBitrate): # Integer
self.add_query_param('AudioBitrate', AudioBitrate)
def get_Width(self): # Integer
return self.get_query_params().get('Width')
def set_Width(self, Width): # Integer
self.add_query_param('Width', Width)
def get_VideoBitrate(self): # Integer
return self.get_query_params().get('VideoBitrate')
def set_VideoBitrate(self, VideoBitrate): # Integer
self.add_query_param('VideoBitrate', VideoBitrate)
def get_KmsKeyID(self): # String
return self.get_query_params().get('KmsKeyID')
def set_KmsKeyID(self, KmsKeyID): # String
self.add_query_param('KmsKeyID', KmsKeyID) | null |
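if __name__ == "__main__":
    # Sketch of a 720p transcode template; every value here is an
    # illustrative placeholder, not a recommended configuration.
    request = AddCustomLiveStreamTranscodeRequest()
    request.set_Domain("example.alivecdn.com")
    request.set_App("live-app")
    request.set_Template("hd720")
    request.set_Width(1280)
    request.set_Height(720)
    request.set_FPS(30)
    request.set_VideoBitrate(2000)
    print(request.get_query_params())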
698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Handle dashboard embed related tests.
Copyright (C) 2018 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import os
from unittest.mock import Mock, patch
from django.conf import settings
from revenue.management.commands.kudos_revenue import Command, call_etherscan_api
from revenue.models import DigitalGoodPurchase
from test_plus.test import TestCase
def mocked_requests_get(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, ok, status_code, reason='Fail'):
self.ok = ok
self.json_data = json_data
self.status_code = status_code
self.reason = reason
def json(self):
return self.json_data
params = args[1]
if params['action'] == 'txlist':
with open(os.path.join(os.path.dirname(__file__), 'txlist_sample.json')) as json_file:
return MockResponse(json.load(json_file), True, 200)
elif params['action'] == 'tokentx':
with open(os.path.join(os.path.dirname(__file__), 'tokentx_sample.json')) as json_file:
return MockResponse(json.load(json_file), True, 200)
return MockResponse(None, False, 404)
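# mocked_requests_get stands in for requests.get(url, params) called with the
# query dict as the second positional argument (hence args[1]); it dispatches
# on params['action'] to a recorded JSON fixture and otherwise returns a
# 404-style MockResponse. test_command_handle installs it via
# mock_func.side_effect = mocked_requests_get.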
class TestKudosRevenue(TestCase):
def setUp(self):
default_account = settings.KUDOS_REVENUE_ACCOUNT_ADDRESS
self.account = default_account if len(default_account) > 0 else '0xAD278911Ad07534F921eD7D757b6c0e6730FCB16'
@patch('revenue.management.commands.kudos_revenue.requests.get')
def test_etherscan_account_txlist_api_call(self, mock_func):
"""Test etherscan txlist api call """
mock_func.return_value = Mock(ok=True)
mock_func.return_value.json.return_value = {
"status": "1",
"message": "OK",
"result": [
{
"blockNumber": "6570004",
"timeStamp": "1540317632",
"hash": "0x84a4a80b6e70048bf0ee8b937a4931efdb4f30e248e0f15036cf40748aef938e",
"nonce": "4",
"blockHash": "0x9b9d524c5fb92ed79fac4174c8136b9a11527cf7ba30985a27502c104bb6c574",
"transactionIndex": "95",
"from": "0xf8ae578d5d4e570de6c31f26d42ef369c320ae0b",
"to": "0xAD278911Ad07534F921eD7D757b6c0e6730FCB16",
"value": "50000000000000000",
"gas": "21000",
"gasPrice": "4000000000",
"isError": "0",
"txreceipt_status": "1",
"input": "0x",
"contractAddress": "",
"cumulativeGasUsed": "7992355",
"gasUsed": "21000",
"confirmations": "2546337"
}
]
}
params = {
'module': 'account',
'action': 'txlist',
'address': self.account,
'startblock': 0,
'endblock': 6570004,
'apikey': settings.ETHERSCAN_API_KEY,
'sort': 'asc',
}
records = call_etherscan_api('mainnet', params)
self.assertTrue(len(records) == 1)
for record in records:
self.assertTrue('hash' in record)
self.assertTrue('contractAddress' in record)
self.assertTrue('value' in record)
self.assertTrue('from' in record)
self.assertTrue('to' in record)
break
'''@patch('revenue.management.commands.kudos_revenue.call_etherscan_api')
def test_etherscan_account_tokentx_api_call(self, mock_func):
"""Test etherscan tokentx api call """
mock_func.return_value = Mock(ok=True)
mock_func.return_value.json.return_value = {
"blockNumber": "6966980",
"timeStamp": "1545985587",
"hash": "0x5a852c0437b1db68de3551c335d6c1399a5c0b82516b5bf71d6b80df54f26d02",
"nonce": "203",
"blockHash": "0x1c0ce49c6d06420bbbf877db1a42681d722c3e2dc885335ff8215bbdd0f3f4de",
"from": "0x1615aecb476aec5f6066dcf2e80716ccf0e7345c",
"contractAddress": "0x85332b222787eacab0fff68cf3b884798823528c",
"to": "0xAD278911Ad07534F921eD7D757b6c0e6730FCB16",
"value": "666",
"tokenName": "WinETHFree",
"tokenSymbol": "winethfree.com (Win ETH Free)",
"tokenDecimal": "0",
"transactionIndex": "2",
"gas": "2000000",
"gasPrice": "3000000000",
"gasUsed": "1702798",
"cumulativeGasUsed": "6090923",
"input": "deprecated",
"confirmations": "2149742"
}
params = {
'module': 'account',
'action': 'tokentx',
'address': self.account,
'startblock': 0,
'endblock': 6966980,
'apikey': settings.ETHERSCAN_API_KEY,
'sort': 'asc',
}
records = call_etherscan_api('mainnet', params)
self.assertTrue(len(records) == 1)
for record in records:
self.assertTrue('hash' in record)
self.assertTrue('contractAddress' in record)
self.assertTrue('value' in record)
self.assertTrue('from' in record)
self.assertTrue('to' in record)
self.assertTrue('tokenDecimal' in record)
self.assertTrue('tokenSymbol' in record)
break'''
@patch('revenue.management.commands.kudos_revenue.requests.get')
def test_etherscan_account_wrong_api_call(self, mock_func):
"""Test wrong call to etherscan api """
mock_func.return_value = Mock(ok=False)
params = {
'module': 'account',
'action': 'transactions', # non-existent action
'address': self.account,
'startblock': 0,
'endblock': 6570004,
'apikey': settings.ETHERSCAN_API_KEY,
'sort': 'asc',
}
records = call_etherscan_api('mainnet', params)
self.assertTrue(len(records) == 0)
@patch('revenue.management.commands.kudos_revenue.requests.get')
def METHOD_NAME(self, mock_func):
"""Test no records found during etherscan api call """
mock_func.return_value = Mock(ok=False)
params = {
'module': 'account',
'action': 'txlist',
'address': self.account,
'startblock': 0,
'endblock': 6,
'apikey': settings.ETHERSCAN_API_KEY,
'sort': 'asc',
}
records = call_etherscan_api('mainnet', params)
self.assertTrue(len(records) == 0)
@patch('revenue.management.commands.kudos_revenue.requests.get')
def test_command_handle(self, mock_func):
"""Test command kudos revenue."""
mock_func.side_effect = mocked_requests_get
Command() \
.handle([], network='rinkeby', account_address=self.account, start_block=0, end_block=2965443)
assert DigitalGoodPurchase.objects.all() \
.filter(receive_address__iexact=self.account, tokenName__iexact='ETH').count() == 2
assert DigitalGoodPurchase.objects.all() \
.filter(receive_address__iexact=self.account) \
.exclude(tokenName='ETH').count() == 0 | null |
699 | #!/usr/bin/env python3
# Setup script for PyPI; use CMakeLists.txt to build extension modules
import contextlib
import os
import re
import shutil
import string
import subprocess
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, Iterator, List, Union
import setuptools.command.sdist
DIR = Path(__file__).parent.absolute()
VERSION_REGEX = re.compile(
r"^\s*#\s*define\s+PYBIND11_VERSION_([A-Z]+)\s+(.*)$", re.MULTILINE
)
VERSION_FILE = Path("pybind11/_version.py")
COMMON_FILE = Path("include/pybind11/detail/common.h")
def METHOD_NAME(matches: Dict[str, str]) -> str:
patch_level_serial = matches["PATCH"]
serial = None
major = int(matches["MAJOR"])
minor = int(matches["MINOR"])
flds = patch_level_serial.split(".")
if flds:
patch = int(flds[0])
if len(flds) == 1:
level = "0"
serial = 0
elif len(flds) == 2:
level_serial = flds[1]
for level in ("a", "b", "c", "dev"):
if level_serial.startswith(level):
serial = int(level_serial[len(level) :])
break
if serial is None:
msg = f'Invalid PYBIND11_VERSION_PATCH: "{patch_level_serial}"'
raise RuntimeError(msg)
version_hex_str = f"{major:02x}{minor:02x}{patch:02x}{level[:1]}{serial:x}"
return f"0x{version_hex_str.upper()}"
# PYBIND11_GLOBAL_SDIST will build a different sdist, with the python-headers
# files, and the sys.prefix files (CMake and headers).
global_sdist = os.environ.get("PYBIND11_GLOBAL_SDIST", False)
setup_py = Path(
"tools/setup_global.py.in" if global_sdist else "tools/setup_main.py.in"
)
extra_cmd = 'cmdclass["sdist"] = SDist\n'
to_src = (
(Path("pyproject.toml"), Path("tools/pyproject.toml")),
(Path("setup.py"), setup_py),
)
# Read the listed version
loc: Dict[str, str] = {}
code = compile(VERSION_FILE.read_text(encoding="utf-8"), "pybind11/_version.py", "exec")
exec(code, loc)
version = loc["__version__"]
# Verify that the version matches the one in C++
matches = dict(VERSION_REGEX.findall(COMMON_FILE.read_text(encoding="utf8")))
cpp_version = "{MAJOR}.{MINOR}.{PATCH}".format(**matches)
if version != cpp_version:
msg = f"Python version {version} does not match C++ version {cpp_version}!"
raise RuntimeError(msg)
version_hex = matches.get("HEX", "MISSING")
exp_version_hex = METHOD_NAME(matches)
if version_hex != exp_version_hex:
msg = f"PYBIND11_VERSION_HEX {version_hex} does not match expected value {exp_version_hex}!"
raise RuntimeError(msg)
# TODO: use literals & overload (typing extensions or Python 3.8)
def get_and_replace(
filename: Path, binary: bool = False, **opts: str
) -> Union[bytes, str]:
if binary:
contents = filename.read_bytes()
return string.Template(contents.decode()).substitute(opts).encode()
return string.Template(filename.read_text()).substitute(opts)
# Use our input files instead when making the SDist (and anything that depends
# on it, like a wheel)
class SDist(setuptools.command.sdist.sdist): # type: ignore[misc]
def make_release_tree(self, base_dir: str, files: List[str]) -> None:
super().make_release_tree(base_dir, files)
for to, src in to_src:
txt = get_and_replace(src, binary=True, version=version, extra_cmd="")
dest = Path(base_dir) / to
# This is normally linked, so unlink before writing!
dest.unlink()
dest.write_bytes(txt) # type: ignore[arg-type]
# Remove the CMake install directory when done
@contextlib.contextmanager
def remove_output(*sources: str) -> Iterator[None]:
try:
yield
finally:
for src in sources:
shutil.rmtree(src)
with remove_output("pybind11/include", "pybind11/share"):
# Generate the files if they are not present.
with TemporaryDirectory() as tmpdir:
cmd = ["cmake", "-S", ".", "-B", tmpdir] + [
"-DCMAKE_INSTALL_PREFIX=pybind11",
"-DBUILD_TESTING=OFF",
"-DPYBIND11_NOPYTHON=ON",
]
if "CMAKE_ARGS" in os.environ:
fcommand = [
c
for c in os.environ["CMAKE_ARGS"].split()
if "DCMAKE_INSTALL_PREFIX" not in c
]
cmd += fcommand
subprocess.run(cmd, check=True, cwd=DIR, stdout=sys.stdout, stderr=sys.stderr)
subprocess.run(
["cmake", "--install", tmpdir],
check=True,
cwd=DIR,
stdout=sys.stdout,
stderr=sys.stderr,
)
txt = get_and_replace(setup_py, version=version, extra_cmd=extra_cmd)
code = compile(txt, setup_py, "exec")
exec(code, {"SDist": SDist}) | null |
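# The templated tools/setup_main.py.in (or tools/setup_global.py.in) is assumed
# to supply the actual setuptools.setup() call; exec-ing it here with SDist
# injected lets one setup.py serve both sdist flavors.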